Skip to content

Commit

Permalink
ci: add backup routine for SPAR
Browse files Browse the repository at this point in the history
issue #1268
  • Loading branch information
Ricardo Campos committed Oct 2, 2024
1 parent f730b62 commit 793ea91
Show file tree
Hide file tree
Showing 3 changed files with 285 additions and 0 deletions.
33 changes: 33 additions & 0 deletions .github/workflows/.backup.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
name: .Backup

on:
  workflow_call:
    inputs:
      ### Required
      # Nothing! Only `secrets: inherit` is required

      ### Typical / recommended
      environment:
        description: GitHub/OpenShift environment; usually PR number, test or prod
        default: ''
        required: false
        type: string
      target:
        description: Deployment target; usually PR number, test or prod
        default: ${{ github.event.number }}
        required: false
        type: string

jobs:
  backup:
    name: Backup
    environment: ${{ inputs.environment }}
    runs-on: ubuntu-24.04
    steps:
      # Best effort: a failed backup must not block the deployment that
      # follows, hence continue-on-error.
      - name: Backup database before update
        continue-on-error: true
        run: |
          oc login --token=${{ secrets.OC_TOKEN }} --server=${{ secrets.OC_SERVER }}
          oc project ${{ vars.OC_NAMESPACE }}
          # Run a backup before deploying a new version
          # Timestamp suffix keeps job names unique across repeated deploys.
          oc create job --from=cronjob/${{ github.event.repository.name }}-${{ inputs.target }}-database-backup ${{ github.event.repository.name }}-${{ inputs.target }}-database-backup-$(date +%Y%m%d%H%M%S)
21 changes: 21 additions & 0 deletions .github/workflows/.deploy.yml
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,27 @@ jobs:
${{ github.event_name == 'pull_request' && '-p MEMORY_REQUEST=100Mi' || '' }}
${{ github.event_name == 'pull_request' && '-p MEMORY_LIMIT=200Mi' || '' }}

      # Create/refresh the backup & restore CronJobs and their shared PVC from
      # the OpenShift template (common/openshift.backup.yml) added in this
      # commit.  `overwrite: true` re-applies the template on every deploy.
      - name: Deploy Database Backup
        uses: bcgov-nr/[email protected]
        with:
          file: common/openshift.backup.yml
          oc_namespace: ${{ vars.OC_NAMESPACE }}
          oc_server: ${{ secrets.OC_SERVER }}
          oc_token: ${{ secrets.OC_TOKEN }}
          oc_version: "4.13"
          overwrite: true
          parameters:
            -p ZONE=${{ env.ZONE }} -p NAME=${{ github.event.repository.name }}
            -p PG_DB_IMAGE=postgis/postgis:15-master
- name: Backup
if: steps.triggers.outputs.core == 'true' || steps.triggers.outputs.sync == 'true'
secrets: inherit
uses: ./.github/workflows/.deploy.yml
with:
environment: ${{ inputs.environment }}
target: ${{ inputs.target }}

deploy:
name: Deploy
environment: ${{ inputs.environment }}
Expand Down
231 changes: 231 additions & 0 deletions common/openshift.backup.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,231 @@
# OpenShift template providing a nightly database backup CronJob, a
# manually-triggered restore CronJob, and the PVC they share.
apiVersion: template.openshift.io/v1
kind: Template
labels:
  app: ${NAME}-${ZONE}
  app.kubernetes.io/part-of: ${NAME}-${ZONE}
parameters:
  - name: NAME
    description: Product name
    value: nr-spar
  - name: COMPONENT
    description: Component name
    value: database-backup
  - name: ZONE
    description: Deployment zone, e.g. pr-### or prod
    required: true
  - name: RESTORE_DIR
    description: Directory to be used for restoring the backup
    value: /tmp/restore
  - name: REGISTRY
    description: Container registry to import from (internal is image-registry.openshift-image-registry.svc:5000)
    value: ghcr.io
  - name: BACKUP_DIR
    description: "The name of the root backup directory"
    required: true
    value: /tmp/backup
  - name: NUM_BACKUPS
    # Consumed as `find -mtime +N` — i.e. prune dumps *older than N days*;
    # that equals N files only because the backup runs once per day.
    description: The number of backup files to be retained
    required: false
    value: "5"
  - name: JOB_SERVICE_ACCOUNT
    # Typo fixed in description: "Exeucte" -> "execute".
    description: "Name of the service account to execute the job as"
    value: "default"
    required: true
  - name: SUCCESS_JOBS_HISTORY_LIMIT
    description: "The number of successful jobs that will be retained"
    value: "5"
    required: true
  - name: FAILED_JOBS_HISTORY_LIMIT
    description: "The number of failed jobs that will be retained"
    value: "2"
    required: true
  - name: JOB_BACKOFF_LIMIT
    description: "The number of attempts to try for a successful job outcome"
    value: "0"
  - name: PVC_SIZE
    description: Volume space available for data, e.g. 512Mi, 2Gi.
    value: 256Mi
  - name: PG_DB_IMAGE
    description: PostgreSQL Image (namespace/name:tag) to be used for backup
    required: true
objects:
- kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: ${NAME}-${ZONE}-${COMPONENT}
labels:
app: ${NAME}-${ZONE}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "${PVC_SIZE}"
storageClassName: netapp-file-standard
  # Nightly data-only SQL dump of the database into the shared PVC, with
  # age-based pruning, and staging of the fresh dump for the restore job.
  - kind: CronJob
    apiVersion: "batch/v1"
    metadata:
      name: ${NAME}-${ZONE}-${COMPONENT}
      labels:
        app: ${NAME}-${ZONE}
        cronjob: ${NAME}-${ZONE}
    spec:
      # Every day at midnight (cluster timezone).
      schedule: "0 0 * * *"
      concurrencyPolicy: "Replace"
      successfulJobsHistoryLimit: ${{SUCCESS_JOBS_HISTORY_LIMIT}}
      failedJobsHistoryLimit: ${{FAILED_JOBS_HISTORY_LIMIT}}
      jobTemplate:
        metadata:
          labels:
            app: ${NAME}-${ZONE}
            cronjob: ${NAME}-${ZONE}
        spec:
          backoffLimit: ${{JOB_BACKOFF_LIMIT}}
          template:
            metadata:
              labels:
                app: ${NAME}-${ZONE}
                cronjob: ${NAME}-${ZONE}
            spec:
              containers:
                - name: ${NAME}-${ZONE}-${COMPONENT}
                  image: ${REGISTRY}/${PG_DB_IMAGE}
                  command: ["/bin/sh", "-c"]
                  # Script: dump -> prune dumps older than NUM_BACKUPS days ->
                  # copy the new dump to RESTORE_DIR as W0__restore.sql for the
                  # restore CronJob below.
                  # NOTE(review): every --exclude-table targets the `nrfc`
                  # schema while --schema=nr-spar restricts the dump to
                  # `nr-spar`, so these excludes can never match anything —
                  # they look copied from another project (nr-forest-client);
                  # confirm the intended schema and table names.
                  # NOTE(review): `find -mtime +$NUM_BACKUPS` prunes by *age in
                  # days*, not file count; equivalent only at one dump per day.
                  args:
                    - |
                      pg_dump \
                      -U ${POSTGRESQL_USER} \
                      -h ${NAME}-${ZONE}-database \
                      -d ${POSTGRESQL_DATABASE} \
                      --data-only \
                      --schema=nr-spar \
                      --inserts \
                      --no-comments \
                      --on-conflict-do-nothing \
                      --no-sync \
                      --exclude-table=nrfc.client_type_code \
                      --exclude-table=nrfc.submission_status_code \
                      --exclude-table=nrfc.submission_type_code \
                      --exclude-table=nrfc.province_code \
                      --exclude-table=nrfc.country_code \
                      --exclude-table=nrfc.contact_type_code \
                      --exclude-table=nrfc.business_type_code \
                      --exclude-table=nrfc.district_code \
                      --file=${BACKUP_DIR}/backup_$(date +%Y-%m-%d).sql \
                      &&
                      find "${BACKUP_DIR}" -type f -mtime +$NUM_BACKUPS -exec rm -f {} \; &&
                      cp -r ${BACKUP_DIR}/backup_$(date +%Y-%m-%d).sql ${RESTORE_DIR}/W0__restore.sql
                  # Same PVC mounted twice: once for dumps, once for staging.
                  volumeMounts:
                    - mountPath: "${BACKUP_DIR}"
                      name: ${NAME}-${ZONE}-${COMPONENT}
                    - mountPath: "${RESTORE_DIR}"
                      name: ${NAME}-${ZONE}-${COMPONENT}
                  env:
                    - name: RESTORE_DIR
                      value: "${RESTORE_DIR}"
                    - name: BACKUP_DIR
                      value: "${BACKUP_DIR}"
                    - name: NUM_BACKUPS
                      value: "${NUM_BACKUPS}"
                    # Connection details come from the database deployment's
                    # secret; PGPASSWORD is what pg_dump reads for auth.
                    - name: POSTGRESQL_DATABASE
                      valueFrom:
                        secretKeyRef:
                          name: ${NAME}-${ZONE}-database
                          key: database-name
                    - name: POSTGRESQL_USER
                      valueFrom:
                        secretKeyRef:
                          name: ${NAME}-${ZONE}-database
                          key: database-user
                    - name: POSTGRESQL_PASSWORD
                      valueFrom:
                        secretKeyRef:
                          name: ${NAME}-${ZONE}-database
                          key: database-password
                    - name: PGPASSWORD
                      valueFrom:
                        secretKeyRef:
                          name: ${NAME}-${ZONE}-database
                          key: database-password
              volumes:
                - name: ${NAME}-${ZONE}-${COMPONENT}
                  persistentVolumeClaim:
                    claimName: ${NAME}-${ZONE}-${COMPONENT}
              restartPolicy: "Never"
              terminationGracePeriodSeconds: 30
              activeDeadlineSeconds: 1600
              dnsPolicy: "ClusterFirst"
              serviceAccountName: "${JOB_SERVICE_ACCOUNT}"
              serviceAccount: "${JOB_SERVICE_ACCOUNT}"
- kind: CronJob
apiVersion: "batch/v1"
metadata:
name: ${NAME}-${ZONE}-${COMPONENT}-restore
labels:
app: ${NAME}-${ZONE}
cronjob: ${NAME}-${ZONE}
spec:
schedule: "0 0 31 2 *"
concurrencyPolicy: "Replace"
successfulJobsHistoryLimit: ${{SUCCESS_JOBS_HISTORY_LIMIT}}
failedJobsHistoryLimit: ${{FAILED_JOBS_HISTORY_LIMIT}}
jobTemplate:
metadata:
labels:
app: ${NAME}-${ZONE}
cronjob: ${NAME}-${ZONE}
spec:
backoffLimit: ${{JOB_BACKOFF_LIMIT}}
template:
metadata:
labels:
app: ${NAME}-${ZONE}
cronjob: ${NAME}-${ZONE}
spec:
containers:
- name: ${NAME}-${ZONE}-${COMPONENT}-restore
image: ${REGISTRY}/${PG_DB_IMAGE}
command: ["/bin/sh", "-c"]
args:
- |
find ${RESTORE_DIR} -type f -name "*.sql" -print0 | sort -zV |
while IFS= read -r -d '' sql_file; do
echo "Running SQL file: $sql_file"
psql -h ${TARGET_HOST} -U ${POSTGRESQL_USER} -d ${POSTGRESQL_DATABASE} -f $sql_file
done
volumeMounts:
- mountPath: "${RESTORE_DIR}"
name: ${NAME}-${ZONE}-${COMPONENT}
env:
- name: RESTORE_DIR
value: "${RESTORE_DIR}"
- name: POSTGRESQL_DATABASE
valueFrom:
secretKeyRef:
name: ${NAME}-${ZONE}-database
key: database-name
- name: POSTGRESQL_USER
valueFrom:
secretKeyRef:
name: ${NAME}-${ZONE}-database
key: database-user
- name: POSTGRESQL_PASSWORD
valueFrom:
secretKeyRef:
name: ${NAME}-${ZONE}-database
key: database-password
- name: PGPASSWORD
valueFrom:
secretKeyRef:
name: ${NAME}-${ZONE}-database
key: database-password
volumes:
- name: ${NAME}-${ZONE}-${COMPONENT}
persistentVolumeClaim:
claimName: ${NAME}-${ZONE}-${COMPONENT}
restartPolicy: "Never"
terminationGracePeriodSeconds: 30
activeDeadlineSeconds: 1600
dnsPolicy: "ClusterFirst"
serviceAccountName: "${JOB_SERVICE_ACCOUNT}"
serviceAccount: "${JOB_SERVICE_ACCOUNT}"

0 comments on commit 793ea91

Please sign in to comment.