diff --git a/.github/workflows/.deploy.yml b/.github/workflows/.deploy.yml index 8b86dc323..ad6b3c355 100644 --- a/.github/workflows/.deploy.yml +++ b/.github/workflows/.deploy.yml @@ -34,7 +34,7 @@ jobs: file: common/openshift.init.yml overwrite: true parameters: - -p NAME_SUFFIX=${{ inputs.target }} + -p ZONE=${{ inputs.target }} -p POSTGRESQL_PASSWORD=${{ secrets.POSTGRES_PASSWORD }} -p POSTGRESQL_USER=${{ secrets.POSTGRES_USER }} -p FDW_DATABASE_PASSWORD=${{ secrets.FDW_DATABASE_PASSWORD }} @@ -70,7 +70,7 @@ jobs: oc_version: "4.14.37" file: minio/openshift.deploy.yml parameters: - -p NAME_SUFFIX=${{ inputs.target }} + -p ZONE=${{ inputs.target }} -p IMAGE_TAG=${{ inputs.tag }} # ${{ inputs.environment && '' || '-p MINIO_DATA_DIR=/tmp/data' }} # ${{ inputs.environment && '' || '-p DEST_PVC_SIZE=1Mi' }} @@ -94,7 +94,7 @@ jobs: -p IMAGE_STREAM_NAMESPACE=${{ vars.OC_NAMESPACE }} -p IMAGE_STREAM_NAME=crunchy-postgres-gis -p IMAGE_TAG=${{ inputs.tag }} - -p NAME_SUFFIX=${{ inputs.target }} + -p ZONE=${{ inputs.target }} -p STORAGE_CLASS=netapp-file-standard -p REQUEST_CPU=200m -p LIMIT_CPU=500m @@ -115,7 +115,7 @@ jobs: oc_version: "4.14.37" file: backend/openshift.deploy.yml parameters: - -p NAME_SUFFIX=${{ inputs.target }} + -p ZONE=${{ inputs.target }} -p IMAGE_TAG=${{ inputs.tag }} -p E_LICENSING_URL=${{ vars.E_LICENSING_URL }} -p DB_REPLICATE=${{ vars.DB_REPLICATE }} @@ -160,6 +160,6 @@ jobs: oc_version: "4.14.37" file: frontend/openshift.deploy.yml parameters: - -p NAME_SUFFIX=${{ inputs.target }} + -p ZONE=${{ inputs.target }} -p IMAGE_TAG=${{ inputs.tag }} \ No newline at end of file diff --git a/backend/openshift.deploy.yml b/backend/openshift.deploy.yml index 066c540ba..58a5276e1 100644 --- a/backend/openshift.deploy.yml +++ b/backend/openshift.deploy.yml @@ -1,94 +1,92 @@ kind: Template apiVersion: template.openshift.io/v1 parameters: -- name: NAME_SUFFIX - displayName: Name Suffix - description: A suffix appended to all objects - required: true -- name: IMAGE_TAG - required: true -- name: HOST - required: false - value: '' -- name: CPU_REQUEST - required: false - value: 25m -- name: CPU_LIMIT - required: false - value: 100m -- name: MEMORY_REQUEST - required: false - value: 150Mi -- name: MEMORY_LIMIT - required: false - value: 450Mi -- name: PSQL_IMAGE - description: A psql client image (a PostgreSQL image will suffice) - required: true - value: image-registry.openshift-image-registry.svc:5000/26e83e-tools/crunchy-postgres-gis:centos7-12.4-3.0-4.5.0 -- name: E_LICENSING_URL - required: true -- name: DB_REPLICATE - required: true -- name: DJANGO_DEBUG - required: true -- name: ENABLE_ADDITIONAL_DOCUMENTS - required: true -- name: S3_PRIVATE_BUCKET - required: true -- name: S3_PRIVATE_HOST - required: true -- name: S3_WELL_EXPORT_BUCKET - required: true -- name: SSO_AUDIENCE - required: true -- name: SSO_AUTH_HOST - required: true -- name: SSO_CLIENT - required: true -- name: SSO_IDP_HINT - required: true -- name: SSO_PORT - required: true -- name: SSO_PUBKEY - required: true -- name: SSO_REALM - required: true -- name: SSO_TEST_AUDIENCE - required: true -- name: SSO_TEST_CLIENT - required: true -- name: GDAL_LIBRARY_PATH - required: true -- name: GEOS_LIBRARY_PATH - required: true -- name: S3_AQUIFER_BUCKET - required: true -- name: S3_REGISTRANT_BUCKET - required: true -- name: S3_PRIVATE_ROOT_BUCKET - required: true -- name: S3_PRIVATE_AQUIFER_BUCKET - required: true -- name: S3_PRIVATE_REGISTRANT_BUCKET - required: true -- name: S3_PRIVATE_WELL_BUCKET - required: true -- 
name: ENABLE_AQUIFERS_SEARCH - required: true -- name: EMAIL_NOTIFICATION_RECIPIENT - required: true -- name: GEOCODER_ADDRESS_API_BASE - required: true + - name: REPO + description: Repository name + value: nr-gwells + - name: COMPONENT + description: Component name + value: backend + - name: ZONE + description: Deployment zone, e.g. pr-### or prod + required: true + - name: TAG + required: true + - name: CPU_REQUEST + required: false + value: 25m + - name: CPU_LIMIT + required: false + value: 100m + - name: MEMORY_REQUEST + required: false + value: 150Mi + - name: MEMORY_LIMIT + required: false + value: 450Mi + - name: E_LICENSING_URL + required: true + - name: DB_REPLICATE + required: true + - name: DJANGO_DEBUG + required: true + - name: ENABLE_ADDITIONAL_DOCUMENTS + required: true + - name: S3_PRIVATE_BUCKET + required: true + - name: S3_PRIVATE_HOST + required: true + - name: S3_WELL_EXPORT_BUCKET + required: true + - name: SSO_AUDIENCE + required: true + - name: SSO_AUTH_HOST + required: true + - name: SSO_CLIENT + required: true + - name: SSO_IDP_HINT + required: true + - name: SSO_PORT + required: true + - name: SSO_PUBKEY + required: true + - name: SSO_REALM + required: true + - name: SSO_TEST_AUDIENCE + required: true + - name: SSO_TEST_CLIENT + required: true + - name: GDAL_LIBRARY_PATH + required: true + - name: GEOS_LIBRARY_PATH + required: true + - name: S3_AQUIFER_BUCKET + required: true + - name: S3_REGISTRANT_BUCKET + required: true + - name: S3_PRIVATE_ROOT_BUCKET + required: true + - name: S3_PRIVATE_AQUIFER_BUCKET + required: true + - name: S3_PRIVATE_REGISTRANT_BUCKET + required: true + - name: S3_PRIVATE_WELL_BUCKET + required: true + - name: ENABLE_AQUIFERS_SEARCH + required: true + - name: EMAIL_NOTIFICATION_RECIPIENT + required: true + - name: GEOCODER_ADDRESS_API_BASE + required: true objects: - kind: ConfigMap apiVersion: v1 metadata: creationTimestamp: - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config labels: - appver: gwells-${NAME_SUFFIX} - app: nr-gwells-${NAME_SUFFIX} + appver: ${REPO}-${ZONE} + app: ${REPO}-${ZONE} data: DB_REPLICATE: ${DB_REPLICATE} DJANGO_DEBUG: ${DJANGO_DEBUG} @@ -120,11 +118,10 @@ objects: - kind: Deployment apiVersion: apps/v1 metadata: - name: gwells-${NAME_SUFFIX}-backend + name: ${REPO}-${ZONE}-${COMPONENT} creationTimestamp: labels: - app: nr-gwells-${NAME_SUFFIX} - appver: gwells-${NAME_SUFFIX}-backend + app: ${REPO}-${ZONE}-${COMPONENT} annotations: description: Defines how to deploy the application server spec: @@ -138,49 +135,48 @@ objects: # command: # - "/usr/bin/container-entrypoint" # - "/opt/app-root/src/scripts/pre-deploy.sh" - # containerName: gwells-app-${NAME_SUFFIX}-backend + # containerName: ${REPO}-${ZONE}-${COMPONENT}-app # env: # - name: PGDATABASE # valueFrom: # secretKeyRef: - # name: gwells-pg12-${NAME_SUFFIX}-backend + # name: ${REPO}-${ZONE}-${COMPONENT}-pg12 # key: database-name # - name: PGUSER # valueFrom: # secretKeyRef: - # name: gwells-pg12-${NAME_SUFFIX}-backend + # name: ${REPO}-${ZONE}-${COMPONENT}-pg12 # key: database-user # - name: PGPASSWORD # valueFrom: # secretKeyRef: - # name: gwells-pg12-${NAME_SUFFIX}-backend + # name: ${REPO}-${ZONE}-${COMPONENT}-pg12 # key: database-password # - name: PGHOST - # value: gwells-pg12-${NAME_SUFFIX}-backend + # value: ${REPO}-${ZONE}-${COMPONENT}-pg12 resources: {} activeDeadlineSeconds: 21600 replicas: 1 test: false selector: - name: gwells-${NAME_SUFFIX}-backend + name: ${REPO}-${ZONE}-${COMPONENT} matchLabels: - deployment: 
gwells-${NAME_SUFFIX}-backend + deployment: ${REPO}-${ZONE}-${COMPONENT} template: metadata: - name: gwells-${NAME_SUFFIX}-backend + name: ${REPO}-${ZONE}-${COMPONENT} creationTimestamp: labels: - name: gwells-${NAME_SUFFIX}-backend - appver: gwells-${NAME_SUFFIX}-backend - deployment: gwells-${NAME_SUFFIX}-backend + name: ${REPO}-${ZONE}-${COMPONENT} + deployment: ${REPO}-${ZONE}-${COMPONENT} spec: volumes: - name: staticfiles emptyDir: sizeLimit: 250Mi containers: - - name: gwells-app-${NAME_SUFFIX}-backend - image: 'ghcr.io/bcgov/nr-gwells/backend:${IMAGE_TAG}' + - name: ${REPO}-${ZONE}-${COMPONENT}-app + image: 'ghcr.io/bcgov/${REPO}/${COMPONENT}:${TAG}' volumeMounts: - name: staticfiles mountPath: /app/staticfiles @@ -189,237 +185,237 @@ objects: protocol: TCP env: - name: DATABASE_SERVICE_NAME - value: gwells-${NAME_SUFFIX}-database + value: ${REPO}-${ZONE}-database - name: DATABASE_NAME valueFrom: secretKeyRef: - name: gwells-${NAME_SUFFIX}-database + name: ${REPO}-${ZONE}-database key: database-name - name: DATABASE_USER valueFrom: secretKeyRef: - name: gwells-${NAME_SUFFIX}-database + name: ${REPO}-${ZONE}-database key: database-user - name: DATABASE_PASSWORD valueFrom: secretKeyRef: - name: gwells-${NAME_SUFFIX}-database + name: ${REPO}-${ZONE}-database key: database-password - name: DATABASE_SCHEMA value: public - name: APP_MODULE value: wsgi:application - name: APP_HOME - value: backend + value: ${COMPONENT} - name: APP_CONFIG - value: "/opt/app-root/src/backend/gunicorn.ocp4.cfg" + value: "/opt/app-root/src/${COMPONENT}/gunicorn.ocp4.cfg" - name: DJANGO_SECRET_KEY valueFrom: secretKeyRef: - name: gwells-django-${NAME_SUFFIX} + name: ${REPO}-django-${ZONE} key: secret_key - name: DJANGO_ADMIN_URL valueFrom: secretKeyRef: - name: gwells-django-${NAME_SUFFIX} + name: ${REPO}-django-${ZONE} key: admin_url - name: DJANGO_ADMIN_USER valueFrom: secretKeyRef: - name: gwells-django-${NAME_SUFFIX} + name: ${REPO}-django-${ZONE} key: admin_user - name: DJANGO_ADMIN_PASSWORD valueFrom: secretKeyRef: - name: gwells-django-${NAME_SUFFIX} + name: ${REPO}-django-${ZONE} key: admin_password - name: E_LICENSING_AUTH_PASSWORD valueFrom: secretKeyRef: - name: gwells-e-licensing-${NAME_SUFFIX} + name: ${REPO}-e-licensing-${ZONE} key: E_LICENSING_AUTH_PASSWORD - name: E_LICENSING_AUTH_USERNAME valueFrom: secretKeyRef: - name: gwells-e-licensing-${NAME_SUFFIX} + name: ${REPO}-e-licensing-${ZONE} key: E_LICENSING_AUTH_USERNAME - name: E_LICENSING_URL valueFrom: configMapKeyRef: - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config key: E_LICENSING_URL - name: DJANGO_DEBUG valueFrom: configMapKeyRef: key: DJANGO_DEBUG - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: GDAL_LIBRARY_PATH valueFrom: configMapKeyRef: key: GDAL_LIBRARY_PATH - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: GEOS_LIBRARY_PATH valueFrom: configMapKeyRef: key: GEOS_LIBRARY_PATH - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: S3_AQUIFER_BUCKET valueFrom: configMapKeyRef: key: S3_AQUIFER_BUCKET - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: S3_REGISTRANT_BUCKET valueFrom: configMapKeyRef: key: S3_REGISTRANT_BUCKET - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: S3_PRIVATE_ROOT_BUCKET valueFrom: configMapKeyRef: key: S3_PRIVATE_ROOT_BUCKET - name: gwells-global-config-${NAME_SUFFIX} + name: 
${REPO}-${ZONE}-global-config - name: S3_PRIVATE_AQUIFER_BUCKET valueFrom: configMapKeyRef: key: S3_PRIVATE_AQUIFER_BUCKET - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: S3_PRIVATE_REGISTRANT_BUCKET valueFrom: configMapKeyRef: key: S3_PRIVATE_REGISTRANT_BUCKET - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: S3_PRIVATE_WELL_BUCKET valueFrom: configMapKeyRef: key: S3_PRIVATE_WELL_BUCKET - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: SSO_CLIENT valueFrom: configMapKeyRef: key: SSO_CLIENT - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: SSO_PUBKEY valueFrom: configMapKeyRef: key: SSO_PUBKEY - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: SSO_AUTH_HOST valueFrom: configMapKeyRef: key: SSO_AUTH_HOST - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: SSO_AUDIENCE valueFrom: configMapKeyRef: key: SSO_AUDIENCE - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: SSO_REALM valueFrom: configMapKeyRef: key: SSO_REALM - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: SSO_PORT valueFrom: configMapKeyRef: key: SSO_PORT - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: SSO_TEST_CLIENT valueFrom: configMapKeyRef: key: SSO_TEST_CLIENT - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: SSO_TEST_AUDIENCE valueFrom: configMapKeyRef: key: SSO_TEST_AUDIENCE - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: ENABLE_ADDITIONAL_DOCUMENTS valueFrom: configMapKeyRef: key: ENABLE_ADDITIONAL_DOCUMENTS - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: ENABLE_AQUIFERS_SEARCH valueFrom: configMapKeyRef: key: ENABLE_AQUIFERS_SEARCH - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: APP_CONTEXT_ROOT - value: gwells + value: ${REPO} - name: SESSION_COOKIE_SECURE - value: 'True' + value: 'true' - name: CSRF_COOKIE_SECURE - value: 'True' + value: 'true' - name: DB_REPLICATE valueFrom: configMapKeyRef: key: DB_REPLICATE - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config # - name: MINIO_ACCESS_KEY # valueFrom: # secretKeyRef: - # name: minio-access-parameters-${NAME_SUFFIX} + # name: minio-access-parameters-${ZONE} # key: MINIO_ACCESS_KEY # - name: MINIO_SECRET_KEY # valueFrom: # secretKeyRef: - # name: minio-access-parameters-${NAME_SUFFIX} + # name: minio-access-parameters-${ZONE} # key: MINIO_SECRET_KEY # - name: S3_PUBLIC_ACCESS_KEY # valueFrom: # secretKeyRef: - # name: minio-access-parameters-${NAME_SUFFIX} + # name: minio-access-parameters-${ZONE} # key: S3_PUBLIC_ACCESS_KEY # - name: S3_PUBLIC_SECRET_KEY # valueFrom: # secretKeyRef: - # name: minio-access-parameters-${NAME_SUFFIX} + # name: minio-access-parameters-${ZONE} # key: S3_PUBLIC_SECRET_KEY # - name: S3_HOST # valueFrom: # secretKeyRef: - # name: minio-access-parameters-${NAME_SUFFIX} + # name: minio-access-parameters-${ZONE} # key: S3_HOST # - name: S3_ROOT_BUCKET # valueFrom: # secretKeyRef: - # name: minio-access-parameters-${NAME_SUFFIX} + # name: minio-access-parameters-${ZONE} # key: S3_ROOT_BUCKET - name: S3_PRIVATE_HOST valueFrom: configMapKeyRef: key: S3_PRIVATE_HOST - name: 
gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: S3_WELL_EXPORT_BUCKET valueFrom: configMapKeyRef: key: S3_WELL_EXPORT_BUCKET - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: S3_PRIVATE_BUCKET valueFrom: configMapKeyRef: key: S3_PRIVATE_BUCKET - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: SSO_IDP_HINT valueFrom: configMapKeyRef: key: SSO_IDP_HINT - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: WEB_CONCURRENCY value: '4' - name: GUNICORN_WORKERS value: '4' - name: ENFORCE_ENV_VARIABLES - value: 'False' + value: 'false' - name: EMAIL_NOTIFICATION_RECIPIENT valueFrom: configMapKeyRef: key: EMAIL_NOTIFICATION_RECIPIENT - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config - name: GEOCODER_ADDRESS_API_BASE valueFrom: configMapKeyRef: key: GEOCODER_ADDRESS_API_BASE - name: gwells-global-config-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-global-config resources: limits: cpu: "${CPU_LIMIT}" @@ -460,57 +456,51 @@ objects: metadata: creationTimestamp: labels: - app: nr-gwells-${NAME_SUFFIX} - appver: gwells-${NAME_SUFFIX}-backend - name: gwells-${NAME_SUFFIX}-backend + app: ${REPO}-${ZONE} + appver: ${REPO}-${ZONE}-${COMPONENT} + name: ${REPO}-${ZONE}-${COMPONENT} spec: maxReplicas: 5 minReplicas: 1 scaleTargetRef: apiVersion: v1 kind: Deployment - name: gwells-${NAME_SUFFIX}-backend + name: ${REPO}-${ZONE}-${COMPONENT} targetCPUUtilizationPercentage: 90 - kind: Service apiVersion: v1 metadata: - name: nr-gwells-${NAME_SUFFIX}-backend + name: ${REPO}-${ZONE}-${COMPONENT} creationTimestamp: labels: - app: nr-gwells-${NAME_SUFFIX} - appver: gwells-${NAME_SUFFIX}-backend - annotations: - description: Exposes and load balances the application pods + app: ${REPO}-${ZONE}-${COMPONENT} spec: ports: - name: web protocol: TCP - port: 8000 - targetPort: 8000 + port: 8080 + targetPort: 8080 selector: - name: nr-gwells-${NAME_SUFFIX}-backend + name: ${REPO}-${ZONE}-${COMPONENT} type: ClusterIP sessionAffinity: None -# - kind: Route -# apiVersion: v1 -# metadata: -# name: nr-gwells-${NAME_SUFFIX}-backend -# creationTimestamp: -# labels: -# frontend: 'true' -# app: nr-gwells-${NAME_SUFFIX} -# appver: gwells-${NAME_SUFFIX}-backend -# annotations: {} -# spec: -# host: nr-gwells-${NAME_SUFFIX}-backend.apps.silver.devops.gov.bc.ca -# path: "/gwells" -# to: -# kind: Service -# name: nr-gwells-${NAME_SUFFIX}-backend -# weight: 100 -# port: -# targetPort: web -# tls: -# insecureEdgeTerminationPolicy: Redirect -# termination: edge -# wildcardPolicy: None +- kind: Route + apiVersion: v1 + metadata: + name: ${REPO}-${ZONE}-${COMPONENT} + labels: + frontend: 'true' + app: ${REPO}-${ZONE}-${COMPONENT} + spec: + host: ${REPO}-${ZONE}-${COMPONENT}.apps.silver.devops.gov.bc.ca + path: "/gwells" + to: + kind: Service + name: ${REPO}-${ZONE}-${COMPONENT} + weight: 100 + port: + targetPort: web + tls: + insecureEdgeTerminationPolicy: Redirect + termination: edge + wildcardPolicy: None diff --git a/common/openshift.init.yml b/common/openshift.init.yml index 725b9ac5b..efa303940 100644 --- a/common/openshift.init.yml +++ b/common/openshift.init.yml @@ -1,9 +1,11 @@ apiVersion: template.openshift.io/v1 kind: Template parameters: - - description: A suffix appended to all objects - displayName: Name Suffix - name: NAME_SUFFIX + - name: REPO + description: Repository name + value: ${REPO} + - name: ZONE + description: Deployment zone, e.g. 
pr-### or prod required: true - description: The name of the OpenShift Service exposed for the database. displayName: Database Service Name @@ -60,9 +62,9 @@ objects: - apiVersion: v1 kind: Secret metadata: - name: gwells-${NAME_SUFFIX}-database + name: ${REPO}-${ZONE}-database labels: - app: nr-gwells-${NAME_SUFFIX} + app: ${REPO}-${ZONE} stringData: database-name: ${POSTGRESQL_DATABASE} database-password: ${POSTGRESQL_PASSWORD} @@ -74,10 +76,9 @@ objects: - apiVersion: v1 kind: Secret metadata: - name: gwells-minio-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-minio labels: - app: nr-gwells-${NAME_SUFFIX} - creationTimestamp: + app: ${REPO}-${ZONE} stringData: MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY} MINIO_SECRET_KEY: ${MINIO_SECRET_KEY} @@ -88,10 +89,9 @@ objects: - apiVersion: v1 kind: Secret metadata: - creationTimestamp: - name: gwells-django-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-django labels: - app: nr-gwells-${NAME_SUFFIX} + app: ${REPO}-${ZONE} stringData: admin_password: ${DJANGO_ADMIN_PASSWORD} admin_url: ${DJANGO_ADMIN_URL} @@ -100,10 +100,9 @@ objects: - apiVersion: v1 kind: Secret metadata: - creationTimestamp: - name: gwells-e-licensing-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-e-licensing labels: - app: nr-gwells-${NAME_SUFFIX} + app: ${REPO}-${ZONE} stringData: E_LICENSING_AUTH_PASSWORD: ${E_LICENSING_AUTH_PASSWORD} E_LICENSING_AUTH_USERNAME: ${E_LICENSING_AUTH_USERNAME} @@ -111,9 +110,9 @@ objects: kind: Secret metadata: creationTimestamp: - name: crunchy-db-credentials-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-crunchy-db-credentials labels: - app: nr-gwells-${NAME_SUFFIX} + app: ${REPO}-${ZONE} stringData: PG_MODE: ${PG_MODE} PG_PRIMARY_PASSWORD: ${PG_PRIMARY_PASSWORD} @@ -138,7 +137,7 @@ objects: metadata: name: allow-same-namespace labels: - template: nr-gwells-backend-network-security-policy + template: ${REPO}-backend-network-security-policy spec: policyTypes: - Ingress diff --git a/database/openshift.deploy.yml b/database/openshift.deploy.yml index 95d89d6aa..998c728ed 100644 --- a/database/openshift.deploy.yml +++ b/database/openshift.deploy.yml @@ -1,23 +1,17 @@ -apiVersion: v1 kind: Template -labels: - template: postgresql-persistent-template -metadata: - creationTimestamp: null - name: gwells-postgresql-dc +apiVersion: template.openshift.io/v1 parameters: - - description: The OpenShift ImageStream name. - displayName: IMAGE_STREAM_NAME - name: IMAGE_STREAM_NAME - value: crunchy-postgres-gis - - description: The OpenShift Namespace where the ImageStream resides. - displayName: Namespace - name: IMAGE_STREAM_NAMESPACE - value: 26e83e-tools - - description: The image registry for the container image or ImageStream. - displayName: Image Registry - name: IMAGE_REGISTRY - value: image-registry.openshift-image-registry.svc:5000 + - name: REPO + description: Repository name + value: nr-gwells + - name: COMPONENT + description: Component name + value: database + - name: ZONE + description: Deployment zone, e.g. pr-### or prod + required: true + - name: TAG + required: true - description: Volume space available for data, e.g. 512Mi, 2Gi. displayName: Volume Capacity name: VOLUME_CAPACITY @@ -47,10 +41,6 @@ parameters: name: IMAGE_STREAM_VERSION required: true value: centos7-12.4-3.0-4.5.0 - - name: NAME_SUFFIX - required: true - - name: IMAGE_TAG - required: true - description: Storage class for PVCs. displayName: Storage class for PVCs. 
name: STORAGE_CLASS @@ -117,18 +107,18 @@ objects: kind: ConfigMap metadata: labels: - app: nr-gwells-${NAME_SUFFIX} - name: gwells-${NAME_SUFFIX}-database-setupcfg - appver: gwells-${NAME_SUFFIX}-database - name: gwells-${NAME_SUFFIX}-database-setupcfg + app: ${REPO}-${ZONE} + name: ${REPO}-${ZONE}-${COMPONENT}-setupcfg + appver: ${REPO}-${ZONE}-${COMPONENT} + name: ${REPO}-${ZONE}-${COMPONENT}-setupcfg - apiVersion: v1 kind: Service metadata: labels: - app: nr-gwells-${NAME_SUFFIX} + app: ${REPO}-${ZONE} annotations: template.openshift.io/expose-uri: postgres://{.spec.clusterIP}:{.spec.ports[?(.name=="postgresql")].port} - name: gwells-${NAME_SUFFIX}-database + name: ${REPO}-${ZONE}-${COMPONENT} spec: ports: - name: postgresql @@ -137,7 +127,7 @@ objects: protocol: TCP targetPort: 5432 selector: - deployment: gwells-${NAME_SUFFIX}-database + deployment: ${REPO}-${ZONE}-${COMPONENT} sessionAffinity: None type: ClusterIP status: @@ -145,9 +135,9 @@ objects: - apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: gwells-${NAME_SUFFIX}-database + name: ${REPO}-${ZONE}-${COMPONENT} labels: - app: nr-gwells-${NAME_SUFFIX} + app: ${REPO}-${ZONE} annotations: template.openshift.io.bcgov/create: "true" spec: @@ -162,22 +152,22 @@ objects: metadata: annotations: template.alpha.openshift.io/wait-for-ready: "true" - name: gwells-${NAME_SUFFIX}-database + name: ${REPO}-${ZONE}-${COMPONENT} labels: - app: nr-gwells-${NAME_SUFFIX} + app: ${REPO}-${ZONE} spec: replicas: 1 selector: matchLabels: - deployment: gwells-${NAME_SUFFIX}-database + deployment: ${REPO}-${ZONE}-${COMPONENT} strategy: type: Recreate template: metadata: labels: - name: gwells-${NAME_SUFFIX}-database - appver: gwells-${NAME_SUFFIX} - deployment: gwells-${NAME_SUFFIX}-database + name: ${REPO}-${ZONE}-${COMPONENT} + appver: ${REPO}-${ZONE} + deployment: ${REPO}-${ZONE}-${COMPONENT} spec: containers: - capabilities: {} @@ -187,7 +177,7 @@ objects: - name: CONTAINER_NAME value: postgresql - name: PG_PRIMARY_HOST - value: gwells-${NAME_SUFFIX}-database + value: ${REPO}-${ZONE}-${COMPONENT} - name: PGDATA_PATH_OVERRIDE value: userdata - name: PGHOST @@ -196,38 +186,38 @@ objects: valueFrom: secretKeyRef: key: database-name - name: gwells-${NAME_SUFFIX}-database + name: ${REPO}-${ZONE}-${COMPONENT} - name: PG_PASSWORD valueFrom: secretKeyRef: key: database-password - name: gwells-${NAME_SUFFIX}-database + name: ${REPO}-${ZONE}-${COMPONENT} - name: PG_USER valueFrom: secretKeyRef: key: database-user - name: gwells-${NAME_SUFFIX}-database + name: ${REPO}-${ZONE}-${COMPONENT} - name: PG_MODE valueFrom: secretKeyRef: key: PG_MODE - name: crunchy-db-credentials-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-crunchy-db-credentials - name: PG_PRIMARY_PASSWORD valueFrom: secretKeyRef: key: PG_PRIMARY_PASSWORD - name: crunchy-db-credentials-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-crunchy-db-credentials - name: PG_PRIMARY_USER valueFrom: secretKeyRef: key: PG_PRIMARY_USER - name: crunchy-db-credentials-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-crunchy-db-credentials - name: PG_ROOT_PASSWORD valueFrom: secretKeyRef: key: PG_ROOT_PASSWORD - name: crunchy-db-credentials-${NAME_SUFFIX} - image: "ghcr.io/bcgov/nr-gwells/database:${IMAGE_TAG}" + name: ${REPO}-${ZONE}-crunchy-db-credentials + image: "ghcr.io/bcgov/${REPO}/${COMPONENT}:${TAG}" imagePullPolicy: IfNotPresent livenessProbe: failureThreshold: 3 @@ -268,20 +258,19 @@ objects: terminationMessagePath: /dev/termination-log volumeMounts: - mountPath: /pgdata - name: gwells-${NAME_SUFFIX}-database-data + 
name: ${REPO}-${ZONE}-${COMPONENT}-data - mountPath: /var/run/postgresql - name: gwells-${NAME_SUFFIX}-database-run + name: ${REPO}-${ZONE}-${COMPONENT}-run - mountPath: /pgconf - name: gwells-${NAME_SUFFIX}-database-setupcfg + name: ${REPO}-${ZONE}-${COMPONENT}-setupcfg dnsPolicy: ClusterFirst restartPolicy: Always volumes: - - name: gwells-${NAME_SUFFIX}-database-data + - name: ${REPO}-${ZONE}-${COMPONENT}-data persistentVolumeClaim: - claimName: gwells-${NAME_SUFFIX}-database - - name: gwells-${NAME_SUFFIX}-database-run + claimName: ${REPO}-${ZONE}-${COMPONENT} + - name: ${REPO}-${ZONE}-${COMPONENT}-run emptyDir: {} - configMap: - name: gwells-${NAME_SUFFIX}-database-setupcfg - name: gwells-${NAME_SUFFIX}-database-setupcfg - status: {} + name: ${REPO}-${ZONE}-${COMPONENT}-setupcfg + name: ${REPO}-${ZONE}-${COMPONENT}-setupcfg diff --git a/frontend/openshift.deploy.yml b/frontend/openshift.deploy.yml index 6001c6b0f..e24e0c7ae 100644 --- a/frontend/openshift.deploy.yml +++ b/frontend/openshift.deploy.yml @@ -1,19 +1,24 @@ apiVersion: template.openshift.io/v1 kind: Template parameters: - - name: NAME_SUFFIX - displayName: Name Suffix - description: A suffix appended to all objects + - name: REPO + description: Repository name + value: nr-gwells + - name: COMPONENT + description: Component name + value: frontend + - name: ZONE + description: Deployment zone, e.g. pr-### or prod required: true - - name: IMAGE_TAG + - name: TAG required: true objects: - kind: Service apiVersion: v1 metadata: labels: - app: nr-gwells-${NAME_SUFFIX} - name: nr-gwells-${NAME_SUFFIX}-frontend + app: ${REPO}-${ZONE} + name: ${REPO}-${ZONE}-${COMPONENT} spec: type: ClusterIP ports: @@ -22,30 +27,30 @@ objects: protocol: TCP name: 3000-tcp selector: - service: nr-gwells-${NAME_SUFFIX}-frontend + service: ${REPO}-${ZONE}-${COMPONENT} - kind: Deployment apiVersion: apps/v1 metadata: labels: - app: nr-gwells-${NAME_SUFFIX} - deployment: gwells-${NAME_SUFFIX}-frontend - name: gwells-${NAME_SUFFIX}-frontend + app: ${REPO}-${ZONE} + deployment: ${REPO}-${ZONE}-${COMPONENT} + name: ${REPO}-${ZONE}-${COMPONENT} spec: strategy: type: Recreate selector: matchLabels: - deployment: gwells-${NAME_SUFFIX}-frontend + deployment: ${REPO}-${ZONE}-${COMPONENT} template: metadata: labels: - app: nr-gwells-${NAME_SUFFIX} - deployment: gwells-${NAME_SUFFIX}-frontend - service: nr-gwells-${NAME_SUFFIX}-frontend + app: ${REPO}-${ZONE} + deployment: ${REPO}-${ZONE}-${COMPONENT} + service: ${REPO}-${ZONE}-${COMPONENT} spec: containers: - - name: gwells-${NAME_SUFFIX}-frontend - image: ghcr.io/bcgov/nr-gwells/frontend:${IMAGE_TAG} + - name: ${REPO}-${ZONE}-${COMPONENT} + image: ghcr.io/bcgov/${REPO}/${COMPONENT}:${TAG} securityContext: capabilities: add: ["NET_BIND_SERVICE"] @@ -54,9 +59,9 @@ objects: - name: LOG_LEVEL value: info - name: VUE_APP_AXIOS_BASE_URL - value: http://gwells-${NAME_SUFFIX}-frontend/gwells/api/v2/ + value: http://${REPO}-${ZONE}-${COMPONENT}/gwells/api/v2/ - name: BACKEND_URL - value: http://gwells-${NAME_SUFFIX}-frontend + value: http://${REPO}-${ZONE}-${COMPONENT} ports: - name: container-port containerPort: 3000 @@ -93,15 +98,15 @@ objects: apiVersion: route.openshift.io/v1 metadata: labels: - app: nr-gwells-${NAME_SUFFIX} - name: nr-gwells-${NAME_SUFFIX}-frontend + app: ${REPO}-${ZONE} + name: ${REPO}-${ZONE}-${COMPONENT} spec: - host: nr-gwells-${NAME_SUFFIX}-frontend.apps.silver.devops.gov.bc.ca + host: ${REPO}-${ZONE}-${COMPONENT}.apps.silver.devops.gov.bc.ca port: targetPort: 3000-tcp to: kind: Service - 
name: nr-gwells-${NAME_SUFFIX}-frontend + name: ${REPO}-${ZONE}-${COMPONENT} weight: 100 tls: termination: edge diff --git a/gwells/.dockerignore b/gwells/.dockerignore deleted file mode 100644 index 1b6a64eda..000000000 --- a/gwells/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -**/node_modules -npm-debug.log diff --git a/gwells/.gitattributes b/gwells/.gitattributes deleted file mode 100644 index b8048b428..000000000 --- a/gwells/.gitattributes +++ /dev/null @@ -1,35 +0,0 @@ -# Auto detect text files and perform LF normalization -* text eol=lf - -# Source files -# ============ -*.pxd text -*.py text -*.py3 text -*.pyw text -*.pyx text - -# Binary files -# ============ -*.db binary -*.eot binary -*.jar binary -*.p binary -*.pkl binary -*.png binary -*.pyc binary -*.pyd binary -*.pyo binary -*.ttf binary -*.gz binary -*.zip binary - - -# Static files -# ============ -/app/backend/gwells/static/** binary - -# Note: .db, .p, and .pkl files are associated -# with the python modules ``pickle``, ``dbm.*``, -# ``shelve``, ``marshal``, ``anydbm``, & ``bsddb`` -# (among others). diff --git a/gwells/.github/PULL_REQUEST_TEMPLATE.md b/gwells/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 55122144a..000000000 --- a/gwells/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,13 +0,0 @@ -## Pull Request Standards - -- [ ] The title of the PR is accurate -- [ ] The title includes the type of change [`HOTFIX`, `FEATURE`, `etc`] -- [ ] The PR title includes the ticket number in format of `[GWELLS-###]` -- [ ] Documentation is updated to reflect change [`README`, `functions`, `team documents`] - -# Description - -This PR includes the following proposed change(s): - -- { List all the changes made } -- { Include any screenshots necessary } diff --git a/gwells/.gitignore b/gwells/.gitignore deleted file mode 100644 index c6c9facc6..000000000 --- a/gwells/.gitignore +++ /dev/null @@ -1,174 +0,0 @@ -########################## -# Recommended .gitignore # -########################## -# -# https://gist.github.com/octocat/9257657#file-gitignore - -# Compiled source # -################### -*.com -*.class -*.dll -*.exe -*.o -*.so - -# Packages # -############ -# it's better to unpack these files and commit the raw source -# git has its own built in compression methods -*.7z -*.dmg -*.gz -*.iso -#*.jar -*.rar -*.tar -*.zip -!regional_areas.zip -!qaqc_well_data.zip - -# Logs and databases # -###################### -*.log -app/backend/aquifers/fixtures/tmp/* - -# OS generated files # -###################### -.DS_Store -.DS_Store? -._* -.Spotlight-V100 -.Trashes -ehthumbs.db -Thumbs.db -*.md5 -.env* - -################## -# This repo only # -################## - -# Django -staticfiles/ - -# All static files (Vue app) are placed in the static folder during build -app/backend/gwells/static/* -# index.html places in the template folder during build. -app/backend/gwells/templates/index.html - -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] - -# C extensions -*.so - -# Distribution / packaging -.Python -env/ -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -*.egg-info/ -.installed.cfg -*.egg -app/backend/*.sh -app/backend/.env -*.secret_env -app/backend/.pip -app/backend/bulk -# PyInstaller and Pip install files -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. 
-*.manifest -*.spec -pip-log.txt -pip-delete-this-directory.txt -app/backend/get-pip.py - -# Unit test / coverage reports -htmlcov/ -.tox/ -.coverage -**/coverage -.coverage.* -.cache -*,cover - -# Test outputs -*.xlsx - -# Translations -*.mo -*.pot - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Code editor files -gwells.sln -gwells.pyproj* -.vs/ -/obj/ -/bin/ -*.sublime-project -*.sublime-workspace -.vscode -*.ipynb -.idea - -# functional-tests -functional-tests/.gradle/ -functional-tests/build -functional-tests/.project -functional-tests/.settings/org.eclipse.buildship.core.prefs -functional-tests/.classpath -functional-tests/bin -functional-tests/src/test/groovy/testtamplet.txt - -#virtual env -/deactivate -venv -.venv -venv -.direnv -app/backend/.env -*.secret_env - -# Frontend files -app/backend/gwells/static/vue -**/node_modules/ - -sonar-runner/.gradle -*.patch -*.orig -sonar-runner/.settings/org.eclipse.buildship.core.prefs -sonar-runner/.project -sonar-runner/.classpath -.gradle - -# OS files -.directory -*~ - -# DB -*.dmp -*.dump -.tmp/ -app/backend/tmp - -# Exceptions -!app/*/fixtures/*.gz -!tests/api-tests/files/*.zip diff --git a/gwells/Jenkinsfile b/gwells/Jenkinsfile deleted file mode 100644 index 6142b053b..000000000 --- a/gwells/Jenkinsfile +++ /dev/null @@ -1,736 +0,0 @@ -#!groovy - -import groovy.json.JsonOutput -import bcgov.GitHubHelper - - -// Notify stage status and pass to Jenkins-GitHub library -void notifyStageStatus (String name, String status) { - GitHubHelper.createCommitStatus( - this, - GitHubHelper.getPullRequestLastCommitId(this), - status, - "${env.BUILD_URL}", - "Stage '${name}'", - "Stage: ${name}" - ) -} - - -// Create deployment status and pass to Jenkins-GitHub library -void createDeploymentStatus (String suffix, String status, String stageUrl) { - def ghDeploymentId = new GitHubHelper().createDeployment( - this, - "pull/${env.CHANGE_ID}/head", - [ - 'environment':"${suffix}", - 'task':"deploy:pull:${env.CHANGE_ID}" - ] - ) - - new GitHubHelper().createDeploymentStatus( - this, - ghDeploymentId, - "${status}", - ['targetUrl':"https://${stageUrl}/gwells"] - ) - - if ('SUCCESS'.equalsIgnoreCase("${status}")) { - echo "${suffix} deployment successful!" - } else if ('PENDING'.equalsIgnoreCase("${status}")){ - echo "${suffix} deployment pending." - } -} - - -// Print stack trace of error -@NonCPS -private static String stackTraceAsString(Throwable t) { - StringWriter sw = new StringWriter(); - t.printStackTrace(new PrintWriter(sw)); - return sw.toString() -} - - -// OpenShift wrapper -def _openshift(String name, String project, Closure body) { - script { - openshift.withCluster() { - openshift.withProject(project) { - echo "Running Stage '${name}'" - waitUntil { - notifyStageStatus (name, 'PENDING') - boolean isDone=false - try { - body() - isDone=true - notifyStageStatus(name, 'SUCCESS') - echo "Completed Stage '${name}'" - } catch (error){ - notifyStageStatus(name, 'FAILURE') - echo "${stackTraceAsString(error)}" - def inputAction = input( - message: "This step (${name}) has failed. See related messages.", - ok: 'Confirm', - parameters: [ - choice( - name: 'action', - choices: 'Re-run\nIgnore', - description: 'What would you like to do?' 
- ) - ] - ) - if ('Ignore'.equalsIgnoreCase(inputAction)){ - isDone=true - } - } - return isDone - } - } - } - } -} - -// Functional test script -// Can be limited by assinging toTest var -def unitTestDjango (String stageName, String envProject, String envSuffix) { - _openshift(env.STAGE_NAME, envProject) { - def DB_target = envSuffix == "staging" ? "${appName}-pg12-${envSuffix}" : "${appName}-pg12-${envSuffix}-${prNumber}" - def DB_newVersion = openshift.selector("dc", "${DB_target}").object().status.latestVersion - def DB_pod = openshift.selector('pod', [deployment: "${DB_target}-${DB_newVersion}"]) - echo "Temporarily granting elevated DB rights" - echo DB_target - def db_ocoutput_grant = openshift.exec( - DB_pod.objects()[0].metadata.name, - "--", - "bash -c '\ - psql -c \"ALTER USER \\\"\${PG_USER}\\\" WITH SUPERUSER;\" \ - '" - ) - echo "Temporary DB grant results: "+ db_ocoutput_grant.actions[0].out - - def target = envSuffix == "staging" ? "${appName}-${envSuffix}" : "${appName}-${envSuffix}-${prNumber}" - def newVersion = openshift.selector("dc", "${target}").object().status.latestVersion - def pods = openshift.selector('pod', [deployment: "${target}-${newVersion}"]) - - // Wait here and make sure the app pods are ready before running unit tests. - // We wait for both pods to be ready so that we can execute the test command - // on either one, without having to check which one was ready first. - timeout(15) { - pods.untilEach(2) { - return it.object().status.containerStatuses.every { - it.ready - } - } - } - - echo "Running Django unit tests" - def ocoutput = openshift.exec( - pods.objects()[0].metadata.name, - "--", - "bash -c '\ - cd /opt/app-root/src/backend; \ - python manage.py test \ - '" - ) - echo "Django test results: "+ ocoutput.actions[0].out - - echo "Revoking ADMIN rights" - def db_ocoutput_revoke = openshift.exec( - DB_pod.objects()[0].metadata.name, - "--", - "bash -c '\ - psql -c \"ALTER USER \\\"\${PG_USER}\\\" WITH NOSUPERUSER;\" \ - '" - ) - echo "DB Revocation results: "+ db_ocoutput_revoke.actions[0].out - } -} - - -// API test function -def apiTest (String stageName, String stageUrl, String envSuffix) { - _openshift(env.STAGE_NAME, toolsProject) { - podTemplate( - label: "nodejs-${appName}-${envSuffix}-${prNumber}", - name: "nodejs-${appName}-${envSuffix}-${prNumber}", - serviceAccount: 'jenkins', - cloud: 'openshift', - activeDeadlineSeconds: 1800, - containers: [ - containerTemplate( - name: 'jnlp', - image: 'registry.access.redhat.com/openshift3/jenkins-agent-nodejs-8-rhel7', - resourceRequestCpu: '500m', - resourceLimitCpu: '800m', - resourceRequestMemory: '512Mi', - resourceLimitMemory: '1Gi', - activeDeadlineSeconds: '600', - podRetention: 'never', - workingDir: '/tmp', - command: '', - args: '${computer.jnlpmac} ${computer.name}', - envVars: [ - envVar( - key:'BASE_URL', - value: "https://${stageUrl}/gwells" - ), - secretEnvVar( - key: 'GWELLS_API_TEST_USER', - secretName: 'apitest-secrets', - secretKey: 'username' - ), - secretEnvVar( - key: 'GWELLS_API_TEST_PASSWORD', - secretName: 'apitest-secrets', - secretKey: 'password' - ), - secretEnvVar( - key: 'GWELLS_API_TEST_AUTH_SERVER', - secretName: 'apitest-secrets', - secretKey: 'auth_server' - ), - secretEnvVar( - key: 'GWELLS_API_TEST_CLIENT_ID', - secretName: 'apitest-secrets', - secretKey: 'client_id' - ), - secretEnvVar( - key: 'GWELLS_API_TEST_CLIENT_SECRET', - secretName: 'apitest-secrets', - secretKey: 'client_secret' - ) - ] - ) - ] - ) { - node("nodejs-${appName}-${envSuffix}-${prNumber}") { 
- checkout scm - dir('tests/api-tests') { - sh 'npm install -g newman@4.6.1' - try { - sh """ - newman run ./registries_api_tests.json \ - --global-var test_user=\$GWELLS_API_TEST_USER \ - --global-var test_password=\$GWELLS_API_TEST_PASSWORD \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./registries_v2_api_tests.json \ - --global-var test_user=\$GWELLS_API_TEST_USER \ - --global-var test_password=\$GWELLS_API_TEST_PASSWORD \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./wells_api_tests.json \ - --global-var test_user=\$GWELLS_API_TEST_USER \ - --global-var test_password=\$GWELLS_API_TEST_PASSWORD \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./wells_v2_api_tests.json \ - --global-var test_user=\$GWELLS_API_TEST_USER \ - --global-var test_password=\$GWELLS_API_TEST_PASSWORD \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./submissions_api_tests.json \ - --global-var test_user=\$GWELLS_API_TEST_USER \ - --global-var test_password=\$GWELLS_API_TEST_PASSWORD \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./submissions_v2_api_tests.json \ - --global-var test_user=\$GWELLS_API_TEST_USER \ - --global-var test_password=\$GWELLS_API_TEST_PASSWORD \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./aquifers_api_tests.json \ - --global-var test_user=\$GWELLS_API_TEST_USER \ - --global-var test_password=\$GWELLS_API_TEST_PASSWORD \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./aquifers_v2_api_tests.json \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./cities_api_tests.json \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./configuration_api_tests.json \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var 
client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./utilities_api_tests.json \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - """ - - if ("dev".equalsIgnoreCase("${envSuffix}")) { - sh """ - newman run ./wells_search_api_tests.json \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./wells_search_v2_api_tests.json \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./exports_api_tests.json \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - """ - } - - } finally { - junit 'newman/*.xml' - publishHTML ( - target: [ - allowMissing: false, - alwaysLinkToLastBuild: false, - keepAll: true, - reportDir: 'newman', - reportFiles: 'newman*.html', - reportName: "API Test Report" - ] - ) - stash includes: 'newman/*.xml', name: 'api-tests' - } - } - } - } - } - return true -} - - -def zapTests (String stageName, String envUrl, String envSuffix) { - _openshift(env.STAGE_NAME, toolsProject) { - def podName = envSuffix == "dev" ? "zap-${envSuffix}-${prNumber}" : "zap-${envSuffix}" - podTemplate( - label: "${podName}", - name: "${podName}", - serviceAccount: "jenkins", - cloud: "openshift", - containers: [ - containerTemplate( - name: 'jnlp', - image: 'docker-registry.default.svc:5000/openshift/jenkins-slave-zap', - resourceRequestCpu: '1', - resourceLimitCpu: '1', - resourceRequestMemory: '2Gi', - resourceLimitMemory: '2Gi', - activeDeadlineSeconds: '600', - workingDir: '/home/jenkins', - command: '', - args: '${computer.jnlpmac} ${computer.name}', - envVars: [ - envVar( - key:'BASE_URL', - value: "https://${envUrl}/gwells" - ) - ] - ) - ] - ) { - node("${podName}") { - checkout scm - sh ( - script: "/zap/zap-baseline.py -r index.html -t $BASE_URL", - returnStatus: true - ) - - publishHTML( - target: [ - allowMissing: false, - alwaysLinkToLastBuild: false, - keepAll: true, - reportDir: '/zap/wrk', - reportFiles: 'index.html', - reportName: 'ZAP Baseline Scan', - reportTitles: 'ZAP Baseline Scan' - ] - ) - } - } - } - return true -} - - -// Database backup -def dbBackup (String envProject, String envSuffix) { - def dcName = envSuffix == "dev" ? 
"${appName}-pg12-${envSuffix}-${prNumber}" : "${appName}-pg12-${envSuffix}" - def dumpDir = "/pgdata/deployment-backups" - def dumpName = "${envSuffix}-\$( date +%Y-%m-%d-%H%M ).dump" - def dumpOpts = "--no-privileges --no-tablespaces --schema=public --exclude-table=spatial_ref_sys" - def dumpTemp = "/tmp/unverified.dump" - int maxBackups = 10 - - // Dump to temporary file - sh "oc rsh -n ${envProject} dc/${dcName} bash -c ' \ - pg_dump -U \${PG_USER} -d \${PG_DATABASE} -Fc -f ${dumpTemp} ${dumpOpts} \ - '" - - // Verify dump size is at least 1M - int sizeAtLeast1M = sh ( - script: "oc rsh -n ${envProject} dc/${dcName} bash -c ' \ - du --threshold=1M ${dumpTemp} | wc -l \ - '", - returnStdout: true - ) - assert sizeAtLeast1M == 1 - - // Restore (schema only, w/ extensions) to temporary db - // note: command needs to be updated. - // See Jira ticket WATER-1163. - // sh """ - // oc rsh -n ${envProject} dc/${dcName} bash -c ' \ - // set -e; \ - // psql -c "DROP DATABASE IF EXISTS db_verify"; \ - // createdb db_verify; \ - // psql -d db_verify -c "CREATE EXTENSION IF NOT EXISTS postgis;"; \ - // psql -d db_verify -c "COMMIT;"; \ - // pg_restore -U postgres -d db_verify -e --schema-only ${dumpTemp}; \ - // psql -c "DROP DATABASE IF EXISTS db_verify" - // ' - // """ - - // Store verified dump - sh "oc rsh -n ${envProject} dc/${dcName} bash -c ' \ - mkdir -p ${dumpDir}; \ - mv ${dumpTemp} ${dumpDir}/${dumpName}; \ - ls -lh ${dumpDir} \ - '" - - // Database purge - sh "oc rsh -n ${envProject} dc/${dcName} bash -c \" \ - find ${dumpDir} -name *.dump -printf '%Ts\t%p\n' \ - | sort -nr | cut -f2 | tail -n +${maxBackups} | xargs rm 2>/dev/null \ - || echo 'No extra backups to remove' \ - \"" -} - -pipeline { - environment { - // Project-wide settings - app name, repo - appName = "gwells" - repository = 'https://www.github.com/bcgov/gwells.git' - - // prNumber is the pull request number e.g. 'pr-4' - prNumber = "${env.JOB_BASE_NAME}".toLowerCase() - - // toolsProject is where images are built - toolsProject = "moe-gwells-tools" - - // devProject is the project where individual development environments are spun up - devProject = "moe-gwells-dev" - devSuffix = "dev" - devAppName = "${appName}-${devSuffix}-${prNumber}" - devHost = "${devAppName}.pathfinder.gov.bc.ca" - - // stagingProject contains the test deployment. The test image is a candidate for promotion to prod. - stagingProject = "moe-gwells-test" - stagingSuffix = "staging" - stagingHost = "gwells-staging.pathfinder.gov.bc.ca" - - // prodProject is the prod deployment. - // TODO: New production images can be deployed by tagging an existing "test" image as "prod". - prodProject = "moe-gwells-prod" - prodSuffix = "production" - prodHost = "gwells-prod.pathfinder.gov.bc.ca" - - // name of the provisioned PVC claim for NFS backup storage - // this will not be created during the pipeline; it must be created - // before running the production pipeline. - nfsProdBackupPVC = "bk-moe-gwells-prod-0z6f0qq0k2fz" - nfsStagingBackupPVC = "bk-moe-gwells-test-dcog9cfksxat" - - // name of the PVC where documents are stored (e.g. Minio PVC) - // this should be the same across all environments. - minioDataPVC = "minio-data-vol" - } - agent any - stages { - // the Start Pipeline stage will process and apply OpenShift build templates which will create - // buildconfigs and an imagestream for built images. - // each pull request gets its own buildconfig but all new builds are pushed to a single imagestream, - // to be tagged with the pull request number. 
- // e.g.: gwells-app:pr-999 - stage('ALL - Prepare Templates') { - steps { - script { - echo "Cancelling previous builds..." - timeout(10) { - abortAllPreviousBuildInProgress(currentBuild) - } - echo "Previous builds cancelled" - - _openshift(env.STAGE_NAME, toolsProject) { - // - variable substitution - def buildtemplate = openshift.process("-f", - "openshift/backend.bc.json", - "ENV_NAME=${devSuffix}", - "NAME_SUFFIX=-${devSuffix}-${prNumber}", - "APP_IMAGE_TAG=${prNumber}", - "SOURCE_REPOSITORY_URL=${repository}", - "SOURCE_REPOSITORY_REF=pull/${CHANGE_ID}/head" - ) - - // Apply oc list objects - // - add docker image reference as tag in gwells-application - // - create build config - echo "Preparing backend imagestream and buildconfig" - echo " \$ oc process -f openshift/backend.bc.json -p ENV_NAME=${devSuffix} -p NAME_SUFFIX=-${devSuffix}-${prNumber} -p APP_IMAGE_TAG=${prNumber} -p SOURCE_REPOSITORY_URL=${REPOSITORY} -p SOURCE_REPOSITORY_REF=pull/${CHANGE_ID}/head | oc apply -n moe-gwells-tools -f -" - openshift.apply(buildtemplate) - } - } - } - } - - - // the Build stage builds files; an image will be outputted to the app's imagestream, - // using the source-to-image (s2i) strategy. See /app/.s2i/assemble for image build script - stage('ALL - Build') { - steps { - script { - _openshift(env.STAGE_NAME, toolsProject) { - echo "Running unit tests and building images..." - echo "This may take several minutes. Logs are not forwarded to Jenkins by default (at this time)." - echo "Additional logs can be found by monitoring builds in ${toolsProject}" - - // Select appropriate buildconfig - def appBuild = openshift.selector("bc", "${devAppName}") - echo "Building" - echo " \$ oc start-build -n moe-gwells-tools ${devAppName} --wait --follow=true" - appBuild.startBuild("--wait").logs("-f") - } - } - } - } - - // the Deploy to Dev stage creates a new dev environment for the pull request (if necessary), tagging - // the newly built application image into that environment. This stage monitors the newest deployment - // for pods/containers to report back as ready. 
- stage('DEV - Deploy') { - when { - expression { env.CHANGE_TARGET != 'master' } - } - steps { - script { - _openshift(env.STAGE_NAME, devProject) { - // Process postgres deployment config (sub in vars, create list items) - echo "Processing database deployment" - def deployDBTemplate = openshift.process("-f", - "openshift/postgresql.dc.yml", - "DATABASE_SERVICE_NAME=gwells-pg12-${devSuffix}-${prNumber}", - "IMAGE_STREAM_NAMESPACE=${devProject}", - "IMAGE_STREAM_NAME=crunchy-postgres-gis", - "IMAGE_STREAM_VERSION=centos7-12.2-4.2.2", - "NAME_SUFFIX=-${devSuffix}-${prNumber}", - "POSTGRESQL_DATABASE=gwells", - "VOLUME_CAPACITY=1Gi", - "STORAGE_CLASS=netapp-file-standard", - "REQUEST_CPU=200m", - "REQUEST_MEMORY=512Mi", - "LIMIT_CPU=500m", - "LIMIT_MEMORY=1Gi" - ) - - // Process postgres deployment config (sub in vars, create list items) - echo "Processing deployment config for pull request ${prNumber}" - def deployTemplate = openshift.process("-f", - "openshift/backend.dc.json", - "ENV_NAME=${devSuffix}", - "HOST=${devHost}", - "NAME_SUFFIX=-${devSuffix}-${prNumber}" - ) - - echo "Processing deployment config for tile server" - def pgtileservTemplate = openshift.process("-f", - "openshift/pg_tileserv/pg_tileserv.dc.yaml", - "NAME_SUFFIX=-${devSuffix}-${prNumber}", - "DATABASE_SERVICE_NAME=gwells-pg12-${devSuffix}-${prNumber}", - "IMAGE_TAG=20200610", - "HOST=${devHost}", - ) - - // proxy to reroute traffic to OCP4 - echo "Processing deployment config for OCP3/OCP4 proxy" - def maintProxyTemplate = openshift.process("-f", - "openshift/maintenance.dc.yaml", - "NAME_SUFFIX=-${devSuffix}-${prNumber}", - "SOURCE_HOST_NAME=gwells-${devSuffix}-${prNumber}-maintenance.pathfinder.gov.bc.ca", - "DESTINATION_HOST_NAME=gwells-${devSuffix}-${prNumber}.apps.silver.devops.gov.bc.ca", - ) - - // some objects need to be copied from a base secret or configmap - // these objects have an annotation "as-copy-of" in their object spec (e.g. an object in backend.dc.json) - echo "Creating configmaps and secrets objects" - List newObjectCopies = [] - - for (o in (deployTemplate + deployDBTemplate)) { - - // only perform this operation on objects with 'as-copy-of' - def sourceName = o.metadata && o.metadata.annotations && o.metadata.annotations['as-copy-of'] - if (sourceName && sourceName.length() > 0) { - def selector = openshift.selector("${o.kind}/${sourceName}") - if (selector.count() == 1) { - - // create a copy of the object and add it to the new list of objects to be applied - Map copiedModel = selector.object(exportable:true) - copiedModel.metadata.name = o.metadata.name - echo "[as-copy-of] Copying ${o.kind} ${o.metadata.name}" - newObjectCopies.add(copiedModel) - } - } - } - - echo "Applying deployment config for pull request ${prNumber} on ${devProject}" - - // apply the templates, which will create new objects or modify existing ones as necessary. - // the copies of base objects (secrets, configmaps) are also applied. 
- openshift.apply(pgtileservTemplate).label(['app':"${devAppName}", 'app-name':"${appName}", 'env-name':"${devSuffix}"], "--overwrite") - openshift.apply(maintProxyTemplate).label(['app':"${devAppName}", 'app-name':"${appName}", 'env-name':"${devSuffix}"], "--overwrite") - - openshift.apply(deployTemplate).label(['app':"${devAppName}", 'app-name':"${appName}", 'env-name':"${devSuffix}"], "--overwrite") - openshift.apply(deployDBTemplate).label(['app':"${devAppName}", 'app-name':"${appName}", 'env-name':"${devSuffix}"], "--overwrite") - openshift.apply(newObjectCopies).label(['app':"${devAppName}", 'app-name':"${appName}", 'env-name':"${devSuffix}"], "--overwrite") - echo "Successfully applied deployment configs for ${prNumber}" - - // promote the newly built image to DEV - echo "Tagging new image to DEV imagestream." - openshift.tag("${toolsProject}/gwells-application:${prNumber}", "${devProject}/${devAppName}:dev") // todo: clean up labels/tags - - // post a notification to Github that this pull request is being deployed - createDeploymentStatus(devSuffix, 'PENDING', devHost) - - // monitor the deployment status and wait until deployment is successful - echo "Waiting for deployment to dev..." - def newVersion = openshift.selector("dc", "${devAppName}").object().status.latestVersion - def pods = openshift.selector('pod', [deployment: "${devAppName}-${newVersion}"]) - - // wait until each container in this deployment's pod reports as ready - timeout(15) { - pods.untilEach(2) { - return it.object().status.containerStatuses.every { - it.ready - } - } - } - - def pgtileservVersion = openshift.selector("dc", "pgtileserv-${devSuffix}-${prNumber}").object().status.latestVersion - def pgtileservPods = openshift.selector('pod', [deployment: "pgtileserv-${devSuffix}-${prNumber}-${newVersion}"]) - - // wait until each container in this deployment's pod reports as ready - timeout(15) { - pods.untilEach(2) { - return it.object().status.containerStatuses.every { - it.ready - } - } - } - - - - // Report a pass to GitHub - createDeploymentStatus(devSuffix, 'SUCCESS', devHost) - } - } - } - } - - // the Django Unit Tests stage runs backend unit tests using a test DB that is - // created and destroyed afterwards. 
- stage('DEV - Django Unit Tests') { - when { - expression { env.CHANGE_TARGET != 'master' } - } - steps { - script { - def result = unitTestDjango (env.STAGE_NAME, devProject, devSuffix) - } - } - } - - - stage('DEV - Load Fixtures') { - when { - expression { env.CHANGE_TARGET != 'master' } - } - steps { - script { - _openshift(env.STAGE_NAME, devProject) { - def newVersion = openshift.selector("dc", "${devAppName}").object().status.latestVersion - def pods = openshift.selector('pod', [deployment: "${devAppName}-${newVersion}"]) - - echo "Loading fixtures" - def ocoutput = openshift.exec( - pods.objects()[0].metadata.name, - "--", - "bash -c '\ - cd /opt/app-root/src/backend; \ - ./load_fixtures.sh all \ - '" - ) - echo "Load Fixtures results: "+ ocoutput.actions[0].out - - openshift.exec( - pods.objects()[0].metadata.name, - "--", - "bash -c '\ - cd /opt/app-root/src/backend; \ - python manage.py createinitialrevisions \ - '" - ) - } - } - } - } - - - stage('DEV - API Tests') { - when { - expression { env.CHANGE_TARGET != 'master' } - } - steps { - script { - def result = apiTest ('DEV - API Tests', devHost, devSuffix) - } - } - } - } -} diff --git a/gwells/Jenkinsfile.ocp4 b/gwells/Jenkinsfile.ocp4 deleted file mode 100644 index 8c5914e6e..000000000 --- a/gwells/Jenkinsfile.ocp4 +++ /dev/null @@ -1,1477 +0,0 @@ -#!groovy - -import groovy.json.JsonOutput -import bcgov.GitHubHelper - - -// Notify stage status and pass to Jenkins-GitHub library -void notifyStageStatus (String name, String status) { - GitHubHelper.createCommitStatus( - this, - GitHubHelper.getPullRequestLastCommitId(this), - status, - "${env.BUILD_URL}", - "Stage '${name}'", - "Stage: ${name}" - ) -} - - -// Create deployment status and pass to Jenkins-GitHub library -void createDeploymentStatus (String suffix, String status, String stageUrl) { - def ghDeploymentId = new GitHubHelper().createDeployment( - this, - "pull/${env.CHANGE_ID}/head", - [ - 'environment':"${suffix}", - 'task':"deploy:pull:${env.CHANGE_ID}" - ] - ) - - // NOTE: this function in GitHubHelper no longer works. - // https://github.com/BCDevOps/jenkins-pipeline-shared-lib/issues/6 - // TODO: convert to use GitHub REST API - // https://docs.github.com/en/rest/reference/repos#deployments - - new GitHubHelper().createDeploymentStatus( - this, - ghDeploymentId, - "${status}", - ['targetUrl':"https://${stageUrl}/gwells"] - ) - - if ('SUCCESS'.equalsIgnoreCase("${status}")) { - echo "${suffix} deployment successful!" - } else if ('PENDING'.equalsIgnoreCase("${status}")){ - echo "${suffix} deployment pending." - } -} - - -// Print stack trace of error -@NonCPS -private static String stackTraceAsString(Throwable t) { - StringWriter sw = new StringWriter(); - t.printStackTrace(new PrintWriter(sw)); - return sw.toString() -} - - -// OpenShift wrapper -def _openshift(String name, String project, Closure body) { - script { - openshift.withCluster() { - openshift.withProject(project) { - echo "Running Stage '${name}'" - waitUntil { - notifyStageStatus (name, 'PENDING') - boolean isDone=false - try { - body() - isDone=true - notifyStageStatus(name, 'SUCCESS') - echo "Completed Stage '${name}'" - } catch (error){ - notifyStageStatus(name, 'FAILURE') - echo "${stackTraceAsString(error)}" - def inputAction = input( - message: "This step (${name}) has failed. See related messages.", - ok: 'Confirm', - parameters: [ - choice( - name: 'action', - choices: 'Re-run\nIgnore', - description: 'What would you like to do?' 
- ) - ] - ) - if ('Ignore'.equalsIgnoreCase(inputAction)){ - isDone=true - } - } - return isDone - } - } - } - } -} - -// Functional test script -// Can be limited by assigning toTest var -def unitTestDjango (String stageName, String envProject, String envSuffix) { - _openshift(env.STAGE_NAME, envProject) { - def DB_target = envSuffix == "staging" ? "${appName}-pg12-${envSuffix}" : "${appName}-pg12-${envSuffix}-${prNumber}" - def DB_newVersion = openshift.selector("dc", "${DB_target}").object().status.latestVersion - def DB_pod = openshift.selector('pod', [deployment: "${DB_target}-${DB_newVersion}"]) - echo "Temporarily granting elevated DB rights" - echo DB_target - - - - sh "oc rsh -n ${envProject} dc/${DB_target} bash -c ' \ - psql -c \"ALTER USER \\\"\${PG_USER}\\\" WITH SUPERUSER;\" \ - '" - - def target = envSuffix == "staging" ? "${appName}-${envSuffix}" : "${appName}-${envSuffix}-${prNumber}" - def newVersion = openshift.selector("dc", "${target}").object().status.latestVersion - def pods = openshift.selector('pod', [deployment: "${target}-${newVersion}"]) - - // Wait here and make sure the app pods are ready before running unit tests. - // We wait for both pods to be ready so that we can execute the test command - // on either one, without having to check which one was ready first. - timeout(15) { - pods.untilEach(2) { - return it.object().status.containerStatuses.every { - it.ready - } - } - } - - - echo "Running Django unit tests" - def ocoutput = openshift.exec( - pods.objects()[0].metadata.name, - "--", - "bash -c '\ - cd \${APP_SOURCE_DIR:-\"\${APP_ROOT}/src\"}/backend; \ - python manage.py test --noinput \ - '" - ) - echo "Django test results: "+ ocoutput.actions[0].out - - echo "Revoking ADMIN rights" - sh "oc rsh -n ${envProject} dc/${DB_target} bash -c ' \ - psql -c \"ALTER USER \\\"\${PG_USER}\\\" WITH NOSUPERUSER;\" \ - '" - } -} - - -// API test function -def apiTest (String stageName, String stageUrl, String envSuffix) { - - _openshift(env.STAGE_NAME, toolsProject) { - podTemplate( - label: "nodejs-${appName}-${envSuffix}-${prNumber}", - name: "nodejs-${appName}-${envSuffix}-${prNumber}", - serviceAccount: 'jenkins', - cloud: 'openshift', - activeDeadlineSeconds: 1800, - containers: [ - containerTemplate( - name: 'jnlp', - image: 'registry.access.redhat.com/openshift3/jenkins-agent-nodejs-8-rhel7', - resourceRequestCpu: '500m', - resourceLimitCpu: '800m', - resourceRequestMemory: '512Mi', - resourceLimitMemory: '1Gi', - activeDeadlineSeconds: '600', - podRetention: 'never', - workingDir: '/tmp', - command: '', - args: '${computer.jnlpmac} ${computer.name}', - envVars: [ - envVar( - key:'BASE_URL', - value: "https://${stageUrl}/gwells" - ), - secretEnvVar( - key: 'GWELLS_API_TEST_AUTH_SERVER', - secretName: 'apitest-secrets', - secretKey: 'auth_server' - ), - secretEnvVar( - key: 'GWELLS_API_TEST_CLIENT_ID', - secretName: 'apitest-secrets', - secretKey: 'client_id' - ), - secretEnvVar( - key: 'GWELLS_API_TEST_CLIENT_SECRET', - secretName: 'apitest-secrets', - secretKey: 'client_secret' - ) - ] - ) - ] - ) { - node("nodejs-${appName}-${envSuffix}-${prNumber}") { - checkout scm - dir('tests/api-tests') { - sh 'npm install -g newman@4.6.1' - try { - sh """ - newman run ./registries_api_tests.json \ - --color on \ - --disable-unicode \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html -
newman run ./registries_v2_api_tests.json \ - --color on \ - --disable-unicode \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./wells_api_tests.json \ - --color on \ - --disable-unicode \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./wells_v2_api_tests.json \ - --color on \ - --disable-unicode \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./submissions_api_tests.json \ - --color on \ - --disable-unicode \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./submissions_v2_api_tests.json \ - --color on \ - --disable-unicode \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./aquifers_api_tests.json \ - --color on \ - --disable-unicode \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./aquifers_v2_api_tests.json \ - --color on \ - --disable-unicode \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./cities_api_tests.json \ - --color on \ - --disable-unicode \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./configuration_api_tests.json \ - --color on \ - --disable-unicode \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./utilities_api_tests.json \ - --color on \ - --disable-unicode \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - """ - - if ("dev".equalsIgnoreCase("${envSuffix}")) { - sh """ - newman run ./wells_search_api_tests.json \ - --color on \ - --disable-unicode \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run 
./wells_search_v2_api_tests.json \ - --color on \ - --disable-unicode \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - newman run ./exports_api_tests.json \ - --color on \ - --disable-unicode \ - --global-var base_url=\$BASE_URL \ - --global-var auth_server=\$GWELLS_API_TEST_AUTH_SERVER \ - --global-var client_id=\$GWELLS_API_TEST_CLIENT_ID \ - --global-var client_secret=\$GWELLS_API_TEST_CLIENT_SECRET \ - -r cli,junit,html - """ - } - - } finally { - junit 'newman/*.xml' - publishHTML ( - target: [ - allowMissing: false, - alwaysLinkToLastBuild: false, - keepAll: true, - reportDir: 'newman', - reportFiles: 'newman*.html', - reportName: "API Test Report" - ] - ) - stash includes: 'newman/*.xml', name: 'api-tests' - } - } - } - } - } - return true -} - -def deployToDev() { - // Process postgres deployment config (sub in vars, create list items) - echo "Processing database deployment (using folder ${templateDir}" - def deployDBTemplate = openshift.process("-f", - "${templateDir}/postgresql.dc.yml", - "DATABASE_SERVICE_NAME=gwells-pg12-${devSuffix}-${prNumber}", - "IMAGE_STREAM_NAMESPACE=${devProject}", - "IMAGE_STREAM_NAME=crunchy-postgres-gis", - "NAME_SUFFIX=-${devSuffix}-${prNumber}", - "POSTGRESQL_DATABASE=gwells", - "VOLUME_CAPACITY=1Gi", - "STORAGE_CLASS=netapp-file-standard", - "REQUEST_CPU=200m", - "REQUEST_MEMORY=512Mi", - "LIMIT_CPU=500m", - "LIMIT_MEMORY=1Gi" - ) - - // Process postgres deployment config (sub in vars, create list items) - echo "Processing deployment config for pull request ${prNumber}" - def deployTemplate = openshift.process("-f", - "${templateDir}/backend.dc.json", - "ENV_NAME=${devSuffix}", - "HOST=${devHost}", - "NAME_SUFFIX=-${devSuffix}-${prNumber}" - ) - - echo "Processing deployment config for tile server" - def pgtileservTemplate = openshift.process("-f", - "${templateDir}/pg_tileserv/pg_tileserv.dc.yaml", - "NAME_SUFFIX=-${devSuffix}-${prNumber}", - "DATABASE_SERVICE_NAME=gwells-pg12-${devSuffix}-${prNumber}", - "HOST=${devHost}", - ) - - echo "Processing Minio deployment config" - def minioTemplate = openshift.process("-f", - "${templateDir}/minio/minio.dc.yaml", - "NAME_SUFFIX=-${devSuffix}-${prNumber}", - "HOSTNAME=gwells-docs-${devSuffix}-${prNumber}.apps.silver.devops.gov.bc.ca", - "SRC_TAG=dev" - ) - - // some objects need to be copied from a base secret or configmap - // these objects have an annotation "as-copy-of" in their object spec (e.g. an object in backend.dc.json) - echo "Creating configmaps and secrets objects" - List newObjectCopies = [] - - for (o in (deployTemplate + deployDBTemplate)) { - // only perform this operation on objects with 'as-copy-of' - def sourceName = (o.metadata && o.metadata.annotations && o.metadata.annotations['as-copy-of']) ? 
o.metadata.annotations['as-copy-of'] : false - if (sourceName && sourceName.length() > 0) { - def selector = openshift.selector("${o.kind}/${sourceName}") - if (selector.count() == 1) { - - // create a copy of the object and add it to the new list of objects to be applied - Map copiedModel = selector.object() - copiedModel.metadata.name = o.metadata.name - copiedModel.metadata.remove('annotations') - copiedModel.metadata.remove('creationTimestamp') - copiedModel.metadata.remove('resourceVersion') - copiedModel.metadata.remove('selfLink') - copiedModel.metadata.remove('uid') - - // set Minio host for dev environments - if (sourceName == 'gwells-global-config') { - copiedModel.data['S3_PRIVATE_HOST'] = "gwells-docs-${devSuffix}-${prNumber}.apps.silver.devops.gov.bc.ca" - } - if (sourceName == 'gwells-minio-secrets') { - copiedModel.data.remove('S3_HOST') - copiedModel.stringData = [:] - copiedModel.stringData['S3_HOST'] = "gwells-docs-${devSuffix}-${prNumber}.apps.silver.devops.gov.bc.ca" - } - - echo "[as-copy-of] Copying ${o.kind} ${o.metadata.name}" - newObjectCopies.add(copiedModel) - - - - } - } - } - - echo "Applying deployment configs for pull request ${prNumber} on ${devProject}" - - // apply the templates, which will create new objects or modify existing ones as necessary. - // the copies of base objects (secrets, configmaps) are also applied. - openshift.apply(pgtileservTemplate).label(['app':"${devAppName}", 'app-name':"${appName}", 'env-name':"${devSuffix}"], "--overwrite") - openshift.apply(minioTemplate).label(['app':"${devAppName}", 'app-name':"${appName}", 'env-name':"${devSuffix}"], "--overwrite") - openshift.apply(deployTemplate).label(['app':"${devAppName}", 'app-name':"${appName}", 'env-name':"${devSuffix}"], "--overwrite") - openshift.apply(deployDBTemplate).label(['app':"${devAppName}", 'app-name':"${appName}", 'env-name':"${devSuffix}"], "--overwrite") - openshift.apply(newObjectCopies).label(['app':"${devAppName}", 'app-name':"${appName}", 'env-name':"${devSuffix}"], "--overwrite") - echo "Successfully applied deployment configs for ${prNumber}" - - // promote the newly built image to DEV - echo "Tagging new image to DEV imagestream." - openshift.tag("${toolsProject}/gwells-application:${prNumber}", "${devProject}/${devAppName}:dev") // todo: clean up labels/tags - - // post a notification to Github that this pull request is being deployed - createDeploymentStatus(devSuffix, 'PENDING', devHost) - - // monitor the deployment status and wait until deployment is successful - echo "Waiting for deployment to dev..." - - // wait until each container in this deployment's pod reports as ready - timeout(15) { - openshift.selector("dc", "${devAppName}").rollout().status() - openshift.selector("dc", "pgtileserv-${devSuffix}-${prNumber}").rollout().status() - } - // Report a pass to GitHub - createDeploymentStatus(devSuffix, 'SUCCESS', devHost) - } - -def loadFixtures(String appName) { - // wait for deployment config to finish rolling out - openshift.selector("dc", "${appName}").rollout().status() - def pods = openshift.selector("dc", "${appName}").related("pods") - def podName = pods.objects()[0].metadata.name - echo "Loading fixtures using pod/${podName}" - // the dc rollout status above should be enough! 
- def waitStatus = openshift.raw('wait', '--for=condition=Ready', "pod/${podName}", '--timeout=300s') - echo "Wait for pod/${podName}: ${waitStatus.out}" - def ocoutput = openshift.exec( - podName, - "--", - "bash -c '\ - cd \${APP_SOURCE_DIR:-\"\${APP_ROOT}/src\"}/backend; \ - ./load_fixtures.sh all \ - '" - ) - echo "Load Fixtures results: "+ ocoutput.actions[0].out - - openshift.exec( - podName, - "--", - "bash -c '\ - cd \${APP_SOURCE_DIR:-\"\${APP_ROOT}/src\"}/backend; \ - python manage.py createinitialrevisions \ - '" - ) -} - -def zapTests (String stageName, String envUrl, String envSuffix) { - _openshift(env.STAGE_NAME, toolsProject) { - def podName = envSuffix == "dev" ? "zap-${envSuffix}-${prNumber}" : "zap-${envSuffix}" - podTemplate( - label: "${podName}", - name: "${podName}", - serviceAccount: "jenkins", - cloud: "openshift", - containers: [ - containerTemplate( - name: 'jnlp', - image: 'docker-registry.default.svc:5000/openshift/jenkins-slave-zap', - resourceRequestCpu: '1', - resourceLimitCpu: '1', - resourceRequestMemory: '2Gi', - resourceLimitMemory: '2Gi', - activeDeadlineSeconds: '600', - workingDir: '/home/jenkins', - command: '', - args: '${computer.jnlpmac} ${computer.name}', - envVars: [ - envVar( - key:'BASE_URL', - value: "https://${envUrl}/gwells" - ) - ] - ) - ] - ) { - node("${podName}") { - checkout scm - sh ( - script: "/zap/zap-baseline.py -r index.html -t $BASE_URL", - returnStatus: true - ) - - publishHTML( - target: [ - allowMissing: false, - alwaysLinkToLastBuild: false, - keepAll: true, - reportDir: '/zap/wrk', - reportFiles: 'index.html', - reportName: 'ZAP Baseline Scan', - reportTitles: 'ZAP Baseline Scan' - ] - ) - } - } - } - return true -} - - -// Database backup -def dbBackup (String envProject, String envSuffix) { - - - def dcName = envSuffix == "dev" ? "${appName}-pg12-${envSuffix}-${prNumber}" : "${appName}-pg12-${envSuffix}" - def dumpDir = "/pgdata/deployment-backups" - def dumpName = "${envSuffix}-\$( date +%Y-%m-%d-%H%M ).dump" - def dumpOpts = "--no-privileges --no-tablespaces --schema=public --exclude-table=spatial_ref_sys" - def dumpTemp = "/tmp/unverified.dump" - int maxBackups = 10 - - //Dump to temporary file - sh "oc rsh -n ${envProject} dc/${dcName} bash -c ' \ - pg_dump -U \${PG_USER} -d \${PG_DATABASE} -Fc -f ${dumpTemp} ${dumpOpts} \ - '" - - // Verify dump size is at least 1M - String sizeAtLeast1M = sh ( - script: "oc rsh -n ${envProject} dc/${dcName} bash -c ' \ - du --threshold=1M ${dumpTemp} | wc -l \ - '", - returnStdout: true - ) - assert sizeAtLeast1M.toInteger() == 1 - - // Store verified dump - sh "oc rsh -n ${envProject} dc/${dcName} bash -c ' \ - mkdir -p ${dumpDir}; \ - mv ${dumpTemp} ${dumpDir}/${dumpName}; \ - ls -lh ${dumpDir} \ - '" - - // Database purge - sh "oc rsh -n ${envProject} dc/${dcName} bash -c \" \ - find ${dumpDir} -name *.dump -printf '%Ts\t%p\n' \ - | sort -nr | cut -f2 | tail -n +${maxBackups} | xargs rm 2>/dev/null \ - || echo 'No extra backups to remove' \ - \"" -} - -pipeline { - triggers { - cron(env.BRANCH_NAME == 'PR-1800' ? '0 7 * * 1-5' : '') - } - options { - timestamps() - ansiColor('xterm') - } - environment { - // Project-wide settings - app name, repo - appName = "gwells" - repository = 'https://www.github.com/bcgov/gwells.git' - platformEnv = "4" - - platformDomain = "${platformEnv == '4' ? 'apps.silver.devops.gov.bc.ca' : 'pathfinder.gov.bc.ca'}" - - // prNumber is the pull request number e.g. 
'pr-4' - prNumber = "${env.JOB_BASE_NAME}".toLowerCase() - - // toolsProject is where images are built - toolsProject = "${APP_TOOLS_NAMESPACE ?: "26e83e-tools"}" - - // devProject is the project where individual development environments are spun up - devProject = "${APP_DEV_NAMESPACE ?: "26e83e-dev"}" - devSuffix = "dev" - devAppName = "${appName}-${devSuffix}-${prNumber}" - devHost = "${devAppName}.${platformDomain}" - - // stagingProject contains the test deployment. The test image is a candidate for promotion to prod. - stagingProject = "${APP_STAGING_NAMESPACE ?: "26e83e-test"}" - stagingSuffix = "staging" - stagingHost = "gwells-staging.${platformDomain}" - - // prodProject is the prod deployment. - // TODO: New production images can be deployed by tagging an existing "test" image as "prod". - prodProject = "${APP_PROD_NAMESPACE ?: "26e83e-prod"}" - prodSuffix = "production" - prodSubdomain = "${platformEnv == '4' ? 'gwells' : 'gwells-prod'}" - prodHost = "${prodSubdomain}.${platformDomain}" - - // name of the provisioned PVC claim for NFS backup storage - // this will not be created during the pipeline; it must be created - // before running the production pipeline. - nfsProdBackupPVC = "gwells-backups" - nfsStagingBackupPVC = "gwells-backups" - - // name of the PVC where documents are stored (e.g. Minio PVC) - // this should be the same across all environments. - minioDataPVC = "minio-data-vol" - - - templateDir = "${platformEnv == '4' ? 'openshift/ocp4' : 'openshift' }" - - } - agent none - stages { - // the Start Pipeline stage will process and apply OpenShift build templates which will create - // buildconfigs and an imagestream for built images. - // each pull request gets its own buildconfig but all new builds are pushed to a single imagestream, - // to be tagged with the pull request number. - // e.g.: gwells-app:pr-999 - stage('ALL - Prepare Templates') { - agent { label 'build' } - steps { - script { - echo "Starting deployment to OCP platform: ${platformEnv}" - echo "Cancelling previous builds..." - timeout(10) { - abortAllPreviousBuildInProgress(currentBuild) - } - echo "Previous builds cancelled" - echo "Processing/applying template: ${templateDir}/backend.bc.json" - _openshift(env.STAGE_NAME, toolsProject) { - // - variable substitution - def buildtemplate = openshift.process("-f", - "${templateDir}/backend.bc.json", - "ENV_NAME=${devSuffix}", - "NAME_SUFFIX=-${devSuffix}-${prNumber}", - "APP_IMAGE_TAG=${prNumber}", - "SOURCE_REPOSITORY_URL=${repository}", - "SOURCE_REPOSITORY_REF=pull/${CHANGE_ID}/head" - ) - - // Apply oc list objects - // - add docker image reference as tag in gwells-application - // - create build config - echo "Preparing backend imagestream and buildconfig" - openshift.apply(buildtemplate) - } - } - } - } - - // the Build stage builds files; an image will be outputted to the app's imagestream, - // using the source-to-image (s2i) strategy. See /app/.s2i/assemble for image build script - stage('ALL - Build') { - agent { label 'build' } - steps { - script { - _openshift(env.STAGE_NAME, toolsProject) { - echo "Running unit tests and building images..." - echo "This may take several minutes. Logs are not forwarded to Jenkins by default (at this time)." 
- echo "Additional logs can be found by monitoring bc/${devAppName} in ${toolsProject}" - - // Select appropriate buildconfig - def appBuild = openshift.selector("bc", "${devAppName}") - echo "Canceling all existing/running builds" - appBuild.cancelBuild() - echo "Starting a new build" - def newBuildSelector = appBuild.startBuild() - echo "Build Started: ${newBuildSelector.names()}" - newBuildSelector.logs("-f") - echo "Build Ended: ${newBuildSelector.names()}" - - def buildStatus = "" - timeout(1) { - waitUntil { - buildStatus = newBuildSelector.object().status.phase - echo "Build Status: ${buildStatus}" - return buildStatus != "Running" - } - } - - if (newBuildSelector.object().status.phase != "Complete") { - error("Build ${newBuildSelector.names()} has failed!") - } - } - } - } - } - - // the Deploy to Dev stage creates a new dev environment for the pull request (if necessary), tagging - // the newly built application image into that environment. This stage monitors the newest deployment - // for pods/containers to report back as ready. - stage('DEV - Deploy') { - agent { label 'deploy' } - when { - beforeAgent true - expression { env.CHANGE_TARGET != 'master' } - } - steps { - script { - _openshift(env.STAGE_NAME, devProject) { - deployToDev() - } - } - } - } - - // the Django Unit Tests stage runs backend unit tests using a test DB that is - // created and destroyed afterwards. - stage('DEV - Django Unit Tests') { - agent { label 'test' } - when { - beforeAgent true - expression { env.CHANGE_TARGET != 'master' } - } - steps { - script { - def result = unitTestDjango (env.STAGE_NAME, devProject, devSuffix) - } - } - } - - stage('DEV - Load Fixtures') { - agent { label 'test' } - when { - beforeAgent true - expression { env.CHANGE_TARGET != 'master' } - } - steps { - script { - _openshift(env.STAGE_NAME, devProject) { - loadFixtures(devAppName) - } - } - } - } - - stage('DEV - API Tests') { - agent { label 'test' } - when { - beforeAgent true - expression { env.CHANGE_TARGET != 'master' } - } - steps { - script { - def result = apiTest ('DEV - API Tests', devHost, devSuffix) - } - } - } - - stage('STAGING - Backup') { - agent { label 'deploy' } - when { - beforeAgent true - expression { env.CHANGE_TARGET == 'master' } - } - steps { - script { - echo "backing up staging environment before deploying" - dbBackup (stagingProject, stagingSuffix) - } - } - } - - // the Promote to Test stage allows approving the tagging of the newly built image into the test environment, - // which will trigger an automatic deployment of that image. - // this stage should only occur when the pull request is being made against the master branch. - stage('STAGING - Deploy') { - agent { label 'deploy' } - when { - beforeAgent true - expression { env.CHANGE_TARGET == 'master' } - } - steps { - script { - _openshift(env.STAGE_NAME, stagingProject) { - echo "Preparing..." - - // Process db and app template into list objects - // TODO: Match docker-compose image from 26e83e-tools - echo "Updating staging deployment..." 
- def deployDBTemplate = openshift.process("-f", - "${templateDir}/postgresql.dc.yml", - "NAME_SUFFIX=-${stagingSuffix}", - "DATABASE_SERVICE_NAME=gwells-pg12-${stagingSuffix}", - "IMAGE_STREAM_NAMESPACE=${stagingProject}", - "IMAGE_STREAM_NAME=crunchy-postgres-gis", - "POSTGRESQL_DATABASE=gwells", - "VOLUME_CAPACITY=20Gi", - "STORAGE_CLASS=netapp-file-standard", - "REQUEST_CPU=400m", - "REQUEST_MEMORY=2Gi", - "LIMIT_CPU=400m", - "LIMIT_MEMORY=2Gi" - ) - - def deployTemplate = openshift.process("-f", - "${templateDir}/backend.dc.json", - "NAME_SUFFIX=-${stagingSuffix}", - "ENV_NAME=${stagingSuffix}", - "HOST=${stagingHost}", - "CPU_REQUEST=500m", - "CPU_LIMIT=2", - ) - - - echo "Processing deployment config for tile server" - def pgtileservTemplate = openshift.process("-f", - "${templateDir}/pg_tileserv/pg_tileserv.dc.yaml", - "NAME_SUFFIX=-${stagingSuffix}", - "DATABASE_SERVICE_NAME=gwells-pg12-${stagingSuffix}", - "IMAGE_TAG=20201112", - "HOST=${stagingHost}", - ) - - echo "Processing Minio deployment config" - def minioTemplate = openshift.process("-f", - "${templateDir}/minio/minio.dc.yaml", - "NAME_SUFFIX=-${stagingSuffix}", - "DEST_PVC_SIZE=10Gi", - "HOSTNAME=gwells-docs-${stagingSuffix}.apps.silver.devops.gov.bc.ca" - ) - - - echo "Processing backup volume config" - def backupVolConfig = openshift.process("-f", - "${templateDir}/backup.pvc.yaml", - "VOLUME_CAPACITY=20Gi", - "STORAGE_CLASS=netapp-file-backup" - ) - - // some objects need to be copied from a base secret or configmap - // these objects have an annotation "as-copy-of" in their object spec (e.g. an object in backend.dc.json) - echo "Creating configmaps and secrets objects" - List newObjectCopies = [] - - // todo: refactor to explicitly copy the objects we need - for (o in (deployTemplate + deployDBTemplate)) { - - // only perform this operation on objects with 'as-copy-of' - def sourceName = (o.metadata && o.metadata.annotations && o.metadata.annotations['as-copy-of']) ? o.metadata.annotations['as-copy-of'] : false - if (sourceName && sourceName.length() > 0) { - - def selector = openshift.selector("${o.kind}/${sourceName}") - if (selector.count() == 1) { - // create a copy of the object and add it to the new list of objects to be applied - Map copiedModel = selector.object() - copiedModel.metadata.remove('annotations') - copiedModel.metadata.remove('creationTimestamp') - copiedModel.metadata.remove('resourceVersion') - copiedModel.metadata.remove('selfLink') - copiedModel.metadata.remove('uid') - copiedModel.metadata.name = o.metadata.name - echo "Copying ${o.kind} ${o.metadata.name}" - newObjectCopies.add(copiedModel) - } - } - } - - openshift.apply(deployDBTemplate).label( - [ - 'app':"gwells-${stagingSuffix}", - 'app-name':"${appName}", - 'env-name':"${stagingSuffix}" - ], - "--overwrite" - ) - - // apply the templates, which will create new objects or modify existing ones as necessary. - // the copies of base objects (secrets, configmaps) are also applied. 
- echo "Applying deployment config for pull request ${prNumber} on ${stagingProject}" - - openshift.apply(pgtileservTemplate).label( - [ - 'app':"gwells-${stagingSuffix}", - 'app-name':"${appName}", - 'env-name':"${stagingSuffix}" - ], - "--overwrite" - ) - - openshift.apply(minioTemplate).label( - ['app':"gwells-${stagingSuffix}", 'app-name':"${appName}", 'env-name':"${stagingSuffix}"], - "--overwrite" - ) - - openshift.apply(deployTemplate).label( - [ - 'app':"gwells-${stagingSuffix}", - 'app-name':"${appName}", - 'env-name':"${stagingSuffix}" - ], - "--overwrite" - ) - - openshift.apply(backupVolConfig).label( - [ - 'app':"gwells-${stagingSuffix}", - 'app-name':"${appName}", - 'env-name':"${stagingSuffix}" - ], - "--overwrite" - ) - - openshift.apply(newObjectCopies).label( - [ - 'app':"gwells-${stagingSuffix}", - 'app-name':"${appName}", - 'env-name':"${stagingSuffix}" - ], - "--overwrite" - ) - - echo "Successfully applied TEST deployment config" - - // promote the newly built image to DEV - echo "Tagging new image to TEST imagestream." - - // Application/database images are tagged in the tools imagestream as the new test/prod image - openshift.tag( - "${toolsProject}/gwells-application:${prNumber}", - "${toolsProject}/gwells-application:${stagingSuffix}" - ) - - // Images are then tagged into the target environment namespace (test or prod) - openshift.tag( - "${toolsProject}/gwells-application:${stagingSuffix}", - "${stagingProject}/gwells-${stagingSuffix}:${stagingSuffix}" - ) // todo: clean up labels/tags - - createDeploymentStatus(stagingSuffix, 'PENDING', stagingHost) - - // Create cronjob for well export - def exportWellCronTemplate = openshift.process("-f", - "${templateDir}/jobs/export-databc/export.cj.json", - "ENV_NAME=${stagingSuffix}", - "PROJECT=${stagingProject}", - "TAG=${stagingSuffix}", - "NAME=export", - "COMMAND=export", - "SCHEDULE=30 3 * * *" - ) - openshift.apply(exportWellCronTemplate).label( - [ - 'app':"gwells-${stagingSuffix}", - 'app-name':"${appName}", - 'env-name':"${stagingSuffix}" - ], - "--overwrite" - ) - - // Create cronjob for licence import - def importLicencesCronjob = openshift.process("-f", - "${templateDir}/jobs/import-licences/import-licences.cj.json", - "ENV_NAME=${stagingSuffix}", - "PROJECT=${stagingProject}", - "TAG=${stagingSuffix}", - "NAME=licences", - "COMMAND=import_licences", - "SCHEDULE=40 3 * * *" - ) - openshift.apply(importLicencesCronjob).label( - [ - 'app':"gwells-${stagingSuffix}", - 'app-name':"${appName}", - 'env-name':"${stagingSuffix}" - ], - "--overwrite" - ) - - // Create cronjob for aquifer demand calc update - def importUpdateAquiferCronjob = openshift.process("-f", - "${templateDir}/jobs/update-aquifer/update-aquifer.cj.json", - "ENV_NAME=${stagingSuffix}", - "PROJECT=${stagingProject}", - "TAG=${stagingSuffix}", - "NAME=demand", - "COMMAND=update_demand", - "SCHEDULE=50 3 * * *" - ) - openshift.apply(importUpdateAquiferCronjob).label( - [ - 'app':"gwells-${stagingSuffix}", - 'app-name':"${appName}", - 'env-name':"${stagingSuffix}" - ], - "--overwrite" - ) - - - // Create cronjob for databc export - def exportDataBCTemplate = openshift.process("-f", - "${templateDir}/jobs/export-databc/export.cj.json", - "ENV_NAME=${stagingSuffix}", - "PROJECT=${stagingProject}", - "TAG=${stagingSuffix}", - "NAME=export-databc", - "COMMAND=export_databc", - "SCHEDULE=0 4 * * *" - ) - openshift.apply(exportDataBCTemplate).label( - [ - 'app':"gwells-${stagingSuffix}", - 'app-name':"${appName}", - 'env-name':"${stagingSuffix}" - ], - 
"--overwrite" - ) - - // automated minio backup - def docBackupCronjob = openshift.process("-f", - "${templateDir}/jobs/minio-backup/minio-backup.cj.yaml", - "NAME_SUFFIX=${stagingSuffix}", - "NAMESPACE=${stagingProject}", - "VERSION=v1.0.0", - "SCHEDULE=15 3 * * *", - "DEST_PVC=gwells-backups", - "PVC_SIZE=10Gi", - "SOURCE_PVC=gwells-minio-${stagingSuffix}" - ) - - openshift.apply(docBackupCronjob).label( - [ - 'app':"gwells-${stagingSuffix}", - 'app-name':"${appName}", - 'env-name':"${stagingSuffix}" - ], - "--overwrite" - ) - - // automated database backup - def dbNFSBackup = openshift.process("-f", - "${templateDir}/jobs/postgres-backup-nfs/postgres-backup.cj.yaml", - "NAMESPACE=${stagingProject}", - "TAG_NAME=v12.0.0", - "TARGET=gwells-pg12-staging", - "PVC_NAME=gwells-backups", - "SCHEDULE=30 2 * * *", - "JOB_NAME=postgres-nfs-backup", - "DAILY_BACKUPS=2", - "WEEKLY_BACKUPS=1", - "MONTHLY_BACKUPS=1" - ) - openshift.apply(dbNFSBackup).label( - [ - 'app':"gwells-${stagingSuffix}", - 'app-name':"${appName}", - 'env-name':"${stagingSuffix}" - ], - "--overwrite" - ) - - // monitor the deployment status and wait until deployment is successful - echo "Waiting for deployment to STAGING..." - def newVersion = openshift.selector("dc", "gwells-${stagingSuffix}").object().status.latestVersion - def pods = openshift.selector('pod', [deployment: "gwells-${stagingSuffix}-${newVersion}"]) - - // wait until at least one pod reports as ready - timeout(15) { - pods.untilEach(2) { - return it.object().status.containerStatuses.every { - it.ready - } - } - } - - def pgtileservVersion = openshift.selector("dc", "pgtileserv-${stagingSuffix}").object().status.latestVersion - def pgtileservPods = openshift.selector('pod', [deployment: "pgtileserv-${stagingSuffix}-${newVersion}"]) - - // wait until each container in this deployment's pod reports as ready - timeout(15) { - pods.untilEach(2) { - return it.object().status.containerStatuses.every { - it.ready - } - } - } - - createDeploymentStatus(stagingSuffix, 'SUCCESS', stagingHost) - } - } - } - } - - - // the Django Unit Tests stage runs backend unit tests using a test DB that is - // created and destroyed afterwards. - stage('Staging - Django Unit Tests') { - agent { label 'test' } - when { - beforeAgent true - expression { env.CHANGE_TARGET == 'master' } - } - steps { - script { - def result = unitTestDjango (env.STAGE_NAME, stagingProject, stagingSuffix) - } - } - } - - - stage('STAGING - API Tests') { - agent { label 'test' } - when { - beforeAgent true - expression { env.CHANGE_TARGET == 'master' } - } - steps { - script { - def result = apiTest ('STAGING - API Tests', stagingHost, stagingSuffix) - } - } - } - - stage('PROD - Backup') { - agent { label 'deploy' } - when { - beforeAgent true - expression { env.CHANGE_TARGET == 'master' } - } - steps { - script { - dbBackup (prodProject, prodSuffix) - } - } - } - - stage('PROD - Approval') { - agent none - when { - beforeInput true - expression { - def buildCause = currentBuild.rawBuild.getCauses().collect { - it.getClass().getName().tokenize('.').last() - } - echo "Build Cause: ${buildCause}" - env.CHANGE_TARGET == 'master' && buildCause.toString() != '[TimerTrigger$TimerTriggerCause]' - } - } - input { - message "Deploy to production?" - ok "Yes, deploy to production." - submitter "authenticated" - } - steps { - echo "Approved!" 
- } - } - - stage('PROD - Deploy') { - agent { label 'deploy' } - when { - beforeAgent true - expression { - def buildCause = currentBuild.rawBuild.getCauses().collect { - it.getClass().getName().tokenize('.').last() - } - echo "Build Cause: ${buildCause}" - env.CHANGE_TARGET == 'master' && buildCause.toString() != '[TimerTrigger$TimerTriggerCause]' - } - } - steps { - script { - echo "Updating production deployment..." - - _openshift(env.STAGE_NAME, prodProject) { - - // Pre-deployment database backup - def dbBackupResult = dbBackup (prodProject, prodSuffix) - - def deployDBTemplate = openshift.process("-f", - "${templateDir}/postgresql.dc.yml", - "NAME_SUFFIX=-${prodSuffix}", - "DATABASE_SERVICE_NAME=gwells-pg12-${prodSuffix}", - "IMAGE_STREAM_NAMESPACE=${prodProject}", - "IMAGE_STREAM_NAME=crunchy-postgres-gis", - "POSTGRESQL_DATABASE=gwells", - "STORAGE_CLASS=netapp-file-standard", - "VOLUME_CAPACITY=30Gi", - "REQUEST_CPU=800m", - "REQUEST_MEMORY=4Gi", - "LIMIT_CPU=2", - "LIMIT_MEMORY=4Gi" - ) - - def deployTemplate = openshift.process("-f", - "${templateDir}/backend.dc.json", - "NAME_SUFFIX=-${prodSuffix}", - "ENV_NAME=${prodSuffix}", - "HOST=${prodHost}", - "CPU_REQUEST=1", - "CPU_LIMIT=2", - "MEMORY_REQUEST=1Gi", - "MEMORY_LIMIT=2Gi" - ) - - echo "Processing deployment config for tile server" - def pgtileservTemplate = openshift.process("-f", - "${templateDir}/pg_tileserv/pg_tileserv.dc.yaml", - "NAME_SUFFIX=-${prodSuffix}", - "DATABASE_SERVICE_NAME=gwells-pg12-${prodSuffix}", - "IMAGE_TAG=20201112", - "HOST=${prodHost}", - ) - - echo "Processing Minio deployment config" - def minioTemplate = openshift.process("-f", - "${templateDir}/minio/minio.dc.yaml", - "NAME_SUFFIX=-${prodSuffix}", - "DEST_PVC_SIZE=20Gi", - "HOSTNAME=gwells-docs-${prodSuffix}.apps.silver.devops.gov.bc.ca" - ) - - echo "Processing backup volume config" - def backupVolConfig = openshift.process("-f", - "${templateDir}/backup.pvc.yaml", - "VOLUME_CAPACITY=30Gi", - "STORAGE_CLASS=netapp-file-backup" - ) - // some objects need to be copied from a base secret or configmap - // these objects have an annotation "as-copy-of" in their object spec (e.g. an object in backend.dc.json) - echo "Creating configmaps and secrets objects" - List newObjectCopies = [] - - for (o in (deployTemplate + deployDBTemplate)) { - - // only perform this operation on objects with 'as-copy-of' - def sourceName = (o.metadata && o.metadata.annotations && o.metadata.annotations['as-copy-of']) ? o.metadata.annotations['as-copy-of'] : false - if (sourceName && sourceName.length() > 0) { - def selector = openshift.selector("${o.kind}/${sourceName}") - if (selector.count() == 1) { - - // create a copy of the object and add it to the new list of objects to be applied - Map copiedModel = selector.object() - copiedModel.metadata.name = o.metadata.name - copiedModel.metadata.remove('annotations') - copiedModel.metadata.remove('creationTimestamp') - copiedModel.metadata.remove('resourceVersion') - copiedModel.metadata.remove('selfLink') - copiedModel.metadata.remove('uid') - echo "Copying ${o.kind} ${o.metadata.name}" - newObjectCopies.add(copiedModel) - } - } - } - - // apply the templates, which will create new objects or modify existing ones as necessary. - // the copies of base objects (secrets, configmaps) are also applied. 
- echo "Applying deployment config for pull request ${prNumber} on ${prodProject}" - - openshift.apply(deployDBTemplate).label( - [ - 'app':"gwells-${prodSuffix}", - 'app-name':"${appName}", - 'env-name':"${prodSuffix}" - ], - "--overwrite" - ) - - openshift.apply(pgtileservTemplate).label( - [ - 'app':"gwells-${prodSuffix}", - 'app-name':"${appName}", - 'env-name':"${prodSuffix}" - ], - "--overwrite" - ) - - openshift.apply(minioTemplate).label( - ['app':"gwells-${prodSuffix}", 'app-name':"${appName}", 'env-name':"${prodSuffix}"], - "--overwrite" - ) - - openshift.apply(deployTemplate).label( - [ - 'app':"gwells-${prodSuffix}", - 'app-name':"${appName}", - 'env-name':"${prodSuffix}" - ], - "--overwrite" - ) - - openshift.apply(newObjectCopies).label( - [ - 'app':"gwells-${prodSuffix}", - 'app-name':"${appName}", - 'env-name':"${prodSuffix}" - ], - "--overwrite" - ) - - - openshift.apply(backupVolConfig).label( - [ - 'app':"gwells-${prodSuffix}", - 'app-name':"${appName}", - 'env-name':"${prodSuffix}" - ], - "--overwrite" - ) - - echo "Successfully applied production deployment config" - // promote the newly built image to DEV - echo "Tagging new image to production imagestream." - // Application/database images are tagged in the tools imagestream as the new prod image - openshift.tag( - "${toolsProject}/gwells-application:${prNumber}", - "${toolsProject}/gwells-application:${prodSuffix}" - ) - - // Images are then tagged into the target environment namespace (prod) - openshift.tag( - "${toolsProject}/gwells-application:${prodSuffix}", - "${prodProject}/gwells-${prodSuffix}:${prodSuffix}" - ) - - createDeploymentStatus(prodSuffix, 'PENDING', prodHost) - - // Create cronjob for well export - def exportWellCronTemplate = openshift.process("-f", - "${templateDir}/jobs/export-databc/export.cj.json", - "ENV_NAME=${prodSuffix}", - "PROJECT=${prodProject}", - "TAG=${prodSuffix}", - "NAME=export", - "COMMAND=export", - "SCHEDULE=30 3 * * *" - ) - openshift.apply(exportWellCronTemplate).label( - [ - 'app':"gwells-${prodSuffix}", - 'app-name':"${appName}", - 'env-name':"${prodSuffix}" - ], - "--overwrite" - ) - - // Create cronjob for databc export - def exportDataBCCronTemplate = openshift.process("-f", - "${templateDir}/jobs/export-databc/export.cj.json", - "ENV_NAME=${prodSuffix}", - "PROJECT=${prodProject}", - "TAG=${prodSuffix}", - "NAME=export-databc", - "COMMAND=export_databc", - "SCHEDULE=0 5 * * *" - ) - openshift.apply(exportDataBCCronTemplate).label( - [ - 'app':"gwells-${prodSuffix}", - 'app-name':"${appName}", - 'env-name':"${prodSuffix}" - ], - "--overwrite" - ) - - def docBackupCronJob = openshift.process("-f", - "${templateDir}/jobs/minio-backup/minio-backup.cj.yaml", - "NAME_SUFFIX=${prodSuffix}", - "NAMESPACE=${prodProject}", - "VERSION=v1.0.0", - "SCHEDULE=15 4 * * *", - "DEST_PVC=${nfsProdBackupPVC}", - "SOURCE_PVC=gwells-minio-${prodSuffix}", - "PVC_SIZE=20Gi" - ) - - openshift.apply(docBackupCronJob).label( - [ - 'app':"gwells-${prodSuffix}", - 'app-name':"${appName}", - 'env-name':"${prodSuffix}" - ], - "--overwrite" - ) - - def dbNFSBackup = openshift.process("-f", - "${templateDir}/jobs/postgres-backup-nfs/postgres-backup.cj.yaml", - "NAMESPACE=${prodProject}", - "TAG_NAME=v12.0.0", - "TARGET=gwells-pg12-production", - "PVC_NAME=gwells-backups", - "MONTHLY_BACKUPS=12", - "SCHEDULE=30 1 * * *", - "JOB_NAME=postgres-nfs-backup" - ) - openshift.apply(dbNFSBackup).label( - [ - 'app':"gwells-${prodSuffix}", - 'app-name':"${appName}", - 'env-name':"${prodSuffix}" - ], - 
"--overwrite" - ) - - // Create cronjob for licence import - def importLicencesCronjob = openshift.process("-f", - "${templateDir}/jobs/import-licences/import-licences.cj.json", - "ENV_NAME=${prodSuffix}", - "PROJECT=${prodProject}", - "TAG=${prodSuffix}", - "NAME=licences", - "COMMAND=import_licences", - "SCHEDULE=45 11 * * *" - ) - openshift.apply(importLicencesCronjob).label( - [ - 'app':"gwells-${prodSuffix}", - 'app-name':"${appName}", - 'env-name':"${prodSuffix}" - ], - "--overwrite" - ) - - // monitor the deployment status and wait until deployment is successful - echo "Waiting for deployment to production..." - def newVersion = openshift.selector("dc", "gwells-${prodSuffix}").object().status.latestVersion - def pods = openshift.selector('pod', [deployment: "gwells-${prodSuffix}-${newVersion}"]) - - // wait until pods reports as ready - timeout(15) { - pods.untilEach(2) { - return it.object().status.containerStatuses.every { - it.ready - } - } - } - - def pgtileservVersion = openshift.selector("dc", "pgtileserv-${prodSuffix}").object().status.latestVersion - def pgtileservPods = openshift.selector('pod', [deployment: "pgtileserv-${prodSuffix}-${newVersion}"]) - - // wait until each container in this deployment's pod reports as ready - timeout(15) { - pods.untilEach(2) { - return it.object().status.containerStatuses.every { - it.ready - } - } - } - - createDeploymentStatus(prodSuffix, 'SUCCESS', prodHost) - } - } - } - } - } - post { - always { - node('build') { - step([$class: 'Mailer', - notifyEveryUnstableBuild: true, - recipients: "Sustainment.Team@gov.bc.ca", - sendToIndividuals: true]) - } - } - } -} diff --git a/gwells/Makefile b/gwells/Makefile deleted file mode 100644 index cdc580829..000000000 --- a/gwells/Makefile +++ /dev/null @@ -1,68 +0,0 @@ -.DELETE_ON_ERROR: - - -################ -# General Jobs # -################ - -default: vue down - -test: test-node test-django - - -################### -# Individual jobs # -################### - -prep: - docker-compose pull - docker-compose build - -docker: - docker-compose up -d - -docker-staging: - docker-compose --env-file ./.env.test up - -down: - docker-compose down --volumes - -vue: prep - docker-compose up - -test-vue: - docker exec -ti gwells-frontend-1 /bin/bash -c "cd /app/frontend/; npm run test:unit -- --runInBand" - -test-vue-update: - docker exec -ti gwells-frontend-1 /bin/bash -c "cd /app/frontend/; npm run test:unit:update" - -vue-coverage: - docker exec -ti gwells-frontend-1 /bin/bash -c "cd /app/frontend/; npm run coverage:test" - -TEST_PATH ?= "." 
- -test-django: - docker exec -ti gwells-backend-1 /bin/bash -c "cd /app/backend/; python -m coverage run manage.py test ${TEST_PATH} --noinput" - -django-coverage: - docker exec -ti gwells-backend-1 /bin/bash -c "cd /app/backend/; coverage report" - -django-coverage-html: - docker exec -ti gwells-backend-1 /bin/bash -c "cd /app/backend/; coverage html" - -admin-django: - docker exec -ti gwells-backend-1 /bin/bash -c "cd /app/backend; python manage.py createsuperuser" - -backend: - docker-compose pull backend - docker-compose build backend - docker-compose up backend - -psql: - docker-compose exec db /bin/bash -c "psql -U gwells -d gwells" - -DEFAULT_API_TEST := 'local_run_all.sh' -TEST_FILE?="$(DEFAULT_API_TEST)" - -api-tests-local: - cd tests/api-tests && "./$(TEST_FILE)" diff --git a/gwells/app/.s2i/bin/README.md b/gwells/app/.s2i/bin/README.md deleted file mode 100644 index 64bf234f1..000000000 --- a/gwells/app/.s2i/bin/README.md +++ /dev/null @@ -1,3 +0,0 @@ -### Adding the NPM Build for our VUE components - -Insert the build actions just before `python manage.py collectstatic` so that the build can download the needed node modules, build the Vue components, and integrate the output into the application. diff --git a/gwells/app/.s2i/bin/assemble b/gwells/app/.s2i/bin/assemble deleted file mode 100755 index d3ce82f07..000000000 --- a/gwells/app/.s2i/bin/assemble +++ /dev/null @@ -1,99 +0,0 @@ -#!/bin/bash - -function is_django_installed() { - python -c "import django" &>/dev/null -} - -function should_collectstatic() { - is_django_installed && [[ -z "$DISABLE_COLLECTSTATIC" ]] -} - -# Install pipenv to the separate virtualenv to isolate it -# from system Python packages and packages in the main -# virtualenv. Executable is symlinked into ~/.local/bin -# to be accessible. This approach is inspired by pipsi -# (pip script installer). -function install_pipenv() { - echo "---> Installing pipenv packaging tool ..." - VENV_DIR=$HOME/.local/venvs/pipenv - virtualenv $VENV_DIR - $VENV_DIR/bin/pip --isolated install -U pipenv - mkdir -p $HOME/.local/bin - ln -s $VENV_DIR/bin/pipenv $HOME/.local/bin/pipenv -} - -set -e - -shopt -s dotglob -echo "---> Installing application source ..." -mv /tmp/src/* ./ - -if [[ ! -z "$UPGRADE_PIP_TO_LATEST" || ! -z "$ENABLE_PIPENV" ]]; then - echo "---> Upgrading pip to latest version ..." - curl -s -o get-pip.py https://bootstrap.pypa.io/pip/3.6/get-pip.py && python3 get-pip.py - python3 -m pip install -U setuptools==57.4.0 wheel - rm get-pip.py -fi - -cd backend - -if [[ ! -z "$ENABLE_PIPENV" ]]; then - install_pipenv - echo "---> Installing dependencies via pipenv ..." - if [[ -f Pipfile ]]; then - pipenv install --deploy - elif [[ -f requirements.txt ]]; then - pipenv install -r requirements.txt - fi - pipenv check -elif [[ -f requirements.txt ]]; then - echo "---> Installing dependencies ..." - pip install -r requirements.txt -elif [[ -f setup.py ]]; then - echo "---> Installing application ..." - python setup.py develop -fi - -cd .. -cd frontend -npm install -# Run unit tests in build stage with the code below -echo "--> testing and building frontend files" -npm run test:unit -- --runInBand -npm run build -# Copy the resulting files to the backend. -mkdir -p ../backend/gwells/static/ -cp -R dist/* ../backend/gwells/static/ -cp dist/index.html ../backend/gwells/templates/ -cd .. - -if should_collectstatic; then - ( - echo "---> Collecting Django static files ..."
- cd backend - - APP_HOME=${APP_HOME:-.} - # Look for 'manage.py' in the directory specified by APP_HOME, or the current directory - manage_file=$APP_HOME/manage.py - - if [[ ! -f "$manage_file" ]]; then - echo "WARNING: seems that you're using Django, but we could not find a 'manage.py' file." - echo "'manage.py collectstatic' ignored." - exit - fi - - if ! python $manage_file collectstatic --dry-run --noinput &> /dev/null; then - echo "WARNING: could not run 'manage.py collectstatic'. To debug, run:" - echo " $ python $manage_file collectstatic --noinput" - echo "Ignore this warning if you're not serving static files with Django." - exit - fi - - python $manage_file collectstatic --noinput - - cd .. - ) -fi - -# set permissions for any installed artifacts -fix-permissions /opt/app-root diff --git a/gwells/app/.s2i/environment b/gwells/app/.s2i/environment deleted file mode 100644 index 18f3b0467..000000000 --- a/gwells/app/.s2i/environment +++ /dev/null @@ -1 +0,0 @@ -DISABLE_MIGRATE=1 diff --git a/gwells/openshift/README.md b/gwells/openshift/README.md deleted file mode 100644 index 9f085842b..000000000 --- a/gwells/openshift/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Openshift Configuration and Notes - -GWELLS runs on the BC Gov Pathfinder OpenShift cluster with deployments and resources spanning 4 projects. - -## Projects/Namespaces - -`moe-gwells-tools` houses the Jenkins-based CI/CD system, which runs pipelines using the Jenkinsfile in the root of this repository. Builds and images are also located in this project (when an image is ready to be deployed to an environment, it is tagged across to that environment's project). - -`moe-gwells-dev` is where developer environments live. There is generally one active environment/deployment in this project per pull request. Active environments are deployed at a URL in the form of https://gwells-dev-pr-999.pathfinder.gov.bc.ca/gwells/. - -* note: resources for each pull request environment are grouped by the "app" label. For example, you can select all resources for PR 999 using the label selector `-l app=gwells-dev-pr-999`. These resources are cleaned up by Jenkins when the pull request is merged or closed. - -`moe-gwells-test` is where **test** and **demo** environments are located. Staging is automatically deployed when a pull request is made against the `master` branch (and the version deployed at staging can optionally be deployed to production). To deploy to the *demo* environment, make a pull request against the `demo` branch. - -`moe-gwells-prod` is where the production environment is located. - -## CI/CD Pipeline - -The Jenkins CI/CD pipeline is designed to roll out a new version of the GWELLS application from dev to staging to production automatically. It uses the Jenkinsfile in the repo root and OpenShift tools to build the app from source and deploy the image. - -The CI/CD pipeline functions by having Jenkins monitor GitHub pull requests. - -### Deploy to a dev environment - -When a pull request is made against the `release` branch, a dev environment will be created for the pull request, which should mirror the production environment as closely as is practical. Closing the pull request cleans up the dev environment. - -### Deploy to staging - -When a pull request is merged into release, the release pipeline will re-build the `release` branch into a new application image and deploy it to staging. This process relies on having a `release` -> `master` pull request open. 
- -### Deploy to production - -Use the Jenkins pipeline to approve the deployment of an image in staging to production. The staging image will be tagged as the latest production image and production will be redeployed. - - -## Prerequisites to deploying to staging/production - -The application will roll itself out to staging and production automatically using the Jenkinsfile. However, if starting from a brand new, empty environment, some resources and backing services need to be deployed first: - -#### Prerequisite images - -The database image comes from bcgov/postgresql-oracle_fdw. Ensure this image is present and can be pulled into the tools project. - -#### ConfigMaps and Secrets - -ConfigMaps and Secrets must be created for each environment. The Jenkinsfile is set up to make a copy of the following "base" objects for each environment (see the file in parentheses for keys/values that are required): - -* ConfigMaps: - * `gwells-global-config` (backend.dc.json) - -* Secrets: - * `gwells-minio-secrets` (backend.dc.json) - * `gwells-django-secrets` (backend.dc.json) - * `gwells-database-secrets` (postgresql.dc.yml) - -#### Minio object storage - -Private object storage is provided by a minio service, but minio is not currently deployed as part of the pipeline. Please see `openshift/minio.dc.json` for a deployment config template. - -#### Minio backup - -A backup cronjob is deployed by the pipeline, but the BuildConfig needs to be created once before running the pipeline. Please see `openshift/jobs/minio-backup`. Apply the BuildConfig template and then `oc tag` the resulting image to the project where backups will run. This only needs to be done once. - - -## Data migration - -Data will have to be migrated to the database running on OCP4. - -The following was tested for staging: -* ensure `ftw_reader` user is in place (for tile server). Must have connect privileges. 
-* `pg_dump -d gwells -Fp -c -C -f /tmp/backup/staging-20201208.sql --exclude-table=spatial_ref_sys` -* rsync to OCP4 (todo: automate this step) -* `psql -x -v ON_ERROR_STOP=1 2>&1 < staging-20201208.sql` diff --git a/gwells/openshift/backup-pvc.bc.yaml b/gwells/openshift/backup-pvc.bc.yaml deleted file mode 100644 index 72df94c7b..000000000 --- a/gwells/openshift/backup-pvc.bc.yaml +++ /dev/null @@ -1,52 +0,0 @@ ---- -kind: Template -apiVersion: v1 -metadata: - name: gwells-backend-bc - creationTimestamp: -parameters: -- name: NAME - displayName: Name Suffix - description: A suffix appended to all objects - required: true - value: backup-pvc -- name: SRC_REPO_URL - required: true - value: https://github.com/bcgov/gwells -- name: SRC_REPO_BRANCH - required: true - value: master -objects: -- kind: ImageStream - apiVersion: v1 - metadata: - name: ${NAME} -- kind: BuildConfig - apiVersion: v1 - metadata: - name: ${NAME} - creationTimestamp: - labels: - appver: ${NAME} - annotations: - description: Backup one PVC to another with rsync - spec: - successfulBuildsHistoryLimit: 3 - failedBuildsHistoryLimit: 3 - triggers: - - type: ImageChange - - type: ConfigChange - runPolicy: SerialLatestOnly - source: - type: Git - git: - uri: "${SRC_REPO_URL}" - ref: "${SRC_REPO_BRANCH}" - contextDir: openshift/docker/backup-pvc - strategy: - type: Docker - output: - to: - kind: ImageStreamTag - name: ${NAME}:latest - \ No newline at end of file diff --git a/gwells/openshift/backup-pvc.cj.yaml b/gwells/openshift/backup-pvc.cj.yaml deleted file mode 100644 index 73367b0d9..000000000 --- a/gwells/openshift/backup-pvc.cj.yaml +++ /dev/null @@ -1,143 +0,0 @@ ---- -apiVersion: v1 -kind: Template -metadata: - annotations: - description: "Scheduled Task to perform a folder backup" - tags: "cronjob,backup" - labels: - app: ${SOURCE_PVC}-backup-folder - cronjob: ${SOURCE_PVC}-backup-folder - template: "${JOB_NAME}-backup-folder-template" - name: "${JOB_NAME}-backup-folder-template" -parameters: - - name: "SOURCE_PVC" - displayName: "PVC to backup" - description: "The name of the PVC to be backed up." - required: true - - name: "JOB_NAME" - displayName: "Job Name" - description: "Name of the Scheduled Job to Create." - value: "pvc-backup" - required: true - - name: "SCHEDULE" - displayName: "Cron Schedule" - description: "Cron Schedule to Execute the Job (in UTC)" - # 12:00 UTC = 4:00 AM PDT - value: "0 12 * * *" - required: true - - name: "DEST_PVC_SIZE" - displayName: "PVC size" - description: "The size of the PVC to create." - required: false - value: "10Gi" - - name: "DEST_PVC_CLASS" - displayName: "PVC class" - description: "The class of PVC to create." - required: false - value: "gluster-file" - - name: "DEST_PVC_ACCESS" - displayName: "PVC access mode" - description: "The access mode of the PVC to create." - required: false - value: "ReadWriteOnce" - - name: "SOURCE_IMAGE_NAME" - displayName: "Source image Name" - description: "The name of the image to use for this resource." - required: true - value: "backup-pvc" - - name: "SOURCE_IMAGE_TAG" - displayName: "Source image tag" - description: "The name of the image tag to use for this resource." - required: true - value: "latest" - - name: "IMAGE_NAMESPACE" - displayName: "Image Namespace" - description: "The namespace of the OpenShift project containing the imagestream for the application." 
- required: true - value: "moe-gwells-tools" - - name: "SUCCESS_JOBS_HISTORY_LIMIT" - displayName: "Successful Job History Limit" - description: "The number of successful jobs that will be retained." - value: "5" - required: true - - name: "FAILED_JOBS_HISTORY_LIMIT" - displayName: "Failed Job History Limit" - description: "The number of failed jobs that will be retained." - value: "2" - required: true - - name: "JOB_BACKOFF_LIMIT" - displayName: "Job Backoff Limit" - description: "The number of attempts to try for a successful job outcome." - value: "0" - required: true - - name: "JOB_DEADLINE_SECONDS" - displayName: "Job deadline (seconds)" - description: "The maximum amount of time to let this job run." - value: "600" - required: true -objects: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - finalizers: - - kubernetes.io/pvc-protection - labels: - app: ${SOURCE_PVC}-backup - cronjob: ${SOURCE_PVC}-backup - template: "${JOB_NAME}-config-template" - name: ${SOURCE_PVC}-backup - spec: - accessModes: - - ${DEST_PVC_ACCESS} - resources: - requests: - storage: ${DEST_PVC_SIZE} - storageClassName: ${DEST_PVC_CLASS} - - apiVersion: batch/v1beta1 - kind: CronJob - metadata: - name: ${SOURCE_PVC}-backup - spec: - concurrencyPolicy: Forbid - failedJobsHistoryLimit: ${{FAILED_JOBS_HISTORY_LIMIT}} - jobTemplate: - metadata: - labels: - app: ${SOURCE_PVC}-backup - cronjob: ${SOURCE_PVC}-backup - template: "${JOB_NAME}-config-template" - spec: - backoffLimit: ${{JOB_BACKOFF_LIMIT}} - template: - spec: - activeDeadlineSeconds: ${{JOB_DEADLINE_SECONDS}} - containers: - - command: - - /bin/sh - - -c - - /entrypoint.sh - image: docker-registry.default.svc:5000/${IMAGE_NAMESPACE}/${SOURCE_IMAGE_NAME}:${SOURCE_IMAGE_TAG} - imagePullPolicy: Always - name: backup-cronjob - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /mnt/dest/ - name: ${JOB_NAME} - - mountPath: /mnt/src/ - name: ${SOURCE_PVC} - readOnly: true - dnsPolicy: ClusterFirst - restartPolicy: Never - schedulerName: default-scheduler - terminationGracePeriodSeconds: 30 - volumes: - - name: ${JOB_NAME} - persistentVolumeClaim: - claimName: ${SOURCE_PVC}-backup - - name: ${SOURCE_PVC} - persistentVolumeClaim: - claimName: ${SOURCE_PVC} - schedule: ${SCHEDULE} - successfulJobsHistoryLimit: ${{SUCCESS_JOBS_HISTORY_LIMIT}} diff --git a/gwells/openshift/backup.bc.yaml b/gwells/openshift/backup.bc.yaml deleted file mode 100644 index 615d1c084..000000000 --- a/gwells/openshift/backup.bc.yaml +++ /dev/null @@ -1,86 +0,0 @@ ---- -kind: Template -apiVersion: v1 -metadata: - name: "${NAME}-build-template" - creationTimestamp: -parameters: -- name: NAME - displayName: Name - description: The name assigned to all of the resources defined in this template. - required: true - value: backup -- name: GIT_REPO_URL - displayName: Git Repo URL - description: The URL to your GIT repo. - required: true - value: https://github.com/BCDevOps/backup-container.git -- name: GIT_REF - displayName: Git Reference - description: The git reference or branch. - required: true - value: master -- name: SOURCE_CONTEXT_DIR - displayName: Source Context Directory - description: The source context directory. - required: false - value: "/docker" -- name: SOURCE_IMAGE_KIND - displayName: Source Image Kind - description: The 'kind' (type) of the source image; typically ImageStreamTag, or - DockerImage. 
- required: true - value: DockerImage -- name: SOURCE_IMAGE_NAME - displayName: Source Image Name - description: The name of the source image. - required: true - value: docker-registry.default.svc:5000/moe-gwells-tools/postgresql-oracle-fdw -- name: SOURCE_IMAGE_TAG - displayName: Source Image Tag - description: The tag of the source image. - required: true - value: 9.6-1 -- name: DOCKER_FILE_PATH - displayName: Docker File Path - description: The path to the docker file defining the build. - required: false - value: Dockerfile -- name: OUTPUT_IMAGE_TAG - displayName: Output Image Tag - description: The tag given to the built image. - required: true - value: latest -objects: -- kind: ImageStream - apiVersion: v1 - metadata: - name: "${NAME}" -- kind: BuildConfig - apiVersion: v1 - metadata: - name: "${NAME}" - labels: - app: "${NAME}" - spec: - triggers: - - type: ImageChange - - type: ConfigChange - runPolicy: Serial - source: - type: Git - git: - uri: "${GIT_REPO_URL}" - ref: "${GIT_REF}" - contextDir: "${SOURCE_CONTEXT_DIR}" - strategy: - type: Docker - dockerStrategy: - from: - kind: "${SOURCE_IMAGE_KIND}" - name: "${SOURCE_IMAGE_NAME}:${SOURCE_IMAGE_TAG}" - dockerfilePath: "${DOCKER_FILE_PATH}" - output: - to: - kind: ImageStreamTag - name: "${NAME}:${OUTPUT_IMAGE_TAG}" \ No newline at end of file diff --git a/gwells/openshift/backup.cj.yaml b/gwells/openshift/backup.cj.yaml deleted file mode 100644 index 3d05042a9..000000000 --- a/gwells/openshift/backup.cj.yaml +++ /dev/null @@ -1,260 +0,0 @@ ---- -apiVersion: v1 -kind: Template -metadata: - annotations: - description: "Scheduled Task to perform a Database Backup" - tags: "cronjob,backup" - labels: - app: ${TARGET}-backup - cronjob: ${TARGET}-backup - template: "${JOB_NAME}-config-template" - name: "${JOB_NAME}-cronjob-template" -parameters: - - name: "TARGET" - displayName: "Database name (deployment config, not pod name)" - description: "The name of the database, by deployment config, to be backed up." - required: true - - name: "JOB_NAME" - displayName: "Job Name" - description: "Name of the Scheduled Job to Create." - value: "backup" - required: true - - name: "SCHEDULE" - displayName: "Cron Schedule" - description: "Cron Schedule to Execute the Job (in UTC)" - # 11:00 UTC = 3:00 AM PDT - value: "0 11 * * *" - required: true - - name: "PVC_SIZE" - displayName: "PVC size" - description: "The size of the PVC to create." - required: false - value: "5Gi" - - name: "PVC_CLASS" - displayName: "PVC class" - description: "The class of PVC to create." - required: false - value: "gluster-file" - - name: "PVC_ACCESS" - displayName: "PVC access mode" - description: "The access mode of the PVC to create." - required: false - value: "ReadWriteOnce" - - name: "SOURCE_IMAGE_NAME" - displayName: "Source Image Name" - description: "The name of the image to use for this resource." - required: true - value: "backup" - - name: "IMAGE_NAMESPACE" - displayName: "Image Namespace" - description: "The namespace of the OpenShift project containing the imagestream for the application." - required: true - value: "moe-gwells-tools" - - name: "TAG_NAME" - displayName: "Environment TAG name" - description: "The TAG name for this environment, e.g., dev, test, prod" - required: true - value: "latest" - - name: "DEFAULT_PORT" - displayName: "Database Service Port" - description: "The configured port for the database service" - required: true - value: "5432" - - name: "DATABASE_NAME" - displayName: "Database Name" - description: "The name of the database." 
- required: true - value: "gwells" - - name: "BACKUP_STRATEGY" - displayName: "Backup Strategy" - description: "The strategy to use for backups; for example daily, or rolling." - required: true - value: "rolling" - - name: "BACKUP_DIR" - displayName: "The root backup directory" - description: "The name of the root backup directory" - required: true - value: "/backups/" - - name: "NUM_BACKUPS" - displayName: "The number of backup files to be retained" - description: "The number of backup files to be retained. Used for the `daily` backup strategy. Ignored when using the `rolling` backup strategy." - required: false - value: "5" - - name: "DAILY_BACKUPS" - displayName: "Number of Daily Backups to Retain" - description: "The number of daily backup files to be retained. Used for the `rolling` backup strategy." - required: false - value: "7" - - name: "WEEKLY_BACKUPS" - displayName: "Number of Weekly Backups to Retain" - description: "The number of weekly backup files to be retained. Used for the `rolling` backup strategy." - required: false - value: "4" - - name: "MONTHLY_BACKUPS" - displayName: "Number of Monthly Backups to Retain" - description: "The number of monthly backup files to be retained. Used for the `rolling` backup strategy." - required: false - value: "1" - - name: "SUCCESS_JOBS_HISTORY_LIMIT" - displayName: "Successful Job History Limit" - description: "The number of successful jobs that will be retained." - value: "5" - required: true - - name: "FAILED_JOBS_HISTORY_LIMIT" - displayName: "Failed Job History Limit" - description: "The number of failed jobs that will be retained." - value: "2" - required: true - - name: "JOB_BACKOFF_LIMIT" - displayName: "Job Backoff Limit" - description: "The number of attempts to try for a successful job outcome." - value: "0" - required: false - - name: "JOB_DEADLINE_SECONDS" - displayName: "Job deadline (seconds)" - description: "The maximum amount of time to let this job run." 
- value: "600" - required: false -objects: - - apiVersion: v1 - data: - BACKUP_STRATEGY: ${BACKUP_STRATEGY} - DAILY_BACKUPS: ${DAILY_BACKUPS} - DATABASE_SERVICE_NAME: ${TARGET} - DEFAULT_PORT: ${DEFAULT_PORT} - MONTHLY_BACKUPS: ${MONTHLY_BACKUPS} - NUM_BACKUPS: ${NUM_BACKUPS} - POSTGRESQL_DATABASE: ${DATABASE_NAME} - WEEKLY_BACKUPS: ${WEEKLY_BACKUPS} - kind: ConfigMap - metadata: - labels: - app: ${TARGET}-backup - cronjob: ${TARGET}-backup - template: "${JOB_NAME}-config-template" - name: ${TARGET}-backup - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - finalizers: - - kubernetes.io/pvc-protection - labels: - app: ${TARGET}-backup - cronjob: ${TARGET}-backup - template: "${JOB_NAME}-config-template" - name: ${TARGET}-backup - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: ${PVC_SIZE} - storageClassName: ${PVC_CLASS} - - apiVersion: batch/v1beta1 - kind: CronJob - metadata: - name: ${TARGET}-backup - spec: - concurrencyPolicy: Forbid - failedJobsHistoryLimit: ${{FAILED_JOBS_HISTORY_LIMIT}} - jobTemplate: - metadata: - creationTimestamp: null - labels: - app: ${TARGET}-backup - cronjob: ${TARGET}-backup - template: "${JOB_NAME}-config-template" - spec: - backoffLimit: ${{JOB_BACKOFF_LIMIT}} - template: - metadata: - creationTimestamp: null - spec: - activeDeadlineSeconds: ${{JOB_DEADLINE_SECONDS}} - containers: - - command: - - /bin/bash - - -c - - /backup.sh -1 - env: - - name: BACKUP_DIR - value: /backups/ - - name: BACKUP_STRATEGY - valueFrom: - configMapKeyRef: - key: BACKUP_STRATEGY - name: ${TARGET}-backup - - name: NUM_BACKUPS - valueFrom: - configMapKeyRef: - key: NUM_BACKUPS - name: ${TARGET}-backup - optional: true - - name: DAILY_BACKUPS - valueFrom: - configMapKeyRef: - key: DAILY_BACKUPS - name: ${TARGET}-backup - optional: true - - name: WEEKLY_BACKUPS - valueFrom: - configMapKeyRef: - key: WEEKLY_BACKUPS - name: ${TARGET}-backup - optional: true - - name: MONTHLY_BACKUPS - valueFrom: - configMapKeyRef: - key: MONTHLY_BACKUPS - name: ${TARGET}-backup - optional: true - - name: DATABASE_SERVICE_NAME - valueFrom: - configMapKeyRef: - key: DATABASE_SERVICE_NAME - name: ${TARGET}-backup - - name: DEFAULT_PORT - valueFrom: - configMapKeyRef: - key: DEFAULT_PORT - name: ${TARGET}-backup - optional: true - - name: POSTGRESQL_DATABASE - valueFrom: - configMapKeyRef: - key: POSTGRESQL_DATABASE - name: ${TARGET}-backup - - name: POSTGRESQL_USER - valueFrom: - secretKeyRef: - key: database-user - name: ${TARGET} - - name: POSTGRESQL_PASSWORD - valueFrom: - secretKeyRef: - key: database-password - name: ${TARGET} - image: docker-registry.default.svc:5000/${IMAGE_NAMESPACE}/${SOURCE_IMAGE_NAME}:${TAG_NAME} - imagePullPolicy: Always - name: backup-cronjob - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /backups/ - name: backup - dnsPolicy: ClusterFirst - restartPolicy: Never - schedulerName: default-scheduler - securityContext: {} - serviceAccount: default - serviceAccountName: default - terminationGracePeriodSeconds: 30 - volumes: - - name: ${JOB_NAME} - persistentVolumeClaim: - claimName: ${TARGET}-backup - schedule: ${SCHEDULE} - successfulJobsHistoryLimit: ${{SUCCESS_JOBS_HISTORY_LIMIT}} - suspend: false diff --git a/gwells/openshift/bddstack.bc.yaml b/gwells/openshift/bddstack.bc.yaml deleted file mode 100644 index 21a1e2ead..000000000 --- a/gwells/openshift/bddstack.bc.yaml +++ /dev/null @@ -1,93 +0,0 @@ -# Build config for Jenkins BDDStack functional testing -# -# 
Process this file, creating or replacing imagestreams and builds -# $ oc process -f openshift/bddstack.bc.yaml | oc [create|replace] -n -f - -# -apiVersion: v1 -kind: Template -metadata: - name: bddstack -objects: -- apiVersion: v1 - kind: ImageStream - metadata: - name: bddstack - labels: - build: bddstack - annotations: - openshift.io/generated-by: OpenShiftNewBuild -- apiVersion: v1 - kind: BuildConfig - metadata: - annotations: - openshift.io/generated-by: OpenShiftNewBuild - labels: - build: bddstack - name: bddstack - spec: - output: - to: - kind: ImageStreamTag - name: bddstack:latest - source: - dockerfile: | - # https://github.com/BCDevOps/BDDStack - FROM registry.access.redhat.com/openshift3/jenkins-slave-base-rhel7 - - EXPOSE 8080 - - ENV PATH=$HOME/.local/bin/:$PATH \ - LC_ALL=en_US.UTF-8 \ - LANG=en_US.UTF-8 - - ENV SUMMARY="Jenkins slave with chrome and firefox installed for use with functional/BDD tests that use BDDStack." \ - DESCRIPTION="Jenkins pipeline slave with chrome and firefox for testing application with headless browser. (aka 'BDDStack')" - - LABEL summary="$SUMMARY" \ - description="$DESCRIPTION" \ - io.k8s.description="$DESCRIPTION" \ - io.k8s.display-name="Jenkins-Pipeline-BDDStack" \ - io.openshift.expose-services="8080:http" \ - io.openshift.tags="builder,jenkins-jnlp-chrome,jenkins-jnlp-firefox,jenkins-jnlp" \ - release="1" - - # NOTES: - # We need to call 2 (!) yum commands before being able to enable repositories properly - # This is a workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1479388 - # Chrome install info: https://access.redhat.com/discussions/917293 - RUN yum repolist > /dev/null && \ - yum install -y yum-utils && \ - yum-config-manager --disable \* &> /dev/null && \ - yum-config-manager --enable rhel-server-rhscl-7-rpms && \ - yum-config-manager --enable rhel-7-server-rpms && \ - yum-config-manager --enable rhel-7-server-optional-rpms && \ - yum-config-manager --enable rhel-7-server-fastrack-rpms && \ - UNINSTALL_PKGS="java-1.8.0-openjdk-headless.i686" &&\ - INSTALL_PKGS="redhat-lsb libXScrnSaver gdk-pixbuf2 xorg-x11-server-Xvfb wget firefox" && \ - yum remove -y $UNINSTALL_PKGS &&\ - yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ - rpm -V $INSTALL_PKGS && \ - yum clean all -y && \ - wget https://dl.google.com/linux/direct/google-chrome-stable_current_x86_64.rpm && \ - yum -y localinstall google-chrome-stable_current_x86_64.rpm && \ - rm google-chrome-stable_current_x86_64.rpm && \ - mkdir -p /home/jenkins/.pki && \ - chmod 777 /home/jenkins/.pki && \ - yum info google-chrome-stable - - USER 1001 - - type: Dockerfile - strategy: - dockerStrategy: - env: - - name: OPENSHIFT_JENKINS_JVM_ARCH - value: x86_64 - from: - kind: ImageStreamTag - name: jenkins-slave-base-rhel7:latest - type: Docker - triggers: - - type: ConfigChange - - imageChange: {} - type: ImageChange diff --git a/gwells/openshift/caddy.bc.json b/gwells/openshift/caddy.bc.json deleted file mode 100644 index 9166faf83..000000000 --- a/gwells/openshift/caddy.bc.json +++ /dev/null @@ -1,110 +0,0 @@ -{ - "kind": "Template", - "apiVersion": "v1", - "metadata": { - "name": "${APP_NAME}" - }, - "parameters": [ - { - "name": "NAME", - "displayName": "Name", - "description": "The suffix for all created objects", - "required": false, - "value": "proxy-caddy" - }, - { - "name": "GIT_REPO", - "displayName": "GIT_REPO", - "description": "URL to GitHub repo with static content", - "required": false, - "value": "https://github.com/bcgov/gwells.git" - }, - { - "name": 
"GIT_BRANCH", - "displayName": "GIT_BRANCH", - "description": "Branch related to GitHub repo", - "required": false, - "value": "master" - }, - { - "name": "IMG_SRC", - "displayName": "IMG_SRC", - "description": "Source image name", - "required": false, - "value": "bcgov-s2i-caddy" - } - ], - "objects": [ - { - "kind": "ImageStream", - "apiVersion": "v1", - "metadata": { - "name": "${NAME}", - "creationTimestamp": null, - "labels": { - "app": "${NAME}" - } - }, - "spec": { - "lookupPolicy": { - "local": false - } - }, - "status": { - "dockerImageRepository": "" - } - }, - { - "kind": "BuildConfig", - "apiVersion": "v1", - "metadata": { - "name": "${NAME}", - "creationTimestamp": null, - "labels": { - "app": "${NAME}" - } - }, - "spec": { - "triggers": [ - { - "type": "ConfigChange" - }, - { - "type": "ImageChange", - "imageChange": {} - } - ], - "runPolicy": "SerialLatestOnly", - "source": { - "type": "Git", - "contextDir": "/maintenance", - "git": { - "uri": "${GIT_REPO}", - "ref": "${GIT_BRANCH}" - } - }, - "strategy": { - "type": "Source", - "sourceStrategy": { - "from": { - "kind": "ImageStreamTag", - "namespace": "openshift", - "name": "${IMG_SRC}:latest" - } - } - }, - "output": { - "to": { - "kind": "ImageStreamTag", - "name": "${NAME}:latest" - } - }, - "resources": {}, - "postCommit": {}, - "nodeSelector": null, - "successfulBuildsHistoryLimit": 5, - "failedBuildsHistoryLimit": 5 - } - } - ] -} diff --git a/gwells/openshift/caddy.dc.json b/gwells/openshift/caddy.dc.json deleted file mode 100644 index 9e9bbd4e0..000000000 --- a/gwells/openshift/caddy.dc.json +++ /dev/null @@ -1,138 +0,0 @@ -{ - "kind": "Template", - "apiVersion": "v1", - "metadata": { - "name": "${APP_NAME}" - }, - "parameters": [ - { - "name": "NAME", - "displayName": "Name", - "description": "The suffix for all created objects", - "required": false, - "value": "proxy-caddy" - }, - { - "name": "BUILD_PROJECT", - "displayName": "Build project", - "description": "The suffix for all created objects", - "required": false, - "value": "moe-gwells-tools" - } - ], - "objects": [ - { - "kind": "DeploymentConfig", - "apiVersion": "v1", - "metadata": { - "name": "${NAME}", - "creationTimestamp": null, - "labels": { - "app": "${NAME}" - } - }, - "spec": { - "strategy": { - "type": "Rolling", - "rollingParams": { - "updatePeriodSeconds": 1, - "intervalSeconds": 1, - "timeoutSeconds": 600, - "maxUnavailable": "25%", - "maxSurge": "25%" - }, - "resources": {}, - "activeDeadlineSeconds": 21600 - }, - "triggers": [ - { - "type": "ConfigChange" - }, - { - "type": "ImageChange", - "imageChangeParams": { - "automatic": true, - "containerNames": [ - "${NAME}" - ], - "from": { - "kind": "ImageStreamTag", - "namespace": "${BUILD_PROJECT}", - "name": "${NAME}:latest" - } - } - } - ], - "replicas": 1, - "revisionHistoryLimit": 10, - "test": false, - "selector": { - "app": "${NAME}", - "deploymentconfig": "${NAME}" - }, - "template": { - "metadata": { - "creationTimestamp": null, - "labels": { - "app": "${NAME}", - "deploymentconfig": "${NAME}" - }, - "annotations": { - "openshift.io/generated-by": "OpenShiftNewApp" - } - }, - "spec": { - "containers": [ - { - "name": "${NAME}", - "image": " ", - "ports": [ - { - "containerPort": 2015, - "protocol": "TCP" - } - ], - "resources": {}, - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "imagePullPolicy": "Always" - } - ], - "restartPolicy": "Always", - "terminationGracePeriodSeconds": 30, - "dnsPolicy": "ClusterFirst", - "securityContext": {}, - 
"schedulerName": "default-scheduler" - } - } - } - }, - { - "kind": "Service", - "apiVersion": "v1", - "metadata": { - "name": "${NAME}", - "creationTimestamp": null, - "labels": { - "app": "${NAME}" - } - }, - "spec": { - "ports": [ - { - "name": "2015-tcp", - "protocol": "TCP", - "port": 2015, - "targetPort": 2015 - } - ], - "selector": { - "app": "${NAME}", - "deploymentconfig": "${NAME}" - }, - "type": "ClusterIP", - "sessionAffinity": "None" - } - } - ] -} diff --git a/gwells/openshift/cron-backup/cron-backup.sh b/gwells/openshift/cron-backup/cron-backup.sh deleted file mode 100755 index bb7226112..000000000 --- a/gwells/openshift/cron-backup/cron-backup.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/bash - - -# Halt on errors/unsets, change fail returns, change field separator, verbose mode -# -set -euo pipefail -IFS=$'\n\t' -[ "${VERBOSE:-}" != true ]|| set -x - - -# Parameters and mode variables -# -PARAM=${1:-""} -PROJECT=$( echo ${PARAM} | cut -d "/" -f 1 ) -TARGET=$( echo ${PARAM} | cut -d "/" -f 2 ) - -# App and build settings -# -SCRIPT_DIR=$(dirname $0) -OC_BUILD=${OC_BUILD:-${SCRIPT_DIR}/../backup.bc.yaml} -OC_DEPLOY=${OC_DEPLOY:-${SCRIPT_DIR}/../backup.cj.yaml} -# -DRY_RUN=${DRY_RUN:-false} -SCHEDULE=${SCHEDULE:-} - - -# Show message if passed any params -# -if [ "${#}" -ne 1 ] -then - set +x - echo - echo "PostgreSQL Backup Cronjobs" - echo - echo "Setup backup cronjobs for a PostgreSQL deployment." - echo "Defaults to 11 AM UTC (3 AM PDT, 0 11 * * *)." - echo "Override defaults with runtime variables." - echo - echo "Usage:" - echo " ./cron-backup.sh [project]/[database]" - echo - echo "Examples:" - echo " # Deploy to staging environment" - echo " ./cron-backup.sh moe-gwells-test/gwells-pgsql-staging" - echo - echo " # Deploy to demo environment, custom testing schedule" - echo " SCHEDULE=\"*/5 * * * *\" ./cron-backup.sh moe-gwells-test/gwells-pgsql-demo" - echo - exit -fi - - -# Check project -# -CHECK=$( oc projects | tr -d '*' | grep -v "Using project" | grep "${PROJECT}" | awk '{ print $1 }' || echo ) -if [ "${PROJECT}" != "${CHECK}" ] -then - echo - echo "Unable to access project ${PROJECT}" - echo - exit -fi - - -# Create build config and image -# -oc process -f ${OC_BUILD} \ - | oc apply -n moe-gwells-tools -f - - - -# Deploy, add SCHEDULE is provided -# -if [ -z "${SCHEDULE}" ] -then - oc process -f ${OC_DEPLOY} -p TARGET=${TARGET} \ - | oc apply -n ${PROJECT} -f - -else - oc process -f ${OC_DEPLOY} -p TARGET=${TARGET} -p SCHEDULE=${SCHEDULE} \ - | oc apply -n ${PROJECT} -f - -fi \ No newline at end of file diff --git a/gwells/openshift/docker/backend/README.md b/gwells/openshift/docker/backend/README.md deleted file mode 100644 index 115dee143..000000000 --- a/gwells/openshift/docker/backend/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# Docker Images - -Dockerfiles that are the source of GWELLS custom images. - -## Python Images with GDAL, GEOS, and PROJ.4 compiled from source - * [OpenShift, based on RHEL7](Dockerfile.rhel7) - * [Local Development, based on CentOS](Dockerfile) - - -``` -docker build . -f Dockerfile -t python-gis -docker build . 
-f Dockerfile -t local/python-gis -docker run -it --rm --user root --entrypoint /bin/bash local/python-gis - -``` - - -The OpenShift image can only be built on a machine with a valid subscription, so either on OpenShift itself or on a local development workstation with a valid RedHat Subscription: - - -``` -oc -n moe-gwells-tools new-build https://github.com/bcgov/gwells#feature/gis-backend-with-configmap --context-dir=docker-images/python-gdal --name gwells-python - - -oc -n moe-gwells-tools tag gwells-python:latest gwells-python:3.6 -oc -n moe-gwells-tools tag gwells-python:latest gwells-python:GDAL - - - -``` - - - -``` -docker build . -f Dockerfile -t gwells-python:gdal - -``` - -## Misc: -oc -n csnr-devops-lab-tools new-build . --name python-36-rhel7-gdal --context-dir=. - -(or `oc -n csnr-devops-lab-tools new-build https://github.com/bcgov/gwells.git#dev --name python-36-rhel7-gdal --context-dir=.` to reference a specific branch) - -To reset: -oc -n moe-gwells-tools delete bc/tst-python-gdal istag/tst-python-gdal:latest is/tst-python-gdal -oc -n csnr-devops-lab-tools logs -f bc/python-36-rhel7-gdal - -oc -n moe-gwells-tools export bc/gwells-python is/gwells-python -o json --as-template=gwells-python > backend-test.bc.json - - - -## License - -Code released under the [Apache License, Version 2.0](https://github.com/bcgov/gwells/blob/master/LICENSE). diff --git a/gwells/openshift/docker/backend/gwells-python.bc.ocp4.yaml b/gwells/openshift/docker/backend/gwells-python.bc.ocp4.yaml deleted file mode 100644 index 2adf91b41..000000000 --- a/gwells/openshift/docker/backend/gwells-python.bc.ocp4.yaml +++ /dev/null @@ -1,46 +0,0 @@ -apiVersion: build.openshift.io/v1 -kind: BuildConfig -metadata: - annotations: - openshift.io/generated-by: OpenShiftNewBuild - creationTimestamp: 2019-02-05T18:41:45Z - labels: - build: gwells-python - name: gwells-python - namespace: 26e83e-tools -spec: - failedBuildsHistoryLimit: 5 - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: gwells-python:v3 - postCommit: {} - resources: - requests: - memory: 1Gi - cpu: 1000m - limits: - memory: 6Gi - cpu: 6000m - runPolicy: Serial - source: - contextDir: openshift/docker/backend - git: - ref: release - uri: https://github.com/bcgov/gwells - type: Git - strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: 'python-36-rhel7:v1' - namespace: 26e83e-tools - type: Docker - successfulBuildsHistoryLimit: 5 - triggers: - - type: ConfigChange - - imageChange: - type: ImageChange -status: - lastVersion: 1 diff --git a/gwells/openshift/docker/backend/gwells-python.bc.yaml b/gwells/openshift/docker/backend/gwells-python.bc.yaml deleted file mode 100644 index 5e4136471..000000000 --- a/gwells/openshift/docker/backend/gwells-python.bc.yaml +++ /dev/null @@ -1,45 +0,0 @@ -apiVersion: build.openshift.io/v1 -kind: BuildConfig -metadata: - annotations: - openshift.io/generated-by: OpenShiftNewBuild - creationTimestamp: 2019-02-05T18:41:45Z - labels: - build: gwells-python - name: gwells-python - namespace: 26e83e-tools -spec: - failedBuildsHistoryLimit: 5 - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: gwells-python:latest - postCommit: {} - resources: - requests: - memory: 1Gi - cpu: 1000m - limits: - memory: 4Gi - cpu: 4000m - runPolicy: Serial - source: - contextDir: openshift/docker/backend - git: - ref: release - uri: https://github.com/bcgov/gwells - type: Git - strategy: - dockerStrategy: - from: - kind: DockerImage - name: registry.access.redhat.com/ubi8/python-36:1 - type: 
Docker - successfulBuildsHistoryLimit: 5 - triggers: - - type: ConfigChange - - imageChange: - type: ImageChange -status: - lastVersion: 1 diff --git a/gwells/openshift/docker/backup-pvc/Dockerfile b/gwells/openshift/docker/backup-pvc/Dockerfile deleted file mode 100644 index 22c1dafdc..000000000 --- a/gwells/openshift/docker/backup-pvc/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM alpine - -RUN apk add rsync - -COPY entrypoint.sh / diff --git a/gwells/openshift/docker/backup-pvc/entrypoint.sh b/gwells/openshift/docker/backup-pvc/entrypoint.sh deleted file mode 100755 index 4eac1bdad..000000000 --- a/gwells/openshift/docker/backup-pvc/entrypoint.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/sh - - -# Halt on errors/unsets, change fail returns, change field separator, verbose mode -# -set -euo pipefail -IFS=$'\n\t' -[ "${VERBOSE:-}" != true ]|| set -x - - -# PVC mount and folder variables, removing any trailing slashes (%/) -# -SRC_MNT=${SRC_MNT:-/mnt/src} -DEST_MNT=${DEST_MNT:-/mnt/dest} -SRC_MNT=${SRC_MNT%/} -DEST_MNT=${DEST_MNT%/} -# -DEST_DIR=${DEST_MNT}/$(hostname) -TMP_BK=${NEW_BK:-${DEST_DIR}/bk-tmp} -PRV_BK=${PRV_BK:-${DEST_DIR}/bk-prev} -NEW_BK=${NEW_BK:-${DEST_DIR}/bk} - - -# Drop to one previous backup -# Either directory does not exist, or remove directory. -[ ! -d ${PRV_BK} ]|| rm -rf ${PRV_BK} - - -# Copy and verify -# -mkdir -p ${TMP_BK} -if ! rsync -avh ${SRC_MNT}/ ${TMP_BK}/ -then - echo "Copy failed! Previous backups retained." - rm -rf ${TMP_BK} - exit 1 -fi - - -# Shuffle and show disk usage -# Either directory doesn't exist, or move it -[ ! -d ${NEW_BK} ]|| mv ${NEW_BK} ${PRV_BK} -mv ${TMP_BK} ${NEW_BK} -du -hd 1 ${DEST_MNT} diff --git a/gwells/openshift/docker/pg12-postgis2.5/Dockerfile b/gwells/openshift/docker/pg12-postgis2.5/Dockerfile deleted file mode 100644 index ae058889a..000000000 --- a/gwells/openshift/docker/pg12-postgis2.5/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM registry.access.redhat.com/rhscl/postgresql-12-rhel7:1-10 - - -RUN yum -y install \ - --enablerepo="epel,rhel-7-server-optional-rpms" \ - --setopt=skip_missing_names_on_install=False \ - postgis25_12 \ - postgis25_12-client \ - && yum -y clean all --enablerepo="epel,rhel-7-server-optional-rpms" - diff --git a/gwells/openshift/export.cj.json b/gwells/openshift/export.cj.json deleted file mode 100644 index 796791e0e..000000000 --- a/gwells/openshift/export.cj.json +++ /dev/null @@ -1,195 +0,0 @@ -{ - "kind": "Template", - "apiVersion": "v1", - "metadata": {}, - "parameters": [ - { - "name": "ENV_NAME", - "required": true - }, - { - "name": "PROJECT", - "required": true - }, - { - "name": "TAG", - "required": false, - "value": "${ENV_NAME}" - }, - { - "name": "NAME", - "required": true - }, - { - "name": "COMMAND", - "required": true - }, - { - "name": "SCHEDULE", - "required": true - } - ], - "objects": [ - { - "apiVersion": "batch/v1beta1", - "kind": "CronJob", - "metadata": { - "name": "${NAME}" - }, - "spec": { - "schedule": "${SCHEDULE}", - "concurrencyPolicy": "Forbid", - "jobTemplate": { - "spec": { - "template": { - "spec": { - "containers": [ - { - "name": "${NAME}", - "image": "docker-registry.default.svc:5000/${PROJECT}/gwells-${ENV_NAME}:${TAG}", - "imagePullPolicy": "Always", - "command": [ - "python", - "backend/manage.py", - "${COMMAND}" - ], - "env": [ - { - "name": "DATABASE_SERVICE_NAME", - "value": "gwells-pg12-${ENV_NAME}" - }, - { - "name": "DATABASE_ENGINE", - "value": "postgresql" - }, - { - "name": "DATABASE_NAME", - "valueFrom": { - "secretKeyRef": { - "name": 
"gwells-pgsql-${ENV_NAME}", - "key": "database-name" - } - } - }, - { - "name": "DATABASE_USER", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pgsql-${ENV_NAME}", - "key": "database-user" - } - } - }, - { - "name": "DATABASE_PASSWORD", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pgsql-${ENV_NAME}", - "key": "database-password" - } - } - }, - { - "name": "DATABASE_SCHEMA", - "value": "public" - }, - { - "name": "MINIO_ACCESS_KEY", - "valueFrom": { - "secretKeyRef": { - "name": "minio-access-parameters-${ENV_NAME}", - "key": "MINIO_ACCESS_KEY" - } - } - }, - { - "name": "MINIO_SECRET_KEY", - "valueFrom": { - "secretKeyRef": { - "name": "minio-access-parameters-${ENV_NAME}", - "key": "MINIO_SECRET_KEY" - } - } - }, - { - "name": "S3_PUBLIC_ACCESS_KEY", - "valueFrom": { - "secretKeyRef": { - "name": "minio-access-parameters-${ENV_NAME}", - "key": "S3_PUBLIC_ACCESS_KEY" - } - } - }, - { - "name": "S3_PUBLIC_SECRET_KEY", - "valueFrom": { - "secretKeyRef": { - "name": "minio-access-parameters-${ENV_NAME}", - "key": "S3_PUBLIC_SECRET_KEY" - } - } - }, - { - "name": "S3_HOST", - "valueFrom": { - "secretKeyRef": { - "name": "minio-access-parameters-${ENV_NAME}", - "key": "S3_HOST" - } - } - }, - { - "name": "S3_ROOT_BUCKET", - "valueFrom": { - "secretKeyRef": { - "name": "minio-access-parameters-${ENV_NAME}", - "key": "S3_ROOT_BUCKET" - } - } - }, - { - "name": "S3_PRIVATE_HOST", - "valueFrom": { - "configMapKeyRef": { - "key": "S3_PRIVATE_HOST", - "name": "gwells-global-config-${ENV_NAME}" - } - } - }, - { - "name": "S3_WELL_EXPORT_BUCKET", - "valueFrom": { - "configMapKeyRef": { - "key": "S3_WELL_EXPORT_BUCKET", - "name": "gwells-global-config-${ENV_NAME}" - } - } - }, - { - "name": "S3_PRIVATE_BUCKET", - "valueFrom": { - "configMapKeyRef": { - "key": "S3_PRIVATE_BUCKET", - "name": "gwells-global-config-${ENV_NAME}" - } - } - } - ], - "envFrom": [ - { - "configMapRef": { - "name": "gwells-global-config-${ENV_NAME}" - } - } - ] - } - ], - "restartPolicy": "OnFailure" - } - } - } - } - } - } - ] -} \ No newline at end of file diff --git a/gwells/openshift/jenkins/003-create-jobs.groovy b/gwells/openshift/jenkins/003-create-jobs.groovy deleted file mode 100644 index 71424729c..000000000 --- a/gwells/openshift/jenkins/003-create-jobs.groovy +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/env groovy - -// -// This needs to come before 003-register-github-webhooks.groovy -// - -import static jenkins.model.Jenkins.instance as jenkins - -import com.cloudbees.hudson.plugins.folder.computed.DefaultOrphanedItemStrategy -import org.jenkinsci.plugins.github_branch_source.* -import jenkins.scm.impl.trait.* -import jenkins.scm.api.mixin.* -import org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject -import jenkins.branch.BranchSource - -import com.adobe.jenkins.disable_github_multibranch_status.DisableStatusUpdateTrait -import jenkins.plugins.git.traits.WipeWorkspaceTrait -import org.csanchez.jenkins.plugins.kubernetes.KubernetesFolderProperty - -// Get configuration from jobs configmap -def repoOwner = new File('/var/run/configs/jobs/repo.owner').getText('UTF-8').trim() -def appRepoAdmin = new File('/var/run/configs/jobs/repo.name.Admin').getText('UTF-8').trim() -def appRepoApi = new File('/var/run/configs/jobs/repo.name.Api').getText('UTF-8').trim() -def appRepoPublic = new File('/var/run/configs/jobs/repo.name.Public').getText('UTF-8').trim() -def appName = new File('/var/run/configs/jobs/app.name').getText('UTF-8').trim() -def name = 
appName.toLowerCase().replaceAll("/[^A-Za-z0-9 ]/", "").replaceAll("\\s", "-") - -def githubCredentialsId = "github-account" - -def pullRequestTraits = [ - new ForkPullRequestDiscoveryTrait([ChangeRequestCheckoutStrategy.MERGE].toSet(),new ForkPullRequestDiscoveryTrait.TrustContributors()), - new DisableStatusUpdateTrait(), - new WipeWorkspaceTrait() -] - -def masterTraits = [ - new RegexSCMHeadFilterTrait("^(develop|test|master)"), - new BranchDiscoveryTrait(3), - new DisableStatusUpdateTrait(), - new WipeWorkspaceTrait() -] - -def hotfixTraits = [ - new RegexSCMHeadFilterTrait("^(hotfix)"), - new BranchDiscoveryTrait(3), - new DisableStatusUpdateTrait(), - new WipeWorkspaceTrait() -] - -core_jobs = [ new Expando(jobName: "dev-${name}-public", - displayName: "dev-${name}-public", - owner: repoOwner, - repo: appRepoPublic, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-develop", - traits: pullRequestTraits, - startJob: true), - new Expando(jobName: "dev-${name}-api", - displayName: "dev-${name}-api", - owner: repoOwner, - repo: appRepoApi, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-develop", - traits: pullRequestTraits, - startJob: true), - new Expando(jobName: "dev-${name}-admin", - displayName: "dev-${name}-admin", - owner: repoOwner, - repo: appRepoAdmin, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-develop", - traits: pullRequestTraits, - startJob: true), - new Expando(jobName: "cicd-${name}-public", - displayName: "cicd-${name}-public", - owner: repoOwner, - repo: appRepoPublic, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-cicd", - traits: masterTraits, - startJob: true), - new Expando(jobName: "cicd-${name}-api", - displayName: "cicd-${name}-api", - owner: repoOwner, - repo: appRepoApi, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-cicd", - traits: masterTraits, - startJob: true), - new Expando(jobName: "cicd-${name}-admin", - displayName: "cicd-${name}-admin", - owner: repoOwner, - repo: appRepoAdmin, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-cicd", - traits: masterTraits, - startJob: true), - new Expando(jobName: "hotfix-${name}-public", - displayName: "hotfix-${name}-public", - owner: repoOwner, - repo: appRepoPublic, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-hotfix", - traits: hotfixTraits, - startJob: true), - new Expando(jobName: "hotfix-${name}-api", - displayName: "hotfix-${name}-api", - owner: repoOwner, - repo: appRepoApi, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-hotfix", - traits: hotfixTraits, - startJob: true), - new Expando(jobName: "hotfix-${name}-admin", - displayName: "hotfix-${name}-admin", - owner: repoOwner, - repo: appRepoAdmin, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-hotfix", - traits: hotfixTraits, - startJob: true), - ] - - -jobs = jenkins.getAllItems() - -for (core_job in core_jobs) { - - def shouldCreate = true - jobs.each { j -> - if (j instanceof org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject && - j.fullName.contains(core_job.jobName)) { - println '----> Already have a job for ' + j.fullName + ' of type:' + j.getClass() - println j - shouldCreate = false - } - } - if (!shouldCreate) { - continue - } - println '----> configuring job ' + core_job.jobName - - // start by creating the toplevel folder - def folder = jenkins.createProject(WorkflowMultiBranchProject, core_job.jobName) - - // Configure the 
Github SCM integration - def scm = new GitHubSCMSource(core_job.owner, core_job.repo) - scm.credentialsId = core_job.credentialsId - scm.traits = core_job.traits - folder.getSourcesList().add(new BranchSource(scm)) - - folder.displayName = core_job.displayName - - // Delete orphan items after 5 days - folder.orphanedItemStrategy = new DefaultOrphanedItemStrategy(true, "-1", "-1") - - // Configure what Jenkinsfile we should be looking for - folder.projectFactory.scriptPath = core_job.jenkinsFilePath - - folder.addProperty(new KubernetesFolderProperty()) - - folder.triggers.clear() - - jenkins.save() - - println '----> configured job ' + core_job.jobName - - if (core_job.startJob) { - Thread.start { - sleep 3000 // 3 seconds - println '----> Running Github organization scan for job ' + core_job.jobName - folder.scheduleBuild() - } - } -} - - -println '<--- Create Jobs: jobs created.' \ No newline at end of file diff --git a/gwells/openshift/jenkins/CleanupPR.groovy b/gwells/openshift/jenkins/CleanupPR.groovy deleted file mode 100644 index d1f8f76d1..000000000 --- a/gwells/openshift/jenkins/CleanupPR.groovy +++ /dev/null @@ -1,67 +0,0 @@ -// note: a copy of this file lives in Jenkins config. We need a way to update the Jenkins config -// when changes are made to this file. - -import groovy.json.JsonSlurper - -String TOOLS_PROJECT = "moe-gwells-tools" -String DEV_PROJECT = "moe-gwells-dev" - -def jsonSlurper = new JsonSlurper() - -// the webhook trigger comes from GitHub as a POST request with a "payload" object in the body -String ghEventType = build.buildVariableResolver.resolve("x_github_event") -def payload = jsonSlurper.parseText(build.buildVariableResolver.resolve("payload")) -def prNum = payload['number'] - - -// this script is triggered on all events, but we are specifically interested in pull requests that are closed -// pull requests come with actions like "opened", "closed". Merged and closed are the same event (there is an -// additional "merged: true" property) -if (ghEventType == 'pull_request' && payload['action'] == 'closed' && prNum) { - - def sout = new StringBuilder(), serr = new StringBuilder() - - // delete all the objects in the DEV namespace labeled with this PR number - // todo: there are several labels that need to be targeted and hardcoding them is fragile. - // a future task should focus on creating a label that applies to all resources associated with one pull request. - - // these objects were created as part of deploying an app (e.g. 
replication controller) - def deleteAllAppObjects = "oc delete all,pvc,secret,configmap -n ${DEV_PROJECT} -l app=gwells-dev-pr-${prNum}".execute() - deleteAllAppObjects.consumeProcessOutput(sout, serr) - deleteAllAppObjects.waitForOrKill(25000) - println "out> $sout err> $serr" - - // these objects were created by our templates during the pipeline runs - sout = new StringBuilder() - serr = new StringBuilder() - def deleteCreatedObjects = "oc delete all,pvc,secret,configmap -n ${DEV_PROJECT} -l appver=gwells-dev-pr-${prNum}".execute() - deleteCreatedObjects.consumeProcessOutput(sout, serr) - deleteCreatedObjects.waitForOrKill(25000) - println "out> $sout err> $serr" - - // these objects were generated by openshift for PVC provisioning - sout = new StringBuilder() - serr = new StringBuilder() - def deleteGeneratedObjects = "oc delete all,pvc,secret,configmap -n ${DEV_PROJECT} -l gluster.kubernetes.io/provisioned-for-pvc=gwells-pg12-dev-pr-${prNum}".execute() - deleteGeneratedObjects.consumeProcessOutput(sout, serr) - deleteGeneratedObjects.waitForOrKill(25000) - println "out> $sout err> $serr" - - // delete the objects in the tools project (this is primarly the build configs, - // the imagestream is not unique to each pull request). - sout = new StringBuilder() - serr = new StringBuilder() - def deleteAllBuilds = "oc delete all -n ${TOOLS_PROJECT} -l appver=gwells-dev-pr-${prNum}".execute() - deleteAllBuilds.consumeProcessOutput(sout, serr) - deleteAllBuilds.waitForOrKill(25000) - println "out> $sout err> $serr" - - // untag the images tagged with this PR number - sout = new StringBuilder() - serr = new StringBuilder() - def untagImages = "oc tag -d gwells-application:pr-${prNum}".execute() - untagImages.consumeProcessOutput(sout, serr) - untagImages.waitForOrKill(25000) - println "out> $sout err> $serr" - -} diff --git a/gwells/openshift/jenkins/jenkins-deploy.yaml b/gwells/openshift/jenkins/jenkins-deploy.yaml deleted file mode 100644 index a41886ce7..000000000 --- a/gwells/openshift/jenkins/jenkins-deploy.yaml +++ /dev/null @@ -1,331 +0,0 @@ ---- -apiVersion: template.openshift.io/v1 -kind: Template -labels: - app: jenkins -metadata: - creationTimestamp: null - name: jenkins -objects: -- apiVersion: v1 - kind: Secret - metadata: - annotations: - as-copy-of: template.${NAME}-slave-user - as-copy-of/preserve: password - name: ${NAME}-slave-user - stringData: - metadata.name: ${NAME}-slave-user - password: ${SLAVE_USER_PASSWORD} - username: jenkins-slave - type: kubernetes.io/basic-auth -- apiVersion: v1 - kind: Secret - metadata: - annotations: - as-copy-of: template.${NAME}-github - name: ${NAME}-github - stringData: - metadata.name: ${NAME}-github - password: ${GH_PASSWORD} - username: ${GH_USERNAME} - type: kubernetes.io/basic-auth -- apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - annotations: - volume.beta.kubernetes.io/storage-class: netapp-file-standard - name: ${NAME} - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi -- apiVersion: v1 - kind: ServiceAccount - metadata: - annotations: - serviceaccounts.openshift.io/oauth-redirectreference.jenkins: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"${NAME}"}}' - name: ${NAME} -- apiVersion: v1 - groupNames: null - kind: RoleBinding - metadata: - name: ${NAME}_edit - roleRef: - name: edit - subjects: - - kind: ServiceAccount - name: ${NAME} -- apiVersion: v1 - groupNames: null - kind: RoleBinding - metadata: - name: ${NAME}_admin - roleRef: - name: admin - 
subjects: - - kind: ServiceAccount - name: ${NAME} -- apiVersion: v1 - kind: DeploymentConfig - metadata: - annotations: - template.alpha.openshift.io/wait-for-ready: "true" - creationTimestamp: null - name: ${NAME} - spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - deploymentconfig: ${NAME} - strategy: - activeDeadlineSeconds: 21600 - recreateParams: - timeoutSeconds: 600 - resources: {} - type: Recreate - template: - metadata: - creationTimestamp: null - labels: - deploymentconfig: ${NAME} - spec: - containers: - - command: - - /usr/local/bin/container-entrypoint - - /usr/local/bin/jenkins-run - env: - - name: USE_JAVA_DIAGNOSTICS - value: "true" - - name: JENKINS_URL - value: https://${ROUTE_HOST}${ROUTE_PATH} - - name: ENV_NAME - value: ${ENV_NAME} - - name: ENV_ID - value: ${ENV_ID} - - name: APP_TOOLS_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.tools - - name: APP_DEV_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.dev - - name: APP_STAGING_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.test - - name: APP_PROD_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.prod - - name: OCP_PLATFORM - value: '4' - image: ' ' - imagePullPolicy: Always - livenessProbe: - failureThreshold: 3 - httpGet: - path: /login - port: 8080 - initialDelaySeconds: 420 - periodSeconds: 360 - timeoutSeconds: 240 - name: jenkins - ports: - - containerPort: 50000 - protocol: TCP - - containerPort: 8080 - protocol: TCP - readinessProbe: - httpGet: - path: /login - port: 8080 - initialDelaySeconds: 3 - timeoutSeconds: 240 - resources: - limits: - cpu: "${CPU_LIMIT}" - memory: "${MEMORY_LIMIT}" - requests: - cpu: "${CPU_REQUEST}" - memory: "${MEMORY_REQUEST}" - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /var/jenkins-data - name: jenkins-jobs - readOnly: false - - mountPath: /var/run/pod - name: pod-metadata - - mountPath: /run/secrets/jenkins-slave-user - name: jenkins-slave-user - readOnly: true - - mountPath: /run/secrets/github - name: github - readOnly: true - - mountPath: /run/configs/ns - name: ns-config - readOnly: true - - mountPath: /run/configs/jobs - name: jobs-config - readOnly: true - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - serviceAccount: ${NAME} - serviceAccountName: ${NAME} - terminationGracePeriodSeconds: 30 - volumes: - - name: jenkins-jobs - persistentVolumeClaim: - claimName: ${NAME} - - downwardAPI: - items: - - fieldRef: - fieldPath: metadata.name - name: name - path: name - - fieldRef: - fieldPath: metadata.namespace - name: namespace - path: namespace - - fieldRef: - fieldPath: metadata.labels - name: labels - path: labels - - fieldRef: - fieldPath: metadata.annotations - name: annotations - path: annotations - name: pod-metadata - - name: jenkins-slave-user - secret: - defaultMode: 420 - secretName: ${NAME}-slave-user - - name: github - secret: - defaultMode: 420 - secretName: ${NAME}-github - - name: ns-config - configMap: - defaultMode: 420 - name: ns-config - - name: jobs-config - configMap: - defaultMode: 420 - name: jobs-config - test: false - triggers: - - imageChangeParams: - automatic: true - containerNames: - - jenkins - from: - kind: ImageStreamTag - name: jenkins-basic:${VERSION} - type: ImageChange - - type: ConfigChange -- apiVersion: v1 - kind: Service - metadata: - creationTimestamp: null - name: ${NAME} - spec: - ports: - - 
name: 8080-tcp - port: 8080 - protocol: TCP - targetPort: 8080 - - name: 50000-tcp - port: 50000 - protocol: TCP - targetPort: 50000 - selector: - deploymentconfig: ${NAME} - sessionAffinity: None - type: ClusterIP -- apiVersion: v1 - kind: Route - metadata: - creationTimestamp: null - name: ${NAME} - spec: - host: ${ROUTE_HOST} - path: ${ROUTE_PATH} - port: - targetPort: 8080-tcp - tls: - termination: edge - to: - kind: Service - name: ${NAME} - weight: 100 - wildcardPolicy: None -parameters: -- description: A name used for all objects - displayName: Name - name: NAME - required: true -- description: A version used for the image tags - displayName: version - name: VERSION - required: true - value: v1.0.0 -- description: ROUTE_HOST - displayName: ROUTE_HOST - name: ROUTE_HOST - required: true - value: "gwells-jenkins.apps.silver.devops.gov.bc.ca" -- description: ROUTE_PATH - displayName: ROUTE_PATH - name: ROUTE_PATH - required: true - value: / -- description: Environment Name - displayName: ENV_NAME - name: ENV_NAME - required: true - value: prod -- description: Environment ID - displayName: ENV_ID - name: ENV_ID - required: true - value: prod -- description: SLAVE_USER_PASSWORD - displayName: SLAVE_USER_PASSWORD - from: '[a-zA-Z0-9]{16}' - generate: expression - name: SLAVE_USER_PASSWORD -- description: GitHub Username (Same as github secret) - displayName: GH_USERNAME - name: GH_USERNAME - required: true - value: "" -- description: GitHub Personal Access Token (Same as github secret) - displayName: GH_PASSWORD - name: GH_PASSWORD - required: true -- name: CPU_REQUEST - description: Minimal CPU needed to run - displayName: CPU Request - value: 100m -- name: CPU_LIMIT - description: Maximum CPU allowed to use - displayName: CPU Limit - value: 2000m -- name: MEMORY_REQUEST - description: Minimal amount of memory needed to run - displayName: Memory Request - value: 1Gi -- name: MEMORY_LIMIT - description: Maximum amount of memory allowed to use - displayName: Memory Limit - value: 4Gi diff --git a/gwells/openshift/jenkins/jenkins-prereq.yaml b/gwells/openshift/jenkins/jenkins-prereq.yaml deleted file mode 100644 index 6d1f738a3..000000000 --- a/gwells/openshift/jenkins/jenkins-prereq.yaml +++ /dev/null @@ -1,70 +0,0 @@ ---- -kind: Template -apiVersion: v1 -labels: - app: jenkins-prod -metadata: - name: jenkins -objects: -- apiVersion: v1 - data: - project.dev: "${DEV}" - project.test: "${TEST}" - project.prod: "${PROD}" - project.tools: "${TOOLS}" - kind: ConfigMap - metadata: - name: ns-config -- apiVersion: v1 - data: - app.name: "${APP_NAME}" - app.domain: "${APP_DOMAIN}" - repo.name: "${REPO_NAME}" - repo.owner: "${REPO_OWNER}" - kind: ConfigMap - metadata: - name: jobs-config -parameters: -- name: DEV - displayName: Dev Project Namespace - description: Project/Namespace for Dev. Environment - required: true - value: 26e83e-dev -- name: TEST - displayName: Test Project Namespace - description: Project/Namespace for Test Environment - required: true - value: 26e83e-test -- name: PROD - displayName: Prod Project Namespace - description: Project/Namespace for Prod. 
Environment - required: true - value: 26e83e-prod -- name: TOOLS - displayName: Tools Project Namespace - description: Project/Namespace for Tools Environment - required: true - value: 26e83e-tools -- name: NAME - displayName: Name - description: A name used for all objects - required: true - value: jenkins -- name: REPO_OWNER - displayName: Application Repository Owner - description: A name of the github repo owner - required: true - value: bcgov -- name: REPO_NAME - displayName: Application Repository Name - description: Name of the application repository (code to build) - required: true - value: gwells -- name: APP_NAME - description: Short name (one word, lowercase) of the application - required: true - value: gwells -- name: APP_DOMAIN - description: Internet domain for the application - required: true - value: gwells.apps.silver.devops.gov.bc.ca diff --git a/gwells/openshift/jenkins/jenkins-secondary-build.yaml b/gwells/openshift/jenkins/jenkins-secondary-build.yaml deleted file mode 100644 index 6d8db4d60..000000000 --- a/gwells/openshift/jenkins/jenkins-secondary-build.yaml +++ /dev/null @@ -1,88 +0,0 @@ ---- -apiVersion: template.openshift.io/v1 -kind: Template -labels: - app: jenkins -metadata: - creationTimestamp: null - name: jenkins -objects: -- apiVersion: v1 - kind: ImageStream - metadata: - creationTimestamp: null - labels: - shared: "true" - name: ${NAME}-secondary - spec: - lookupPolicy: - local: false -- apiVersion: v1 - kind: BuildConfig - metadata: - creationTimestamp: null - name: ${NAME}-secondary - spec: - completionDeadlineSeconds: 600 - failedBuildsHistoryLimit: 3 - successfulBuildsHistoryLimit: 3 - output: - to: - kind: ImageStreamTag - name: ${NAME}-secondary:${VERSION} - postCommit: {} - resources: - limits: - cpu: 2000m - memory: 2Gi - requests: - cpu: 1000m - memory: 1Gi - runPolicy: SerialLatestOnly - source: - dockerfile: | - FROM BuildConfig - ARG NODE_VERSION=v10.16.0 - ARG SONAR_VERSION=3.3.0.1492 - USER 0 - RUN fix_permission() { while [[ $# > 0 ]] ; do chgrp -R 0 "$1" && chmod -R g=u "$1"; shift; done } && \ - set -x && \ - curl -sSL -o /tmp/sonar-scanner-cli.zip https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${SONAR_VERSION}-linux.zip && \ - unzip /tmp/sonar-scanner-cli.zip -d /tmp/sonar-scanner-cli && \ - mv /tmp/sonar-scanner-cli/sonar-scanner-${SONAR_VERSION}-linux /opt/sonar-scanner && \ - ln -s /opt/sonar-scanner/bin/sonar-scanner /usr/local/bin && \ - rm -rf /tmp/sonar-scanner-cli.zip && \ - rm -rf /tmp/sonar-scanner-cli && \ - curl -sSL https://nodejs.org/dist/${NODE_VERSION}/node-${NODE_VERSION}-linux-x64.tar.xz | tar -Jx -C /opt && \ - mv /opt/node-${NODE_VERSION}-linux-x64 /opt/node && \ - fix_permission '/opt/sonar-scanner' '/opt/node' - ENV NODE_HOME=/opt/node \ - PATH=$PATH:/opt/node/bin - USER 1001 - type: Dockerfile - strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: ${SOURCE_IMAGE_STREAM_TAG} - namespace: ${SOURCE_IMAGE_STREAM_NAMESPACE} - type: Docker - triggers: - - imageChange: {} - type: ImageChange -parameters: -- name: NAME - description: A name used for all objects - displayName: Name - required: true - value: jenkins -- name: VERSION - description: A version used for the image tags - displayName: version - required: true - value: v1.0.0 -- name: SOURCE_IMAGE_STREAM_NAMESPACE - value: "26e83e-tools" -- name: SOURCE_IMAGE_STREAM_TAG - required: true - value: "jenkins-basic:v1.0.0" diff --git a/gwells/openshift/jenkins/jenkins-secondary-deploy.yaml 
b/gwells/openshift/jenkins/jenkins-secondary-deploy.yaml deleted file mode 100644 index 063b037dd..000000000 --- a/gwells/openshift/jenkins/jenkins-secondary-deploy.yaml +++ /dev/null @@ -1,188 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: null - name: jenkins -objects: -- apiVersion: v1 - kind: DeploymentConfig - metadata: - creationTimestamp: null - name: ${NAME}-${SECONDARY_NAME} - spec: - replicas: "${{REPLICAS}}" - revisionHistoryLimit: 10 - selector: - deploymentconfig: ${NAME}-${SECONDARY_NAME} - strategy: - activeDeadlineSeconds: 21600 - recreateParams: - timeoutSeconds: 600 - resources: {} - type: Recreate - template: - metadata: - creationTimestamp: null - labels: - deploymentconfig: ${NAME}-${SECONDARY_NAME} - spec: - initContainers: - - name: init - image: " " - command: - - "curl" - - "-sSf" - - "http://${NAME}:8080/login" - containers: - - command: - - bash - - -c - - cd $HOME && java -XshowSettings:vm -version && exec java -jar /usr/lib/jenkins/swarm-client.jar - -name "$(cat /etc/hostname)" -deleteExistingClients -fsroot "$JENKINS_HOME/$(cat - /etc/hostname)" -master http://$JENKINS_MASTER_SERVICE:8080 -disableSslVerification - -username "$(cat /var/run/secrets/jenkins-slave-user/username)" -passwordFile - /var/run/secrets/jenkins-slave-user/password -description "$(cat /etc/hostname)" - -executors ${SECONDARY_EXECUTORS} -labels '${SECONDARY_LABELS}' -mode - 'normal' -retry 10 -tunnel $JENKINS_MASTER_SERVICE:50000 -disableClientsUniqueId - env: - - name: JENKINS_MASTER_SERVICE - value: ${NAME} - - name: JAVA_TOOL_OPTIONS - value: -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap - -XX:MaxRAMFraction=5 -XX:MaxHeapFreeRatio=20 -XX:MinHeapFreeRatio=10 - -XX:+UseParallelGC -XX:ParallelGCThreads=2 - - name: ENV_NAME - value: ${ENV_NAME} - - name: ENV_ID - value: ${ENV_ID} - - name: APP_TOOLS_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.tools - - name: APP_DEV_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.dev - - name: APP_STAGING_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.test - - name: APP_PROD_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.prod - - name: APP_PROD_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.prod - - name: OCP_PLATFORM - value: '4' - image: ' ' - imagePullPolicy: Always - name: jenkins - ports: - - containerPort: 50000 - protocol: TCP - - containerPort: 8080 - protocol: TCP - resources: - limits: - cpu: ${CPU_LIMIT} - memory: ${MEMORY_LIMIT} - requests: - cpu: ${CPU_REQUEST} - memory: ${MEMORY_REQUEST} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /var/run/pod - name: pod-metadata - - mountPath: /run/secrets/jenkins-slave-user - name: jenkins-slave-user - readOnly: true - - mountPath: /run/secrets/github - name: github - readOnly: true - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - serviceAccount: ${NAME} - serviceAccountName: ${NAME} - terminationGracePeriodSeconds: 30 - volumes: - - name: jenkins-home - persistentVolumeClaim: - claimName: ${NAME} - - downwardAPI: - items: - - fieldRef: - fieldPath: metadata.name - name: name - path: name - - fieldRef: - fieldPath: metadata.namespace - name: namespace - path: namespace - - fieldRef: - fieldPath: metadata.labels - name: labels - path: labels - - fieldRef: - fieldPath: 
metadata.annotations - name: annotations - path: annotations - name: pod-metadata - - name: jenkins-slave-user - secret: - defaultMode: 420 - secretName: ${NAME}-slave-user - - name: github - secret: - defaultMode: 420 - secretName: ${NAME}-github - test: false - triggers: - - imageChangeParams: - automatic: true - containerNames: - - jenkins - - init - from: - kind: ImageStreamTag - name: jenkins-basic:${VERSION} - type: ImageChange - - type: ConfigChange -parameters: -- description: A name used for all objects - displayName: Name - name: NAME - required: true - value: jenkins -- description: A version used for the image tags - displayName: version - name: VERSION - required: true - value: v1.0.0 -- name: SECONDARY_NAME - required: true - value: secondary -- name: SECONDARY_LABELS - value: "Linux rhel rhel7 build test deploy light" -- name: SECONDARY_EXECUTORS - value: "3" -- name: REPLICAS - value: "1" -- name: CPU_REQUEST - value: "300m" -- name: CPU_LIMIT - value: "2000m" -- name: MEMORY_REQUEST - value: "1Gi" -- name: MEMORY_LIMIT - value: "4Gi" \ No newline at end of file diff --git a/gwells/openshift/jenkins/jenkins.nsp.yaml b/gwells/openshift/jenkins/jenkins.nsp.yaml deleted file mode 100644 index 162eea229..000000000 --- a/gwells/openshift/jenkins/jenkins.nsp.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -parameters: -- name: NAMESPACE -objects: -- apiVersion: security.devops.gov.bc.ca/v1alpha1 - kind: NetworkSecurityPolicy - metadata: - name: egress-internet - spec: - description: "allow ${NAMESPACE} namespace to talk to the internet." - source: - - - $namespace=${NAMESPACE} - destination: - - - ext:network=any -- apiVersion: security.devops.gov.bc.ca/v1alpha1 - kind: NetworkSecurityPolicy - metadata: - name: intra-namespace-comms - spec: - description: "allow ${NAMESPACE} namespace to talk to itself" - source: - - - $namespace=${NAMESPACE} - destination: - - - $namespace=${NAMESPACE} -- apiVersion: security.devops.gov.bc.ca/v1alpha1 - kind: NetworkSecurityPolicy - metadata: - name: int-cluster-k8s-api-comms - spec: - description: "allow ${NAMESPACE} pods to talk to the k8s api" - destination: - - - int:network=internal-cluster-api-endpoint - source: - - - $namespace=${NAMESPACE} diff --git a/gwells/openshift/jobs/import-licences/import-licences.cj.json b/gwells/openshift/jobs/import-licences/import-licences.cj.json deleted file mode 100644 index 10a91a997..000000000 --- a/gwells/openshift/jobs/import-licences/import-licences.cj.json +++ /dev/null @@ -1,110 +0,0 @@ -{ - "kind": "Template", - "apiVersion": "v1", - "metadata": {}, - "parameters": [ - { - "name": "ENV_NAME", - "required": true - }, - { - "name": "PROJECT", - "required": true - }, - { - "name": "TAG", - "required": false, - "value": "${ENV_NAME}" - }, - { - "name": "NAME", - "required": true - }, - { - "name": "COMMAND", - "required": true - }, - { - "name": "SCHEDULE", - "required": true - } - ], - "objects": [ - { - "apiVersion": "batch/v1beta1", - "kind": "CronJob", - "metadata": { - "name": "${NAME}" - }, - "spec": { - "schedule": "${SCHEDULE}", - "concurrencyPolicy": "Forbid", - "jobTemplate": { - "spec": { - "template": { - "spec": { - "containers": [ - { - "name": "${NAME}", - "image": "docker-registry.default.svc:5000/${PROJECT}/gwells-${ENV_NAME}:${TAG}", - "imagePullPolicy": "Always", - "command": [ - "python", - "backend/manage.py", - "${COMMAND}" - ], - "env": [ - { - "name": "DATABASE_SERVICE_NAME", - "value": "gwells-pg12-${ENV_NAME}" - }, - { - "name": 
"DATABASE_ENGINE", - "value": "postgresql" - }, - { - "name": "DATABASE_NAME", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pgsql-${ENV_NAME}", - "key": "database-name" - } - } - }, - { - "name": "DATABASE_USER", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pgsql-${ENV_NAME}", - "key": "database-user" - } - } - }, - { - "name": "DATABASE_PASSWORD", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pgsql-${ENV_NAME}", - "key": "database-password" - } - } - } - ], - "envFrom": [ - { - "configMapRef": { - "name": "gwells-global-config-${ENV_NAME}" - } - } - ] - } - ], - "restartPolicy": "OnFailure" - } - } - } - } - } - } - ] -} diff --git a/gwells/openshift/jobs/minio-backup/Dockerfile b/gwells/openshift/jobs/minio-backup/Dockerfile deleted file mode 100644 index 9624ac23f..000000000 --- a/gwells/openshift/jobs/minio-backup/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM alpine:latest -USER root -RUN apk add --update \ - curl rsync \ - && rm -rf /var/cache/apk/* - -# install restic -RUN curl -Lo restic.bz2 https://github.com/restic/restic/releases/download/v0.9.4/restic_0.9.4_linux_amd64.bz2 \ - && bzip2 -d restic.bz2 \ - && mv restic /usr/bin/restic \ - && chmod +x /usr/bin/restic - -COPY ./entrypoint.sh / -ENTRYPOINT ["/entrypoint.sh"] -USER 1001 diff --git a/gwells/openshift/jobs/minio-backup/README.md b/gwells/openshift/jobs/minio-backup/README.md deleted file mode 100644 index 89f7a84d5..000000000 --- a/gwells/openshift/jobs/minio-backup/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# S3/minio document backup - -This job connects to a PVC containing files to backup and makes a copy to a second PVC. It then syncs the files to NFS storage using restic (see https://restic.net/). - -This leaves 3 copies of the data: the original (in-use) PVC that is mounted by the minio service, -a backup PVC in the cluster that is only mounted by the backup job pods during backup (e.g. gwells-documents-staging-backup-vol), and the provisioned NFS storage (restic repository). - - -Example usage in Jenkins pipeline: - -```groovy -def docBackupCronjob = openshift.process("-f", - "openshift/jobs/minio-backup.cj.yaml", - - // values for the environment that this job will run in - "NAME_SUFFIX=${prodSuffix}", - "NAMESPACE=${prodProject}", - - // this is the backup image version created by the build config in this folder (minio-backup.bc.yaml) - "VERSION=v1.0.0", - "SCHEDULE='15 12 * * *'", - - // the name of the target backup PVC for the restic repository. This will be the 3rd backup. - // the 2nd backup will be a PVC created by the minio-backup.cj.yaml template. - // GWELLS uses a provisioned NFS storage claim for this value. - "DEST_PVC=${backupPVC}", - "SOURCE_PVC=${minioDataPVC}", // the name of the minio data PVC - "PVC_SIZE=40Gi" // you may need enough space to hold a few copies of files on-disk. 
-) -``` diff --git a/gwells/openshift/jobs/minio-backup/entrypoint.sh b/gwells/openshift/jobs/minio-backup/entrypoint.sh deleted file mode 100755 index 00a1be063..000000000 --- a/gwells/openshift/jobs/minio-backup/entrypoint.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/sh -set -euo pipefail -IFS=$'\n\t' -[ "${VERBOSE:-}" != true ]|| set -x - -# PVC mount and folder variables, removing any trailing slashes (%/) -# -SRC_MNT=${SRC_MNT:-/mnt/source} -DEST_MNT=${DEST_MNT:-/backup} -SRC_MNT=${SRC_MNT%/} -DEST_MNT=${DEST_MNT%/} -# -DEST_DIR=${DEST_MNT}/documents -TMP_BK=${NEW_BK:-${DEST_DIR}/bk-tmp} -PRV_BK=${PRV_BK:-${DEST_DIR}/bk-prev} -NEW_BK=${NEW_BK:-${DEST_DIR}/bk} - - -# Drop to one previous backup -# Either directory does not exist, or remove directory. -[ ! -d ${PRV_BK} ]|| rm -rf ${PRV_BK} - - -# Copy and verify -# -mkdir -p ${TMP_BK} -if ! rsync -avh ${SRC_MNT}/ ${TMP_BK}/ -then - echo "Copy failed! Previous backups retained." - rm -rf ${TMP_BK} - exit 1 -fi - - -# Shuffle and show disk usage -# Either directory doesn't exist, or move it -[ ! -d ${NEW_BK} ]|| mv ${NEW_BK} ${PRV_BK} -mv ${TMP_BK} ${NEW_BK} -du -hd 1 ${DEST_MNT} - - -# Check if NFS repository is initialized. If not, initialize it. -# RESTIC_PASSWORD is required. -if ! restic -r /mnt/dest/gwells-documents snapshots > /dev/null 2>&1; then - restic -r /mnt/dest/gwells-documents init ; fi - -# Backup files using delta (de-duplicate) and encryption -restic --cache-dir ${DEST_DIR}/.cache -r /mnt/dest/gwells-documents backup ${NEW_BK} - -# Clean up old snapshots. -# As an example, the following arguments: -# --keep-daily 7 --keep-weekly 5 --keep-monthly 12 --keep-yearly 2 -# will keep the most recent 7 daily snapshots, 5 weekly, 12 monthly, and 2 yearly snapshots. -# The rest will be pruned. 
-restic -r /mnt/dest/gwells-documents forget --keep-daily 7 --keep-weekly 5 --keep-monthly 12 --keep-yearly 10 --prune - -# check repository integrity before exiting -restic -r /mnt/dest/gwells-documents check diff --git a/gwells/openshift/jobs/minio-backup/minio-backup.bc.yaml b/gwells/openshift/jobs/minio-backup/minio-backup.bc.yaml deleted file mode 100644 index 4f665c590..000000000 --- a/gwells/openshift/jobs/minio-backup/minio-backup.bc.yaml +++ /dev/null @@ -1,77 +0,0 @@ -apiVersion: v1 -kind: Template -metadata: {} -parameters: - - name: VERSION - value: v1.0.0 - - name: NAMESPACE - value: moe-gwells-tools -objects: - - apiVersion: image.openshift.io/v1 - kind: ImageStream - metadata: - annotations: - openshift.io/generated-by: OpenShiftNewBuild - creationTimestamp: null - labels: - name: gwells-documents-backup - component: backups - part-of: gwells - name: gwells-documents-backup - namespace: "${NAMESPACE}" - spec: - lookupPolicy: - local: false - status: - dockerImageRepository: "" - - apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - annotations: - openshift.io/generated-by: OpenShiftNewBuild - creationTimestamp: null - labels: - name: "gwells-documents-backup-${VERSION}" - component: backups - version: ${VERSION} - part-of: gwells - name: "gwells-documents-backup-${VERSION}" - namespace: "${NAMESPACE}" - spec: - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: gwells-documents-backup:${VERSION} - postCommit: {} - resources: {} - source: - git: - uri: https://github.com/bcgov/gwells.git - ref: steve/backups - contextDir: openshift/ocp4/jobs/minio-backup - dockerfile: | - FROM alpine:latest - USER root - RUN apk add --update \ - curl rsync \ - && rm -rf /var/cache/apk/* - RUN curl -Lo restic.bz2 https://github.com/restic/restic/releases/download/v0.9.4/restic_0.9.4_linux_amd64.bz2 \ - && bzip2 -d restic.bz2 \ - && mv restic /usr/bin/restic \ - && chmod +x /usr/bin/restic - COPY ./entrypoint.sh / - ENTRYPOINT ["/entrypoint.sh"] - USER 1001 - type: Dockerfile - strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: alpine:3.7 - namespace: openshift - type: Docker - triggers: - - type: ConfigChange - status: - lastVersion: 0 diff --git a/gwells/openshift/jobs/minio-backup/minio-backup.cj.yaml b/gwells/openshift/jobs/minio-backup/minio-backup.cj.yaml deleted file mode 100644 index aa75a9d23..000000000 --- a/gwells/openshift/jobs/minio-backup/minio-backup.cj.yaml +++ /dev/null @@ -1,90 +0,0 @@ -apiVersion: v1 -kind: Template -metadata: {} -parameters: - - name: NAME_SUFFIX - required: true - - name: NAMESPACE - required: true - - name: PVC_SIZE - value: "15Gi" - - name: DEST_PVC - required: true - - name: VERSION - value: v1.0.0 - - name: SCHEDULE - value: "15 3 * * *" - required: false - - name: SOURCE_PVC - required: true -objects: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - finalizers: - - kubernetes.io/pvc-protection - name: "gwells-documents-${NAME_SUFFIX}-backup-vol" - labels: - app: "gwells-${NAME_SUFFIX}" - name: "gwells-${NAME_SUFFIX}-documents-backup" - component: backups - part-of: gwells - spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: ${PVC_SIZE} - storageClassName: gluster-file - - apiVersion: batch/v1beta1 - kind: CronJob - metadata: - name: gwells-documents-${NAME_SUFFIX}-backup - namespace: "${NAMESPACE}" - spec: - concurrencyPolicy: Forbid - failedJobsHistoryLimit: 3 - jobTemplate: - metadata: - labels: - app: "gwells-${NAME_SUFFIX}" - name: 
"gwells-${NAME_SUFFIX}-documents-backup" - component: backups - part-of: gwells - spec: - backoffLimit: 10 - template: - spec: - activeDeadlineSeconds: 3600 - containers: - - image: docker-registry.default.svc:5000/${NAMESPACE}/gwells-documents-backup:${VERSION} - imagePullPolicy: Always - name: minio-backup - env: - - name: RESTIC_PASSWORD - valueFrom: - secretKeyRef: - key: RESTIC_PASSWORD - name: minio-access-parameters-${NAME_SUFFIX} - volumeMounts: - - mountPath: /mnt/dest/ - name: dest - - mountPath: /backup - name: backup - - mountPath: /mnt/source/ - name: source - readOnly: true - restartPolicy: OnFailure - terminationGracePeriodSeconds: 30 - volumes: - - name: source - persistentVolumeClaim: - claimName: "${SOURCE_PVC}" - - name: dest - persistentVolumeClaim: - claimName: "${DEST_PVC}" - - name: backup - persistentVolumeClaim: - claimName: "gwells-documents-${NAME_SUFFIX}-backup-vol" - schedule: ${SCHEDULE} - successfulJobsHistoryLimit: 3 diff --git a/gwells/openshift/jobs/postgres-backup-nfs/README.md b/gwells/openshift/jobs/postgres-backup-nfs/README.md deleted file mode 100644 index 475e0365f..000000000 --- a/gwells/openshift/jobs/postgres-backup-nfs/README.md +++ /dev/null @@ -1 +0,0 @@ -This job backs up the GWELLS database to a provisioned NFS volume using https://github.com/BCDevOps/backup-container. diff --git a/gwells/openshift/jobs/postgres-backup-nfs/postgres-backup.cj.yaml b/gwells/openshift/jobs/postgres-backup-nfs/postgres-backup.cj.yaml deleted file mode 100644 index 13b38540c..000000000 --- a/gwells/openshift/jobs/postgres-backup-nfs/postgres-backup.cj.yaml +++ /dev/null @@ -1,252 +0,0 @@ ---- -apiVersion: v1 -kind: Template -metadata: - annotations: - description: "Scheduled Task to perform a Database Backup" - tags: "cronjob,backup" - labels: - app: ${TARGET}-backup - cronjob: ${TARGET}-backup - component: backups - part-of: gwells - template: "${JOB_NAME}-config-template" - name: "${JOB_NAME}-cronjob-template" -parameters: - - name: "NAMESPACE" - required: true - - name: "TARGET" - displayName: "Database name (deployment config, not pod name)" - description: "The name of the database, by deployment config, to be backed up." - required: true - - name: "JOB_NAME" - displayName: "Job Name" - description: "Name of the Scheduled Job to Create." - value: "backup" - required: true - - name: "SCHEDULE" - displayName: "Cron Schedule" - description: "Cron Schedule to Execute the Job (in UTC)" - # 11:00 UTC = 3:00 AM PDT - value: "27 9 * * *" - required: true - - name: "PVC_NAME" - required: true - - name: "SOURCE_IMAGE_NAME" - displayName: "Source Image Name" - description: "The name of the image to use for this resource." - required: false - value: "postgres-backup-container" - - name: "IMAGE_NAMESPACE" - displayName: "Image Namespace" - description: "The namespace of the OpenShift project containing the imagestream for the application." - required: false - value: "moe-gwells-tools" - - name: "TAG_NAME" - displayName: "Environment TAG name" - description: "The TAG name for this environment, e.g., dev, test, prod" - required: false - value: "v1.0.0" - - name: "DEFAULT_PORT" - displayName: "Database Service Port" - description: "The configured port for the database service" - required: false - value: "5432" - - name: "DATABASE_NAME" - displayName: "Database Name" - description: "The name of the database." 
- required: false - value: "gwells" - - name: "BACKUP_STRATEGY" - displayName: "Backup Strategy" - description: "The strategy to use for backups; for example daily, or rolling." - required: false - value: "rolling" - - name: "BACKUP_DIR" - displayName: "The root backup directory" - description: "The name of the root backup directory" - required: false - value: "/gwells-db-backups/" - - name: "NUM_BACKUPS" - displayName: "The number of backup files to be retained" - description: "The number of backup files to be retained. Used for the `daily` backup strategy. Ignored when using the `rolling` backup strategy." - required: false - value: "5" - - name: "DAILY_BACKUPS" - displayName: "Number of Daily Backups to Retain" - description: "The number of daily backup files to be retained. Used for the `rolling` backup strategy." - required: false - value: "7" - - name: "WEEKLY_BACKUPS" - displayName: "Number of Weekly Backups to Retain" - description: "The number of weekly backup files to be retained. Used for the `rolling` backup strategy." - required: false - value: "4" - - name: "MONTHLY_BACKUPS" - displayName: "Number of Monthly Backups to Retain" - description: "The number of monthly backup files to be retained. Used for the `rolling` backup strategy." - required: false - value: "12" - - name: "SUCCESS_JOBS_HISTORY_LIMIT" - displayName: "Successful Job History Limit" - description: "The number of successful jobs that will be retained." - value: "5" - required: false - - name: "FAILED_JOBS_HISTORY_LIMIT" - displayName: "Failed Job History Limit" - description: "The number of failed jobs that will be retained." - value: "2" - required: false - - name: "JOB_BACKOFF_LIMIT" - displayName: "Job Backoff Limit" - description: "The number of attempts to try for a successful job outcome." - value: "0" - required: false - - name: "JOB_DEADLINE_SECONDS" - displayName: "Job deadline (seconds)" - description: "The maximum amount of time to let this job run." 
- value: "600" - required: false -objects: - - apiVersion: v1 - data: - BACKUP_STRATEGY: ${BACKUP_STRATEGY} - DAILY_BACKUPS: ${DAILY_BACKUPS} - DATABASE_SERVICE_NAME: ${TARGET} - DEFAULT_PORT: ${DEFAULT_PORT} - MONTHLY_BACKUPS: ${MONTHLY_BACKUPS} - NUM_BACKUPS: ${NUM_BACKUPS} - POSTGRESQL_DATABASE: ${DATABASE_NAME} - WEEKLY_BACKUPS: ${WEEKLY_BACKUPS} - kind: ConfigMap - metadata: - labels: - app: ${TARGET}-backup - cronjob: ${TARGET}-backup - template: "${JOB_NAME}-config-template" - name: ${TARGET}-backup - namespace: ${NAMESPACE} - - apiVersion: batch/v1beta1 - kind: CronJob - metadata: - name: ${TARGET}-nfs-backup - namespace: ${NAMESPACE} - labels: - name: ${TARGET}-backup - cronjob: ${TARGET}-backup - part-of: gwells - component: backups - template: "${JOB_NAME}-config-template" - spec: - concurrencyPolicy: Forbid - failedJobsHistoryLimit: ${{FAILED_JOBS_HISTORY_LIMIT}} - jobTemplate: - metadata: - creationTimestamp: null - labels: - app: ${TARGET}-backup - cronjob: ${TARGET}-backup - component: backups - template: "${JOB_NAME}-config-template" - spec: - backoffLimit: ${{JOB_BACKOFF_LIMIT}} - template: - metadata: - creationTimestamp: null - spec: - activeDeadlineSeconds: ${{JOB_DEADLINE_SECONDS}} - containers: - - command: - - /bin/bash - - -c - - /backup.sh -1 - env: - - name: BACKUP_DIR - value: /backups/ - - name: BACKUP_STRATEGY - valueFrom: - configMapKeyRef: - key: BACKUP_STRATEGY - name: ${TARGET}-backup - - name: NUM_BACKUPS - valueFrom: - configMapKeyRef: - key: NUM_BACKUPS - name: ${TARGET}-backup - optional: true - - name: DAILY_BACKUPS - valueFrom: - configMapKeyRef: - key: DAILY_BACKUPS - name: ${TARGET}-backup - optional: true - - name: WEEKLY_BACKUPS - valueFrom: - configMapKeyRef: - key: WEEKLY_BACKUPS - name: ${TARGET}-backup - optional: true - - name: MONTHLY_BACKUPS - valueFrom: - configMapKeyRef: - key: MONTHLY_BACKUPS - name: ${TARGET}-backup - optional: true - - name: DATABASE_SERVICE_NAME - valueFrom: - configMapKeyRef: - key: DATABASE_SERVICE_NAME - name: ${TARGET}-backup - - name: DEFAULT_PORT - valueFrom: - configMapKeyRef: - key: DEFAULT_PORT - name: ${TARGET}-backup - optional: true - - name: POSTGRESQL_DATABASE - valueFrom: - configMapKeyRef: - key: POSTGRESQL_DATABASE - name: ${TARGET}-backup - - name: POSTGRESQL_USER - value: postgres - - name: POSTGRESQL_PASSWORD - valueFrom: - secretKeyRef: - key: PG_ROOT_PASSWORD - name: crunchy-db-credentials - - name: DATABASE_NAME - valueFrom: - configMapKeyRef: - key: POSTGRESQL_DATABASE - name: ${TARGET}-backup - - name: DATABASE_USER - value: postgres - - name: DATABASE_PASSWORD - valueFrom: - secretKeyRef: - key: PG_ROOT_PASSWORD - name: crunchy-db-credentials - image: docker-registry.default.svc:5000/${IMAGE_NAMESPACE}/${SOURCE_IMAGE_NAME}:${TAG_NAME} - imagePullPolicy: Always - name: backup-cronjob - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /backups/ - name: backup - dnsPolicy: ClusterFirst - restartPolicy: Never - schedulerName: default-scheduler - securityContext: {} - serviceAccount: default - serviceAccountName: default - terminationGracePeriodSeconds: 30 - volumes: - - name: backup - persistentVolumeClaim: - claimName: ${PVC_NAME} - schedule: ${SCHEDULE} - successfulJobsHistoryLimit: ${{SUCCESS_JOBS_HISTORY_LIMIT}} - suspend: false diff --git a/gwells/openshift/jobs/update-aquifer/update-aquifer.cj.json b/gwells/openshift/jobs/update-aquifer/update-aquifer.cj.json deleted file mode 100644 index 10a91a997..000000000 --- 
a/gwells/openshift/jobs/update-aquifer/update-aquifer.cj.json +++ /dev/null @@ -1,110 +0,0 @@ -{ - "kind": "Template", - "apiVersion": "v1", - "metadata": {}, - "parameters": [ - { - "name": "ENV_NAME", - "required": true - }, - { - "name": "PROJECT", - "required": true - }, - { - "name": "TAG", - "required": false, - "value": "${ENV_NAME}" - }, - { - "name": "NAME", - "required": true - }, - { - "name": "COMMAND", - "required": true - }, - { - "name": "SCHEDULE", - "required": true - } - ], - "objects": [ - { - "apiVersion": "batch/v1beta1", - "kind": "CronJob", - "metadata": { - "name": "${NAME}" - }, - "spec": { - "schedule": "${SCHEDULE}", - "concurrencyPolicy": "Forbid", - "jobTemplate": { - "spec": { - "template": { - "spec": { - "containers": [ - { - "name": "${NAME}", - "image": "docker-registry.default.svc:5000/${PROJECT}/gwells-${ENV_NAME}:${TAG}", - "imagePullPolicy": "Always", - "command": [ - "python", - "backend/manage.py", - "${COMMAND}" - ], - "env": [ - { - "name": "DATABASE_SERVICE_NAME", - "value": "gwells-pg12-${ENV_NAME}" - }, - { - "name": "DATABASE_ENGINE", - "value": "postgresql" - }, - { - "name": "DATABASE_NAME", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pgsql-${ENV_NAME}", - "key": "database-name" - } - } - }, - { - "name": "DATABASE_USER", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pgsql-${ENV_NAME}", - "key": "database-user" - } - } - }, - { - "name": "DATABASE_PASSWORD", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pgsql-${ENV_NAME}", - "key": "database-password" - } - } - } - ], - "envFrom": [ - { - "configMapRef": { - "name": "gwells-global-config-${ENV_NAME}" - } - } - ] - } - ], - "restartPolicy": "OnFailure" - } - } - } - } - } - } - ] -} diff --git a/gwells/openshift/maintenance.dc.yaml b/gwells/openshift/maintenance.dc.yaml deleted file mode 100644 index 462f57790..000000000 --- a/gwells/openshift/maintenance.dc.yaml +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright 2021 The Province of British Columbia -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ---- -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - annotations: - description: | - Deployment template for the GWELLS OCP3 to OCP4 reverse proxy. 
- name: gwells-maintenance${NAME_SUFFIX} -objects: - - apiVersion: v1 - kind: Route - metadata: - labels: - app: gwells${NAME_SUFFIX} - name: gwells-maintenance${NAME_SUFFIX} - name: gwells-maintenance${NAME_SUFFIX} - spec: - host: ${SOURCE_HOST_NAME} - port: - targetPort: 2015-tcp - tls: - termination: edge - to: - kind: Service - name: gwells-maintenance${NAME_SUFFIX} - weight: 100 - - apiVersion: v1 - kind: ConfigMap - metadata: - name: gwells-maintenance${NAME_SUFFIX}-config - labels: - app: gwells${NAME_SUFFIX} - name: gwells-maintenance${NAME_SUFFIX}-config - data: - Caddyfile: | - :2015 { - reverse_proxy * https://${DESTINATION_HOST_NAME} { - header_up Host {http.reverse_proxy.upstream.hostport} - header_up X-Real-IP {remote_host} - header_up X-Forwarded-For {remote_host} - header_up X-Forwarded-Proto {scheme} - #header_up Connection {>Connection} - #header_up Upgrade {>Upgrade} - flush_interval -1 - } - - log { - # errors stdout - output stdout - #format single_field common_log - } - } - - :2016 { - respond /health 200 - - log { - # errors stdout - output discard - #format single_field common_log - } - } - - apiVersion: v1 - kind: Service - metadata: - labels: - app: gwells${NAME_SUFFIX} - name: gwells-maintenance${NAME_SUFFIX} - name: gwells-maintenance${NAME_SUFFIX} - spec: - selector: - role: proxy - name: gwells-maintenance${NAME_SUFFIX} - ports: - - name: 2015-tcp - port: 2015 - protocol: TCP - targetPort: 2015 - - apiVersion: v1 - kind: DeploymentConfig - metadata: - labels: - app: gwells${NAME_SUFFIX} - name: gwells-maintenance${NAME_SUFFIX} - name: gwells-maintenance${NAME_SUFFIX} - spec: - strategy: - type: Rolling - triggers: - - type: ConfigChange - replicas: ${{REPLICA_COUNT}} - selector: - role: proxy - name: gwells-maintenance${NAME_SUFFIX} - template: - metadata: - labels: - app: gwells${NAME_SUFFIX} - role: proxy - name: gwells-maintenance${NAME_SUFFIX} - name: gwells-maintenance${NAME_SUFFIX} - spec: - containers: - - name: gwells-maintenance${NAME_SUFFIX} - imagePullPolicy: Always - image: "docker-registry.default.svc:5000/moe-gwells-tools/caddy:2" - env: - - name: XDG_DATA_HOME - value: /tmp - livenessProbe: - httpGet: - path: /health - port: 2016 - scheme: HTTP - failureThreshold: 5 - periodSeconds: 3 - readinessProbe: - httpGet: - path: /health - port: 2016 - timeoutSeconds: 10 - ports: - - containerPort: 2015 - resources: - limits: - cpu: 100m - memory: 92Mi - requests: - cpu: 30m - memory: 48Mi - volumeMounts: - - name: config-vol - mountPath: /etc/caddy/Caddyfile - subPath: Caddyfile - volumes: - - name: config-vol - configMap: - name: gwells-maintenance${NAME_SUFFIX}-config -parameters: - - displayName: Name - name: NAME_SUFFIX - required: true - - name: DESTINATION_HOST_NAME - description: - The destination address that is being proxied. - displayName: Redirect Destination - required: true - - name: SOURCE_HOST_NAME - description: | - The route that will be redirected to the destination URL. 
- displayName: Front Door Route - required: true - - name: REPLICA_COUNT - description: The number of PROXY pods to start - displayName: Replica Count - value: "2" - # - name: PROXY_MIN_HPA - # description: Min Number of PROXY pods for HPA - # displayName: PROXY Min HPA - # value: 3 - # - name: PROXY_MAX_HPA - # description: Max Number of PROXY pods for HPA - # displayName: PROXY Max HPA - # value: 9 \ No newline at end of file diff --git a/gwells/openshift/minio.bc.yaml b/gwells/openshift/minio.bc.yaml deleted file mode 100644 index 2f1187800..000000000 --- a/gwells/openshift/minio.bc.yaml +++ /dev/null @@ -1,50 +0,0 @@ ---- -kind: Template -apiVersion: v1 -metadata: - name: gwells-minio-bc -parameters: -- name: NAME - value: "gwells-minio" -- name: DEST_IMG_NAME - value: "gwells-minio" -- name: DEST_IMG_TAG - value: "latest" -- name: SRC_REPO_URL - value: "https://github.com/bcgov/gwells" -- name: SRC_REPO_BRANCH - value: "release" -objects: -- kind: ImageStream - apiVersion: v1 - metadata: - name: "${DEST_IMG_NAME}" - labels: - app: "${NAME}" -- apiVersion: v1 - kind: BuildConfig - metadata: - labels: - app: "${NAME}" - name: "${NAME}" - template: "${NAME}" - name: "${NAME}" - spec: - successfulBuildsHistoryLimit: 3 - failedBuildsHistoryLimit: 3 - triggers: - - type: ImageChange - - type: ConfigChange - runPolicy: SerialLatestOnly - source: - contextDir: "/openshift/docker/minio" - git: - ref: "${SRC_REPO_BRANCH}" - uri: "${SRC_REPO_URL}" - type: Git - strategy: - type: Docker - output: - to: - kind: ImageStreamTag - name: "${DEST_IMG_NAME}:${DEST_IMG_TAG}" diff --git a/gwells/openshift/ocp4/README.md b/gwells/openshift/ocp4/README.md deleted file mode 100644 index 6e29de07a..000000000 --- a/gwells/openshift/ocp4/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# OpenShift 4 Deployment - -## Prerequisites - -The Jenkins pipeline is designed to roll out GWELLS to dev, test and prod. Before beginning, it requires a Jenkins deployment, -Python, PostGIS, Minio, and pg_tileserv base images, and secrets in every namespace before it can run. The pipeline should roll everything else out. - - -### Jenkins -Note: See https://developer.gov.bc.ca/Cloud-Migration/Migrating-Your-BC-Gov-Jenkins-to-the-Cloud for background info on Jenkins in the OCP4 cluster. - -* Build the `jenkins-basic` provided in the above link. Ensure that the image has been outputted to the jenkins-basic imagestream in the tools project. -* Use `oc process` to process the Jenkins templates in `/openshift/ocp4/jenkins`. The same container image (built in step 1) can be used for both the primary and secondary. There are required parameters such as the Jenkins host name. -* Log in to Jenkins. Note the URL in the OpenShift Networking > Routes menu, or by using `oc get routes -o wide`. -* Add GitHub credentials to Jenkins. Use type `username/password`, with a GitHub token as the password. The `bcgov-csnr-cd` account can be used. -* Set up a Multibranch Pipeline for GWELLS. Ensure that only PRs from origin are accepted (no forks). If accepting forks, ensure that only forks from -users with write access to the repo are allowed. - -### Base images - -#### Python - -GWELLS uses a base image `ubi8/python-38` with GDAL installed. Since installing GDAL can take some time, it is not re-built and re-installed for every -pipeline run. The image should be built before running the pipeline. Other python packages will be installed during normal pipeline runs. - -The repo contains the BuildConfig used to build the base container image. 
In the `openshift/ocp4/docker/backend` directory, run `oc4 apply -f gwells-python.bc.yaml -n 26e83e-tools`. Start the build with `oc start-build`. - -#### PostgreSQL/PostGIS - -GWELLS uses PostgreSQL with the PostGIS extension. Import the Crunchy Data PostGIS image with `oc import-image --from=crunchydata/crunchy-postgres-gis:centos7-12.5-3.0-4.5.1` (check the repository at https://hub.docker.com/r/crunchydata/crunchy-postgres-gis/ -for an appropriate tag). - -Use `oc tag -n 26e83e-tools crunchy-postgres-gis:centos7-12.5-3.0-4.5.1 26e83e-dev/crunchy-postgres-gis:centos7-12.5-3.0-4.5.1` to copy the image into the dev, test and prod namespaces (note the `26e83e-dev` image namespace in the second argument - repeat for test and prod). This procedure allows importing a new version of the database image into the tools namespace without affecting existing environments, and then progressively testing it on dev and test before making it available to production. - -#### Minio - -Import the minio image into the tools repo using `oc import-image --from=minio/minio` (check https://hub.docker.com/r/minio/minio/). At this time the image need only be available in the tools namespace. - -#### pg_tileserv - -Import the pg_tileserv image into the tools repo using `oc import-image --from=pramsey/pg_tileserv` (check https://hub.docker.com/r/pramsey/pg_tileserv/ for the most up-to-date tag). At this time the image need only be available in the tools namespace. - -### image-puller roles - -The dev, test and prod namespace service accounts will need image pull roles. -Use `oc -n 26e83e-tools policy add-role-to-group system:image-puller system:serviceaccounts:26e83e-dev` (note the namespace at the end - repeat for test and prod). - -### Secrets / ConfigMaps - -The following secrets / configmaps need to be deployed to each environment namespace (dev/test/prod): - -| name | kind | -| ---- | ---- | -| gwells-django-secrets | Secret | -| gwells-database-secrets | Secret | -| gwells-e-licensing-secrets | Secret | -| gwells-minio-secrets | Secret | -| gwells-global-config | ConfigMap | - -These objects hold base/default config for each namespace. The Jenkins pipeline makes copies of these as (e.g.) `gwells-django-dev-pr-1110` or `gwells-django-prod` (depending on environment). - -### NetworkSecurityPolicies - -We need to create NetworkSecurityPolicy objects to define the rules for external and internal network communication. - -As a short-term fix (for migration), there are NetworkSecurityPolicies that mimic the OCP3 cluster. - -``` -oc process -f openshift/ocp4/jenkins/jenkins.nsp.yaml -p NAMESPACE= | oc apply -n -f - -``` - -### Backup jobs - -Unlike OCP3, where an NFS storage volume had to be manually provisioned, we can self-provision backup storage using the storage class `netapp-file-backup`. The Jenkinsfile has been updated to provision a volume for the minio and postgres backups to write to.
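A minimal end-to-end sketch of the image promotion and network policy steps above, assuming the `26e83e-*` namespaces and the image tags already used in this README (illustrative only, not the exact commands the Jenkinsfile runs; adjust names and tags for the actual environment):

```
# Import the PostGIS image into the tools namespace (pick a current tag from Docker Hub)
oc -n 26e83e-tools import-image crunchy-postgres-gis:centos7-12.5-3.0-4.5.1 \
  --from=crunchydata/crunchy-postgres-gis:centos7-12.5-3.0-4.5.1 --confirm

for env in dev test prod; do
  # Copy the image stream tag into the environment namespace
  oc -n 26e83e-tools tag crunchy-postgres-gis:centos7-12.5-3.0-4.5.1 \
    "26e83e-${env}/crunchy-postgres-gis:centos7-12.5-3.0-4.5.1"

  # Grant the environment's service accounts pull access to the tools namespace
  oc -n 26e83e-tools policy add-role-to-group system:image-puller \
    "system:serviceaccounts:26e83e-${env}"

  # Apply the short-term NetworkSecurityPolicies that mimic the OCP3 cluster
  oc process -f openshift/ocp4/jenkins/jenkins.nsp.yaml \
    -p "NAMESPACE=26e83e-${env}" | oc apply -n "26e83e-${env}" -f -
done
```

Driving all three environments from one loop keeps dev, test and prod configured identically.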
diff --git a/gwells/openshift/ocp4/backend.bc.json b/gwells/openshift/ocp4/backend.bc.json deleted file mode 100644 index 9a766e76a..000000000 --- a/gwells/openshift/ocp4/backend.bc.json +++ /dev/null @@ -1,161 +0,0 @@ -{ - "kind": "Template", - "apiVersion": "v1", - "metadata": { - "name": "gwells-backend-bc", - "creationTimestamp": null - }, - "parameters": [ - { - "name": "NAME_SUFFIX", - "displayName": "Name Suffix", - "description": "A suffix appended to all objects", - "required": true - }, - { - "name": "SOURCE_REPOSITORY_URL", - "required": true - }, - { - "name": "ENV_NAME", - "required": true - }, - { - "name": "SOURCE_REPOSITORY_REF", - "required": true - }, - { - "name": "APP_IMAGE_TAG", - "required": true - } - ], - "objects": [ - { - "kind": "ImageStream", - "apiVersion": "v1", - "metadata": { - "name": "gwells-python", - "labels": { - "appver": "gwells-base-image" - }, - "creationTimestamp": null, - "annotations": { - "description": "Python Base Image with GIS-related External Libraries" - } - }, - "spec": { - "lookupPolicy": { - "local": false - }, - "tags":[ - { - "name": "latest", - "annotations": null, - "from": { - "kind": "DockerImage", - "name": "26e83e-tools/gwells-python" - }, - "importPolicy": {}, - "referencePolicy": { - "type": "Source" - } - } - ] - } - }, - { - "kind": "ImageStream", - "apiVersion": "v1", - "metadata": { - "name": "gwells-application", - "creationTimestamp": null, - "labels": { - "base-name":"gwells" - }, - "annotations": { - "description": "Keeps track of changes in the application image" - } - }, - "spec": { - "lookupPolicy": { - "local": false - }, - "tags":[ ] - } - }, - { - "kind": "BuildConfig", - "apiVersion": "v1", - "metadata": { - "name": "gwells${NAME_SUFFIX}", - "creationTimestamp": null, - "labels": { - "appver": "gwells${NAME_SUFFIX}" - }, - "annotations": { - "description": "Defines how to build the application" - } - }, - "spec": { - "successfulBuildsHistoryLimit": 3, - "failedBuildsHistoryLimit": 3, - "runPolicy": "SerialLatestOnly", - "source": { - "type": "Git", - "git": { - "uri": "${SOURCE_REPOSITORY_URL}", - "ref": "${SOURCE_REPOSITORY_REF}" - }, - "contextDir": "app" - }, - "strategy": { - "type": "Source", - "sourceStrategy": { - "from": { - "kind": "ImageStreamTag", - "name": "gwells-python:v3" - }, - "env": [ - { - "name": "BUILD_LOGLEVEL", - "value": "4" - }, - { - "name": "PIP_INDEX_URL" - }, - { - "name": "UPGRADE_PIP_TO_LATEST", - "value": "true" - }, - { - "name": "JWT_PUBLIC_KEY", - "value": "-----BEGIN PUBLIC KEY-----\\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAjpPznS8NO5XNl395Xa/wJyhhMDMJUk8s2wrG/FQ9gZnRaCbm9YFYynZzeehkpTNbb+SsLBnh0Me5DKTSlt0Gm03ULXXW6FZzL3SCE1wTx6Trm+zQ1mx07aGDbv34OtK0HitToajZrnTsGQ0TloVbQladBM74S2K0ooveV7p2qIydFjtR+DTJGiOxSLvts+qsGn/Wr2l939SRpQa/10vpYJgCLsd6Bv/0v23DpmR8WbVkLh8e3rtI0XgsJ0ZFXR80DPt3fXX3gdrNdPRB+hpOR8IZMEUzhqGRg5VXP8Lp+bbaemFanTwlFD3aUfDlOcPekxYqQeEmS6ahA/6vCpjuGwIDAQAB\\n-----END PUBLIC KEY-----" - }, - { - "name": "JWT_AUDIENCE", - "value": "webapp-dev-local" - } - ] - } - }, - "output": { - "to": { - "kind": "ImageStreamTag", - "name": "gwells-application:${APP_IMAGE_TAG}" - } - }, - "resources": { - "limits": { - "cpu": "2", - "memory": "8Gi" - }, - "requests": { - "cpu": "1", - "memory": "4Gi" - } - }, - "nodeSelector": null - } - } - ] -} diff --git a/gwells/openshift/ocp4/backup.pvc.yaml b/gwells/openshift/ocp4/backup.pvc.yaml deleted file mode 100644 index 66ba9732f..000000000 --- a/gwells/openshift/ocp4/backup.pvc.yaml +++ /dev/null @@ -1,30 +0,0 @@ -apiVersion: v1 
-kind: Template -labels: - template: gwells-backup-pvc-template -metadata: - creationTimestamp: null - name: gwells-backup-pvc-template -parameters: - - description: Storage class for PVCs. - displayName: Storage class for PVCs. - name: STORAGE_CLASS - value: netapp-file-backup - - description: Size of volume. - displayName: Size of volume. - name: VOLUME_CAPACITY - value: 10Gi -objects: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: gwells-backups - annotations: - template.openshift.io.bcgov/create: "true" - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: ${VOLUME_CAPACITY} - storageClassName: ${STORAGE_CLASS} diff --git a/gwells/openshift/ocp4/jenkins/003-create-jobs.groovy b/gwells/openshift/ocp4/jenkins/003-create-jobs.groovy deleted file mode 100644 index 71424729c..000000000 --- a/gwells/openshift/ocp4/jenkins/003-create-jobs.groovy +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/env groovy - -// -// This needs to come before 003-register-github-webhooks.groovy -// - -import static jenkins.model.Jenkins.instance as jenkins - -import com.cloudbees.hudson.plugins.folder.computed.DefaultOrphanedItemStrategy -import org.jenkinsci.plugins.github_branch_source.* -import jenkins.scm.impl.trait.* -import jenkins.scm.api.mixin.* -import org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject -import jenkins.branch.BranchSource - -import com.adobe.jenkins.disable_github_multibranch_status.DisableStatusUpdateTrait -import jenkins.plugins.git.traits.WipeWorkspaceTrait -import org.csanchez.jenkins.plugins.kubernetes.KubernetesFolderProperty - -// Get configuration from jobs configmap -def repoOwner = new File('/var/run/configs/jobs/repo.owner').getText('UTF-8').trim() -def appRepoAdmin = new File('/var/run/configs/jobs/repo.name.Admin').getText('UTF-8').trim() -def appRepoApi = new File('/var/run/configs/jobs/repo.name.Api').getText('UTF-8').trim() -def appRepoPublic = new File('/var/run/configs/jobs/repo.name.Public').getText('UTF-8').trim() -def appName = new File('/var/run/configs/jobs/app.name').getText('UTF-8').trim() -def name = appName.toLowerCase().replaceAll("/[^A-Za-z0-9 ]/", "").replaceAll("\\s", "-") - -def githubCredentialsId = "github-account" - -def pullRequestTraits = [ - new ForkPullRequestDiscoveryTrait([ChangeRequestCheckoutStrategy.MERGE].toSet(),new ForkPullRequestDiscoveryTrait.TrustContributors()), - new DisableStatusUpdateTrait(), - new WipeWorkspaceTrait() -] - -def masterTraits = [ - new RegexSCMHeadFilterTrait("^(develop|test|master)"), - new BranchDiscoveryTrait(3), - new DisableStatusUpdateTrait(), - new WipeWorkspaceTrait() -] - -def hotfixTraits = [ - new RegexSCMHeadFilterTrait("^(hotfix)"), - new BranchDiscoveryTrait(3), - new DisableStatusUpdateTrait(), - new WipeWorkspaceTrait() -] - -core_jobs = [ new Expando(jobName: "dev-${name}-public", - displayName: "dev-${name}-public", - owner: repoOwner, - repo: appRepoPublic, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-develop", - traits: pullRequestTraits, - startJob: true), - new Expando(jobName: "dev-${name}-api", - displayName: "dev-${name}-api", - owner: repoOwner, - repo: appRepoApi, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-develop", - traits: pullRequestTraits, - startJob: true), - new Expando(jobName: "dev-${name}-admin", - displayName: "dev-${name}-admin", - owner: repoOwner, - repo: appRepoAdmin, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-develop", - traits: 
pullRequestTraits, - startJob: true), - new Expando(jobName: "cicd-${name}-public", - displayName: "cicd-${name}-public", - owner: repoOwner, - repo: appRepoPublic, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-cicd", - traits: masterTraits, - startJob: true), - new Expando(jobName: "cicd-${name}-api", - displayName: "cicd-${name}-api", - owner: repoOwner, - repo: appRepoApi, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-cicd", - traits: masterTraits, - startJob: true), - new Expando(jobName: "cicd-${name}-admin", - displayName: "cicd-${name}-admin", - owner: repoOwner, - repo: appRepoAdmin, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-cicd", - traits: masterTraits, - startJob: true), - new Expando(jobName: "hotfix-${name}-public", - displayName: "hotfix-${name}-public", - owner: repoOwner, - repo: appRepoPublic, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-hotfix", - traits: hotfixTraits, - startJob: true), - new Expando(jobName: "hotfix-${name}-api", - displayName: "hotfix-${name}-api", - owner: repoOwner, - repo: appRepoApi, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-hotfix", - traits: hotfixTraits, - startJob: true), - new Expando(jobName: "hotfix-${name}-admin", - displayName: "hotfix-${name}-admin", - owner: repoOwner, - repo: appRepoAdmin, - credentialsId: githubCredentialsId, - jenkinsFilePath: "Jenkinsfile-hotfix", - traits: hotfixTraits, - startJob: true), - ] - - -jobs = jenkins.getAllItems() - -for (core_job in core_jobs) { - - def shouldCreate = true - jobs.each { j -> - if (j instanceof org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject && - j.fullName.contains(core_job.jobName)) { - println '----> Already have a job for ' + j.fullName + ' of type:' + j.getClass() - println j - shouldCreate = false - } - } - if (!shouldCreate) { - continue - } - println '----> configuring job ' + core_job.jobName - - // start by creating the toplevel folder - def folder = jenkins.createProject(WorkflowMultiBranchProject, core_job.jobName) - - // Configure the Github SCM integration - def scm = new GitHubSCMSource(core_job.owner, core_job.repo) - scm.credentialsId = core_job.credentialsId - scm.traits = core_job.traits - folder.getSourcesList().add(new BranchSource(scm)) - - folder.displayName = core_job.displayName - - // Delete orphan items after 5 days - folder.orphanedItemStrategy = new DefaultOrphanedItemStrategy(true, "-1", "-1") - - // Configure what Jenkinsfile we should be looking for - folder.projectFactory.scriptPath = core_job.jenkinsFilePath - - folder.addProperty(new KubernetesFolderProperty()) - - folder.triggers.clear() - - jenkins.save() - - println '----> configured job ' + core_job.jobName - - if (core_job.startJob) { - Thread.start { - sleep 3000 // 3 seconds - println '----> Running Github organization scan for job ' + core_job.jobName - folder.scheduleBuild() - } - } -} - - -println '<--- Create Jobs: jobs created.' 
\ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/README.md b/gwells/openshift/ocp4/jenkins/README.md deleted file mode 100644 index 89f172ea7..000000000 --- a/gwells/openshift/ocp4/jenkins/README.md +++ /dev/null @@ -1,17 +0,0 @@ - -# Build Base image -``` -export OC_P_SUFFIX='-1790' OC_P_VERSION='v2-1790' -oc -n 26e83e-tools process -f https://raw.githubusercontent.com/BCDevOps/openshift-components/333cd1a7dbe5efe0f692ccd4a65f11561e1cc240/cicd/jenkins-basic/openshift/build.yaml SOURCE_REPOSITORY_REF=954d913cfa8a0d32bfb57bab62319658454c1d06 SUFFIX=${OC_P_SUFFIX} VERSION=${OC_P_VERSION} | jq '(.items[] | .metadata.labels) += {"app.kubernetes.io/part-of": "jenkins"}' | jq --arg instance "jenkins${OC_P_SUFFIX}" '(.items[] | select(.kind != "ImageStream") | .metadata.labels) += {"app.kubernetes.io/instance": $instance}' | jq 'del(.items[] | select(.kind == "BuildConfig") | .spec.triggers)' | oc -n 26e83e-tools apply -f - - -oc -n 26e83e-tools start-build "jenkins-basic${OC_P_SUFFIX}" --wait=true -``` - -# Build Final image - -oc -n 26e83e-tools process -f jenkins-main-build.yaml SUFFIX=${OC_P_SUFFIX} SOURCE_IMAGE_STREAM_TAG=jenkins-basic:${OC_P_VERSION} VERSION=${OC_P_VERSION} | jq '(.items[] | .metadata.labels) += {"app.kubernetes.io/part-of": "jenkins"}' | jq --arg instance "jenkins${OC_P_SUFFIX}" '(.items[] | select(.kind != "ImageStream") | .metadata.labels) += {"app.kubernetes.io/instance": $instance}' | jq 'del(.items[] | select(.kind == "BuildConfig") | .spec.triggers)' | oc -n 26e83e-tools apply -f - - -(cd "$(git rev-parse --show-toplevel)" && oc -n 26e83e-tools start-build jenkins-main${OC_P_SUFFIX} --wait=true --from-repo=.) - -# Deploy -oc -n 26e83e-tools process -f jenkins-deploy.yaml NAME=jenkins SUFFIX= "JENKINS_IMAGE_STREAM_NAME=jenkins-main:${OC_P_VERSION}" GH_USERNAME=blah GH_PASSWORD=blah | jq 'del(.items[] | select (.kind == "Secret" or .kind == "PersistentVolumeClaim"))' | jq '(.items[] | .metadata.labels) += {"app.kubernetes.io/part-of": "jenkins"}' | jq --arg instance "jenkins-prod" '(.items[] | select(.kind != "ImageStream") | .metadata.labels) += {"app.kubernetes.io/instance": $instance}' | oc -n 26e83e-tools apply -f - --dry-run=client diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/Dockerfile b/gwells/openshift/ocp4/jenkins/basic/docker/Dockerfile deleted file mode 100644 index 469523481..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/Dockerfile +++ /dev/null @@ -1,118 +0,0 @@ -FROM registry.access.redhat.com/ubi8/ubi:8.7 - -# https://blog.csanchez.org/2017/05/31/running-a-jvm-in-a-container-without-getting-killed/ -ENV JENKINS_VERSION=2.387.1 \ - HOME=/var/lib/jenkins \ - JENKINS_REF_HOME=/opt/jenkins \ - JENKINS_HOME=/var/lib/jenkins \ - JENKINS_DATA=/var/jenkins-data \ - JENKINS_UC=https://updates.jenkins.io \ - OPENSHIFT_JENKINS_IMAGE_VERSION=4.10 \ - LANG=en_US.UTF-8 \ - LC_ALL=en_US.UTF-8 \ - JENKINS_WEBROOT_DIR=/var/cache/jenkins/war--daemon \ - JENKINS_WAR=/usr/share/java/jenkins.war - -ARG JAVA_TOOL_OPTIONS="-XX:+UnlockExperimentalVMOptions -XX:+UseContainerSupport -XX:MaxRAMFraction=2" -ARG OC_VERSION=4.10.9 -ARG OC_QUALIFIER=0cbc58b-linux-64bit -ARG TINI_VERSION=v0.18.0 -ARG TINI_QUALIFIER=amd64 -ARG SWARM_CLIENT_VERSION=3.39 - -RUN set -x && yum -h && \ - curl -so /etc/yum.repos.d/jenkins.repo https://pkg.jenkins.io/redhat-stable/jenkins.repo && \ - rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key && \ - yum -y --disableplugin=subscription-manager --enablerepo=ubi-8-appstream-rpms 
--enablerepo=jenkins install which gpg java-17-openjdk-devel shadow-utils "jenkins-$JENKINS_VERSION" zip unzip bzip2 rsync elfutils git --nodocs && \ - yum clean all && \ - echo "unset BASH_ENV PROMPT_COMMAND ENV" > /usr/local/bin/scl_enable && \ - echo "source scl_source enable rh-git29" >> /usr/local/bin/scl_enable && \ - chgrp -R 0 /usr/local/bin && \ - chmod -R g+rx /usr/local/bin && \ - yum clean all && \ - rpm -qa - -# When bash is started non-interactively, to run a shell script, for example it -# looks for this variable and source the content of this file. This will enable -# the SCL for all scripts without need to do 'scl enable'. -ENV BASH_ENV=/usr/local/bin/scl_enable \ - ENV=/usr/local/bin/scl_enable \ - PROMPT_COMMAND=". /usr/local/bin/scl_enable" - -#OC command line -RUN set -x && \ - mkdir /tmp/oc && \ - curl -SL https://mirror.openshift.com/pub/openshift-v4/clients/ocp/${OC_VERSION}/openshift-client-linux-${OC_VERSION}.tar.gz -o /tmp/oc/openshift-origin-client-tools.tar.gz && \ - tar -xzf /tmp/oc/openshift-origin-client-tools.tar.gz -C /usr/local/bin/ && \ - chmod +x /usr/local/bin/oc && \ - rm -rf /tmp/oc - -RUN set -x && \ - curl -fsSL https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 -o /usr/local/bin/jq && \ - chmod +x /usr/local/bin/jq - -RUN set -x && \ - curl -sLo /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.2/dumb-init_1.2.2_amd64 && \ - chmod +x /usr/local/bin/dumb-init - -RUN curl -fsSL https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-static-${TINI_QUALIFIER} -o /sbin/tini \ - && curl -fsSL https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-static-${TINI_QUALIFIER}.asc -o /sbin/tini.asc \ - && gpg --batch --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 595E85A6B1B4779EA4DAAEC70B588DFF0527A9B7 \ - && gpg --verify /sbin/tini.asc \ - && rm -rf /sbin/tini.asc /root/.gnupg \ - && chmod +x /sbin/tini - -RUN set -x && \ - curl -Lo /usr/lib/jenkins/swarm-client.jar --create-dirs https://repo.jenkins-ci.org/releases/org/jenkins-ci/plugins/swarm-client/${SWARM_CLIENT_VERSION}/swarm-client-${SWARM_CLIENT_VERSION}.jar && \ - chmod 644 /usr/lib/jenkins/swarm-client.jar - -RUN set -x && \ - curl -sLo /usr/lib/jenkins/password-encoder.jar --create-dirs https://github.com/openshift/jenkins/raw/f080514d4370c64260e2301e76f30061c5dcc636/2/contrib/openshift/password-encoder.jar && \ - chmod 644 /usr/lib/jenkins/password-encoder.jar - -#RUN set -x && \ -# curl -sLo /usr/local/bin/jenkins-install-plugins https://raw.githubusercontent.com/openshift/jenkins/master/2/contrib/jenkins/install-plugins.sh && \ -# mkdir -p ${JENKINS_REF_HOME} && \ -# chmod 555 /usr/local/bin/jenkins-install-plugins - -COPY ./contrib/bin /usr/local/bin - -RUN set -x && \ - java -version && \ - mkdir -p ${JENKINS_REF_HOME} && \ - mkdir -p $JENKINS_HOME && \ - chmod -R 664 $JENKINS_HOME && \ - chgrp -R 0 $JENKINS_HOME && \ - chmod -R g+rwX $JENKINS_HOME && \ - chmod 664 /etc/passwd && \ - chmod -R 664 /etc/sysconfig/jenkins && \ - chgrp -R 0 /var/log/jenkins && \ - chmod -R 664 /var/log/jenkins && \ - chgrp -R 0 /usr/local/bin && \ - chmod -R g+rx /usr/local/bin && \ - chgrp -R 0 /var/log && \ - chmod -R g+rwX /var/log && \ - chgrp -R 0 /var/cache/jenkins && \ - chmod -R g+rwX /var/cache/jenkins - - -LABEL io.k8s.description="Jenkins is a continuous integration server" \ - io.k8s.display-name="Jenkins 2" \ - io.openshift.tags="jenkins,jenkins2,ci" \ - io.openshift.expose-services="8080:http" - -WORKDIR $HOME - 
-COPY ./contrib/jenkins/configuration $JENKINS_REF_HOME - -RUN set -x && \ - chmod -R g+rx /usr/local/bin && \ - chgrp -R 0 $JENKINS_REF_HOME && \ - chmod -R 644 $JENKINS_REF_HOME && \ - chmod -R g+rwX $JENKINS_REF_HOME && \ - (export REF=$JENKINS_REF_HOME/plugins JENKINS_WAR=/usr/share/java/jenkins.war; /usr/local/bin/jenkins-install-plugins $JENKINS_REF_HOME/plugins.txt) && \ - chmod -R 774 $JENKINS_REF_HOME/plugins - -VOLUME ["${JENKINS_DATA}"] - -USER 1001 diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/bin/container-entrypoint b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/bin/container-entrypoint deleted file mode 100644 index 9d8ad4d3c..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/bin/container-entrypoint +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -exec "$@" diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/bin/jenkins-install-plugins b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/bin/jenkins-install-plugins deleted file mode 100644 index 48191be2f..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/bin/jenkins-install-plugins +++ /dev/null @@ -1,441 +0,0 @@ -#! /bin/bash -eu -# -# Originally copied from https://github.com/jenkinsci/docker -# You can set JENKINS_UC to change the default URL to Jenkins update center -# -# Usage: -# -# FROM openshift/jenkins-2-centos7 -# COPY plugins.txt /plugins.txt -# RUN /usr/local/bin/install-plugins.sh /plugins.txt -# -# The format of 'plugins.txt. is: -# -# pluginId:pluginVersion - -set -o pipefail -verbose=0 -if [[ "${VERBOSE:-false}" == "true" || "${VERBOSE:-false}" == "1" ]]; then - verbose=1 - set -x -fi - -# BEGIN - From https://raw.githubusercontent.com/jenkinsci/docker/master/jenkins-support -# compare if version1 < version2 -versionLT() { - local v1; v1=$(echo "$1" | cut -d '-' -f 1 ) - local q1; q1=$(echo "$1" | cut -s -d '-' -f 2- ) - local v2; v2=$(echo "$2" | cut -d '-' -f 1 ) - local q2; q2=$(echo "$2" | cut -s -d '-' -f 2- ) - if [ "$v1" = "$v2" ]; then - if [ "$q1" = "$q2" ]; then - return 1 - else - if [ -z "$q1" ]; then - return 1 - else - if [ -z "$q2" ]; then - return 0 - else - [ "$q1" = "$(echo -e "$q1\n$q2" | sort -V | head -n1)" ] - fi - fi - fi - else - [ "$v1" = "$(echo -e "$v1\n$v2" | sort -V | head -n1)" ] - fi -} - -# returns a plugin version from a plugin archive -get_plugin_version() { - local archive; archive=$1 - local version; version=$(unzip -p "$archive" META-INF/MANIFEST.MF | grep "^Plugin-Version: " | sed -e 's#^Plugin-Version: ##') - version=${version%%[[:space:]]} - echo "$version" -} - -# Copy files from /usr/share/jenkins/ref into $JENKINS_HOME -# So the initial JENKINS-HOME is set with expected content. -# Don't override, as this is just a reference setup, and use from UI -# can then change this, upgrade plugins, etc. 
-copy_reference_file() { - f="${1%/}" - b="${f%.override}" - rel="${b:23}" - version_marker="${rel}.version_from_image" - dir=$(dirname "${b}") - local action; - local reason; - local container_version; - local image_version; - local marker_version; - local log; log=false - if [[ ${rel} == plugins/*.jpi ]]; then - container_version=$(get_plugin_version "$JENKINS_HOME/${rel}") - image_version=$(get_plugin_version "${f}") - if [[ -e $JENKINS_HOME/${version_marker} ]]; then - marker_version=$(cat "$JENKINS_HOME/${version_marker}") - if versionLT "$marker_version" "$container_version"; then - action="SKIPPED" - reason="Installed version ($container_version) has been manually upgraded from initial version ($marker_version)" - log=true - else - if [[ "$image_version" == "$container_version" ]]; then - action="SKIPPED" - reason="Version from image is the same as the installed version $image_version" - else - if versionLT "$image_version" "$container_version"; then - action="SKIPPED" - log=true - reason="Image version ($image_version) is older than installed version ($container_version)" - else - action="UPGRADED" - log=true - reason="Image version ($image_version) is newer than installed version ($container_version)" - fi - fi - fi - else - if [[ -n "$TRY_UPGRADE_IF_NO_MARKER" ]]; then - if [[ "$image_version" == "$container_version" ]]; then - action="SKIPPED" - reason="Version from image is the same as the installed version $image_version (no marker found)" - # Add marker for next time - echo "$image_version" > "$JENKINS_HOME/${version_marker}" - else - if versionLT "$image_version" "$container_version"; then - action="SKIPPED" - log=true - reason="Image version ($image_version) is older than installed version ($container_version) (no marker found)" - else - action="UPGRADED" - log=true - reason="Image version ($image_version) is newer than installed version ($container_version) (no marker found)" - fi - fi - fi - fi - if [[ ! -e $JENKINS_HOME/${rel} || "$action" == "UPGRADED" || $f = *.override ]]; then - action=${action:-"INSTALLED"} - log=true - mkdir -p "$JENKINS_HOME/${dir:23}" - # if done on rhel, we may need to override a link to /usr/lib/jenkins, so include --remove-destination - cp --remove-destination -r "${f}" "$JENKINS_HOME/${rel}"; - # pin plugins on initial copy - touch "$JENKINS_HOME/${rel}.pinned" - echo "$image_version" > "$JENKINS_HOME/${version_marker}" - reason=${reason:-$image_version} - else - action=${action:-"SKIPPED"} - fi - else - if [[ ! 
-e $JENKINS_HOME/${rel} || $f = *.override ]] - then - action="INSTALLED" - log=true - mkdir -p "$JENKINS_HOME/${dir:23}" - # if done on rhel, we may need to override a link to /usr/lib/jenkins, so include --remove-destination - cp --remove-destination -r "${f}" "$JENKINS_HOME/${rel}"; - else - action="SKIPPED" - fi - fi - if [[ -n "$VERBOSE" || "$log" == "true" ]]; then - if [ -z "$reason" ]; then - echo "$action $rel" >> "$COPY_REFERENCE_FILE_LOG" - else - echo "$action $rel : $reason" >> "$COPY_REFERENCE_FILE_LOG" - fi - fi -} -# END - From https://raw.githubusercontent.com/jenkinsci/docker/master/jenkins-support - -REF_DIR=${REF:-/opt/openshift/plugins} -FAILED="$REF_DIR/failed-plugins.txt" - -JENKINS_WAR=/usr/share/java/jenkins.war - -INCREMENTAL_BUILD_ARTIFACTS_DIR="/tmp/artifacts" - -function getLockFile() { - echo -n "$REF_DIR/${1}.lock" -} - -function getArchiveFilename() { - echo -n "$REF_DIR/${1}.jpi" -} - -function download() { - local plugin originalPlugin version lock ignoreLockFile - plugin="$1" - version="${2:-latest}" - ignoreLockFile="${3:-}" - lock="$(getLockFile "$plugin")" - - if [[ $ignoreLockFile ]] || mkdir "$lock" &>/dev/null; then - if ! doDownload "$plugin" "$version"; then - # some plugin don't follow the rules about artifact ID - # typically: docker-plugin - originalPlugin="$plugin" - plugin="${plugin}-plugin" - if ! doDownload "$plugin" "$version"; then - echo "Failed to download plugin: $originalPlugin or $plugin" >&2 - echo "Not downloaded: ${originalPlugin}" >> "$FAILED" - return 1 - fi - fi - - if ! checkIntegrity "$plugin"; then - echo "Downloaded file is not a valid ZIP: $(getArchiveFilename "$plugin")" >&2 - echo "Download integrity: ${plugin}" >> "$FAILED" - return 1 - fi - - resolveDependencies "$plugin" - fi -} - -function doDownload() { - local plugin version url jpi - plugin="$1" - version="$2" - jpi="$(getArchiveFilename "$plugin")" - - # If plugin already exists and is the same version do not download - if test -f "$jpi" && unzip -p "$jpi" META-INF/MANIFEST.MF | tr -d '\r' | grep "^Plugin-Version: ${version}$" > /dev/null; then - echo "Using provided plugin: $plugin" - return 0 - fi - - # Check if the plugin is cached and in correct version; if so, use it instead of downloading - # Some plugins do not follow the naming conventions and include the "-plugin" suffix; both cases need to be covered - for pluginFilename in "$plugin.jpi" "$plugin-plugin.jpi"; do - local cachedPlugin="$INCREMENTAL_BUILD_ARTIFACTS_DIR/plugins/$pluginFilename" - if test -f "$cachedPlugin" && [[ $(get_plugin_version "$cachedPlugin") == "$version" ]]; then - echo "Copying plugin from a cache created by s2i: $cachedPlugin" - cp "$cachedPlugin" "$jpi" - return 0 - fi - done - - if [[ "$version" == "latest" && -n "$JENKINS_UC_LATEST" ]]; then - # If version-specific Update Center is available, which is the case for LTS versions, - # use it to resolve latest versions. 
- url="$JENKINS_UC_LATEST/latest/${plugin}.hpi" - elif [[ "$version" == "experimental" && -n "$JENKINS_UC_EXPERIMENTAL" ]]; then - # Download from the experimental update center - url="$JENKINS_UC_EXPERIMENTAL/latest/${plugin}.hpi" - else - JENKINS_UC_DOWNLOAD=${JENKINS_UC_DOWNLOAD:-"$JENKINS_UC/download"} - url="$JENKINS_UC_DOWNLOAD/plugins/$plugin/$version/${plugin}.hpi" - fi - - echo "Downloading plugin: $plugin from $url" - curl --connect-timeout "${CURL_CONNECTION_TIMEOUT:-20}" --retry "${CURL_RETRY:-5}" --retry-delay "${CURL_RETRY_DELAY:-0}" --retry-max-time "${CURL_RETRY_MAX_TIME:-60}" -s -f -L "$url" -o "$jpi" - return $? -} - -function checkIntegrity() { - local plugin jpi - plugin="$1" - jpi="$(getArchiveFilename "$plugin")" - - zip -T "$jpi" >/dev/null - return $? -} - -function resolveDependencies() { - local plugin jpi dependencies - plugin="$1" - jpi="$(getArchiveFilename "$plugin")" - - set +o pipefail - dependencies="$(unzip -p "$jpi" META-INF/MANIFEST.MF | tr -d '\r' | tr '\n' '|' | sed -e 's#| ##g' | tr '|' '\n' | grep "^Plugin-Dependencies: " | sed -e 's#^Plugin-Dependencies: ##')" - set -o pipefail - - if [[ ! $dependencies ]]; then - echo " > $plugin has no dependencies" - return - fi - - echo " > $plugin depends on $dependencies" - - IFS=',' read -a array <<< "$dependencies" - - for d in "${array[@]}" - do - plugin="$(cut -d':' -f1 - <<< "$d")" - # - # Note, matrix-auth plugin notes cloudbees-folder as optional in the archive, but then failed to - # load, citing a dependency that is too old, during testing ... so we will download optional dependencies - # - local versionFromPluginParam - if [[ $d == *"resolution:=optional"* ]]; then - echo "Examining optional dependency $plugin" - optional_jpi="$(getArchiveFilename "$plugin")" - if [ ! 
-f "${optional_jpi}" ]; then - echo "Optional dependency $plugin not installed already, skipping" - continue - fi - echo "Optional dependency $plugin already installed, need to determine if it is at a sufficient version" - versionFromPluginParam="$(cut -d';' -f1 - <<< "$d")" - else - versionFromPluginParam=$d - fi - local pluginInstalled - local minVersion; minVersion=$(versionFromPlugin "${versionFromPluginParam}") - - set +o pipefail - local filename; filename=$(getArchiveFilename "$plugin") - local previouslyDownloadedVersion; previouslyDownloadedVersion=$(get_plugin_version $filename) - set -o pipefail - - # ${bundledPlugins} checks for plugins bundled in the jenkins.war file; per - # https://wiki.jenkins-ci.org/display/JENKINS/Bundling+plugins+with+Jenkins this is getting - # phased out, but we are keeping this check in for now while that transition bakes a bit more - if pluginInstalled="$(echo "${bundledPlugins}" | grep "^${plugin}:")"; then - pluginInstalled="${pluginInstalled//[$'\r']}" - # get the version of the plugin bundled - local versionInstalled; versionInstalled=$(versionFromPlugin "${pluginInstalled}") - # if the bundled plugins is older than the minimum version needed for the dependency, - # download the dependence; passing "true" is needed for "download" to replace the existing dependency - if versionLT "${versionInstalled}" "${minVersion}"; then - echo "Upgrading bundled dependency $d ($minVersion > $versionInstalled)" - download "$plugin" "$minVersion" "true" - else - echo "Skipping already bundled dependency $d ($minVersion <= $versionInstalled)" - fi - # bypass further processing if a bundled plugin - continue - fi - - # if the dependency plugin has yet to be downloaded (hence the var is not set) download - if [[ -z "${previouslyDownloadedVersion:-}" ]]; then - echo "Downloading dependency plugin $plugin version $minVersion that has yet to be installed" - download "$plugin" "$minVersion" - else - # get the version of the dependency plugin already downloaded; if not recent enough, download - # the minimum version required; the "true" parameter is need for "download" to overwrite the existing - # version of the plugin - if versionLT "${previouslyDownloadedVersion}" "${minVersion}"; then - echo "Upgrading previously downloaded plugin $plugin at $previouslyDownloadedVersion to $minVersion" - download "$plugin" "$minVersion" "true" - fi - fi - done - wait -} - -function bundledPlugins() { - if [ -f $JENKINS_WAR ] - then - TEMP_PLUGIN_DIR=/tmp/plugintemp.$$ - for i in $(jar tf $JENKINS_WAR | egrep '[^detached-]plugins.*\..pi' | sort) - do - rm -fr $TEMP_PLUGIN_DIR - mkdir -p $TEMP_PLUGIN_DIR - PLUGIN=$(basename "$i"|cut -f1 -d'.') - (cd $TEMP_PLUGIN_DIR;jar xf "$JENKINS_WAR" "$i";jar xvf "$TEMP_PLUGIN_DIR/$i" META-INF/MANIFEST.MF >/dev/null 2>&1) - VER=$(egrep -i Plugin-Version "$TEMP_PLUGIN_DIR/META-INF/MANIFEST.MF"|cut -d: -f2|sed 's/ //') - echo "$PLUGIN:$VER" - done - rm -fr $TEMP_PLUGIN_DIR - else - echo "ERROR file not found: $JENKINS_WAR" - exit 1 - fi -} - -function versionFromPlugin() { - local plugin=$1 - if [[ $plugin =~ .*:.* ]]; then - echo "${plugin##*:}" - else - echo "latest" - fi - -} - -function installedPlugins() { - for f in "$REF_DIR"/*.jpi; do - echo "$(basename "$f" | sed -e 's/\.jpi//'):$(get_plugin_version "$f")" - done -} - -function jenkinsMajorMinorVersion() { - if [[ -f "$JENKINS_WAR" ]]; then - local version major minor - version="$(/etc/alternatives/java -jar $JENKINS_WAR --version)" - major="$(echo "$version" | cut -d '.' 
-f 1)" - minor="$(echo "$version" | cut -d '.' -f 2)" - echo "$major.$minor" - else - echo "ERROR file not found: $JENKINS_WAR" - return 1 - fi -} - -main() { - local plugin version - - mkdir -p "$REF_DIR" || exit 1 - - for file in $@; do - # clean up any dos file injected carriage returns - sed -i 's/\r$//' $file - done - - # Create lockfile manually before first run to make sure any explicit version set is used. - echo "Creating initial locks..." - for plugin in `cat $@ | grep -v ^#`; do - if [ -z $plugin ]; then - continue - fi - echo "Locking $plugin" - mkdir "$(getLockFile "${plugin%%:*}")" - done - - echo -e "\nAnalyzing war..." - bundledPlugins="$(bundledPlugins)" - - # Check if there's a version-specific update center, which is the case for LTS versions - jenkinsVersion="$(jenkinsMajorMinorVersion)" - if curl -fsL -o /dev/null "$JENKINS_UC/$jenkinsVersion"; then - JENKINS_UC_LATEST="$JENKINS_UC/$jenkinsVersion" - echo "Using version-specific update center: $JENKINS_UC_LATEST..." - else - JENKINS_UC_LATEST= - fi - - echo -e "\nDownloading plugins..." - for plugin in `cat $@ | grep -v ^#`; do - if [ -z $plugin ]; then - continue - fi - version="" - if [[ $plugin =~ .*:.* ]]; then - version=$(versionFromPlugin "${plugin}") - plugin="${plugin%%:*}" - fi - - download "$plugin" "$version" "true" - done - wait - - echo - echo "WAR bundled plugins:" - echo "${bundledPlugins}" - echo - echo "Installed plugins:" - installedPlugins - - if [[ -f $FAILED ]]; then - echo -e "\nSome plugins failed to download!\n$(<"$FAILED")" >&2 - exit 1 - fi - - echo -e "\nCleaning up locks" - rm -r "$REF_DIR"/*.lock -} - -main "$@" diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/bin/jenkins-run b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/bin/jenkins-run deleted file mode 100644 index 10e6f579b..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/bin/jenkins-run +++ /dev/null @@ -1,148 +0,0 @@ -#!/bin/sh - -source /usr/local/bin/jenkins-util-lib.sh - -generate_passwd_file - -JENKINS_WAR="/usr/share/java/jenkins.war" -JENKINS_PORT="8080" -JENKINS_DEBUG_LEVEL="5" -JENKINS_HANDLER_MAX="100" -JENKINS_HANDLER_IDLE="20" -JENKINS_USER="jenkins" - -#JENKINS_CONFIG=/etc/sysconfig/jenkins -#[ -f "$JENKINS_CONFIG" ] && . "$JENKINS_CONFIG" - -JENKINS_JAVA_CMD="/usr/bin/java" -CONTAINER_MEMORY_IN_BYTES=$(cat /sys/fs/cgroup/memory/memory.limit_in_bytes) -CONTAINER_MEMORY_IN_MB=$((CONTAINER_MEMORY_IN_BYTES/2**20)) - -#Container Memory Limit - 5 Megabytes -JENKINS_MAX_METASPACE_SIZE=$(awk '{ printf "%d", (($1 / 1024) - (1024*5)) }' < /sys/fs/cgroup/memory/memory.limit_in_bytes) - -# jstat -gc 1 - -if [[ -z "${JAVA_TOOL_OPTIONS}" ]]; then - # these options will automatically be picked up by any JVM process but can - # be overridden on that process' command line. - JAVA_TOOL_OPTIONS="-XX:+UnlockExperimentalVMOptions -XX:+UseContainerSupport -Dsun.zip.disableMemoryMapping=true" - export JAVA_TOOL_OPTIONS -fi - -# -XX:MetaspaceSize= -# -XX:MaxMetaspaceSize= - -if [[ -z "$JENKINS_MEM_OPTS" ]]; then - JENKINS_MEM_OPTS="-XX:MaxRAMFraction=2 -XX:MaxMetaspaceSize=${JENKINS_MAX_METASPACE_SIZE}k" -fi -JENKINS_MEM_OPTS="${JENKINS_MEM_OPTS} ${JENKINS_MEM_OPTS_APPEND}" - -if [[ -z "$JAVA_GC_OPTS" ]]; then - # See https://developers.redhat.com/blog/2014/07/22/dude-wheres-my-paas-memory-tuning-javas-footprint-in-openshift-part-2/ . 
- # The values are aggressively set with the intention of relaxing GC CPU time - # restrictions to enable it to free as much as possible, as well as - # encouraging the GC to free unused heap memory back to the OS. - JAVA_GC_OPTS="-XshowSettings:vm -XX:+PrintFlagsFinal -XX:NativeMemoryTracking=summary -XX:+UseParallelGC -XX:MinHeapFreeRatio=5 -XX:MaxHeapFreeRatio=19 -XX:GCTimeRatio=4 -XX:AdaptiveSizePolicyWeight=90 -XX:ParallelGCThreads=2" -fi -JAVA_GC_OPTS="${JAVA_GC_OPTS} ${JAVA_GC_OPTS_APPEND}" - -if [[ "${USE_JAVA_DIAGNOSTICS}" ]]; then - JAVA_DIAGNOSTICS="-XX:+IgnoreUnrecognizedVMOptions -XX:+PrintGC -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+PrintGCDetails -XX:+UnlockDiagnosticVMOptions -Xloggc:/var/log/jenkins/gc.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=5 -XX:GCLogFileSize=100M" - JAVA_DIAGNOSTICS="${JAVA_DIAGNOSTICS} ${JAVA_DIAGNOSTICS_APPEND}" -fi - -if [[ -z "${JENKINS_OPTIONS}" ]]; then - JENKINS_OPTIONS="-Djava.awt.headless=true -Duser.timezone=America/Vancouver -Dhudson.model.DirectoryBrowserSupport.CSP= -Dhudson.model.UpdateCenter.never=true -Dhudson.Util.noSymLink=true -Djenkins.model.Jenkins.buildsDir=\${JENKINS_HOME}/builds/\${ITEM_FULL_NAME}" - -fi - -JENKINS_OPTIONS="${JENKINS_OPTIONS} ${JENKINS_OPTIONS_APPEND}" - -if [[ -z "${JENKINS_JAVA_OPTIONS}" ]]; then - JENKINS_JAVA_OPTIONS="$JENKINS_MEM_OPTS $JAVA_GC_OPTS $JAVA_DIAGNOSTICS -Dfile.encoding=UTF8 -Djavamelody.displayed-counters=log,error -XX:ErrorFile=/var/lib/jenkins/logs/jvm.error.log -showversion $JENKINS_OPTIONS" -fi - -JENKINS_JAVA_OPTIONS="${JENKINS_JAVA_OPTIONS} ${JENKINS_JAVA_OPTIONS_APPEND}" - -echo "JAVA_GC_OPTS=${JAVA_GC_OPTS}" -echo "JAVA_DIAGNOSTICS=${JAVA_DIAGNOSTICS}" -echo "JENKINS_OPTIONS=${JENKINS_OPTIONS}" -echo "JENKINS_JAVA_OPTIONS=${JENKINS_JAVA_OPTIONS}" - -#JENKINS_JAVA_OPTIONS="${JENKINS_JAVA_OPTIONS:--XX:+UnlockExperimentalVMOptions -XX:+UseContainerSupport -XX:MaxRAMFraction=2 -XX:MaxHeapFreeRatio=20 -XX:MinHeapFreeRatio=10}" - - -if [ ! -z "${JENKINS_DATA}" ]; then - #"${JENKINS_JAVA_CMD}" $JENKINS_JAVA_OPTIONS -XshowSettings:vm -version - echo "Preparing persistent folder structure" - mkdir -p $JENKINS_DATA/jobs - mkdir -p $JENKINS_DATA/logs - mkdir -p $JENKINS_DATA/builds - mkdir -p $JENKINS_DATA/secrets - mkdir -p $JENKINS_DATA/users - - ln -sf $JENKINS_DATA/jobs $JENKINS_HOME/jobs - ln -sf $JENKINS_DATA/logs $JENKINS_HOME/logs - ln -sf $JENKINS_DATA/builds $JENKINS_HOME/builds - ln -sf $JENKINS_DATA/secrets $JENKINS_HOME/secrets - ln -sf $JENKINS_DATA/users $JENKINS_HOME/users - - # touch $JENKINS_DATA/secret.key - ln -sf $JENKINS_DATA/secret.key $JENKINS_HOME/secret.key - # touch $JENKINS_DATA/credentials.xml - ln -sf $JENKINS_DATA/credentials.xml $JENKINS_HOME/credentials.xml - # touch $JENKINS_DATA/queue.xml - ln -sf $JENKINS_DATA/queue.xml $JENKINS_HOME/queue.xml -fi - -#Copy all configuration files to JENKINS_HOME -rsync -av --no-o --no-g --no-perms --keep-dirlinks --no-acls --no-xattrs --no-super --omit-dir-times --exclude 'plugins' $JENKINS_REF_HOME/ $JENKINS_HOME/ -chmod -R g+rw $JENKINS_HOME - -# if file/directory exists, and it is not a symbolic link, delete it -[[ -e "$JENKINS_HOME/plugins" && ! -L "$JENKINS_HOME/plugins" ]] && rm -rf $JENKINS_HOME/plugins -# if symbolic link exists -[[ -L "$JENKINS_HOME/plugins" ]] && unlink $JENKINS_HOME/plugins -# if a symbolic link does not exist, create it -[[ ! 
-L "$JENKINS_HOME/plugins" ]] && ln -sf $JENKINS_REF_HOME/plugins $JENKINS_HOME/plugins - -export JENKINS_WEBROOT_DIR='/var/cache/jenkins/war--daemon' - -unzip -qo $JENKINS_WAR -d $JENKINS_WEBROOT_DIR - -#https://unix.stackexchange.com/questions/34325/sorting-the-output-of-find - -echo "Running Jenkins System Installer(s)" -[ -d "$JENKINS_HOME/install.d" ] && find $JENKINS_HOME/install.d/ -maxdepth 1 -type f -print0 | sort -z | xargs -t -r0 -I '{}' bash '{}' - -GROOVY_ALL_JAR="$(find /var/cache/jenkins/war--daemon/WEB-INF/lib/ -name '*groovy-all*.jar' -print0)" - -echo "Running Jenkins System Installer(s)" -[ -d "$JENKINS_HOME/install.groovy.d" ] && find $JENKINS_HOME/install.groovy.d/ -maxdepth 1 -type f -print0 | sort -z | xargs -t -r0 -I '{}' java -cp "${GROOVY_ALL_JAR}" groovy.lang.GroovyShell '{}' '{}' - -echo "Running Jenkins Job Installer(s)" -[ -d "$JENKINS_HOME/jobs" ] && find $JENKINS_HOME/jobs/ -type f -name '*.install.groovy' -print0 | sort -z | xargs -t -r0 -I '{}' java -cp "${GROOVY_ALL_JAR}" groovy.lang.GroovyShell '{}' '{}' - - -JAVA_CMD="$JENKINS_JAVA_CMD $JENKINS_JAVA_OPTIONS -Djenkins.install.runSetupWizard=false -DJENKINS_HOME=$JENKINS_HOME -jar $JENKINS_WAR" -PARAMS="--webroot=$JENKINS_WEBROOT_DIR" - -#--logfile=/var/log/jenkins/jenkins.log -[ -n "$JENKINS_PORT" ] && PARAMS="$PARAMS --httpPort=$JENKINS_PORT" -[ -n "$JENKINS_LISTEN_ADDRESS" ] && PARAMS="$PARAMS --httpListenAddress=$JENKINS_LISTEN_ADDRESS" -[ -n "$JENKINS_HTTPS_PORT" ] && PARAMS="$PARAMS --httpsPort=$JENKINS_HTTPS_PORT" -[ -n "$JENKINS_HTTPS_KEYSTORE" ] && PARAMS="$PARAMS --httpsKeyStore=$JENKINS_HTTPS_KEYSTORE" -[ -n "$JENKINS_HTTPS_KEYSTORE_PASSWORD" ] && PARAMS="$PARAMS --httpsKeyStorePassword='$JENKINS_HTTPS_KEYSTORE_PASSWORD'" -[ -n "$JENKINS_HTTPS_LISTEN_ADDRESS" ] && PARAMS="$PARAMS --httpsListenAddress=$JENKINS_HTTPS_LISTEN_ADDRESS" -[ -n "$JENKINS_DEBUG_LEVEL" ] && PARAMS="$PARAMS --debug=$JENKINS_DEBUG_LEVEL" -[ -n "$JENKINS_HANDLER_STARTUP" ] && PARAMS="$PARAMS --handlerCountStartup=$JENKINS_HANDLER_STARTUP" -#[ -n "$JENKINS_HANDLER_MAX" ] && PARAMS="$PARAMS --handlerCountMax=$JENKINS_HANDLER_MAX" -#[ -n "$JENKINS_HANDLER_IDLE" ] && PARAMS="$PARAMS --handlerCountMaxIdle=$JENKINS_HANDLER_IDLE" -[ -n "$JENKINS_ARGS" ] && PARAMS="$PARAMS $JENKINS_ARGS" - -if [ "$JENKINS_ENABLE_ACCESS_LOG" = "yes" ]; then - PARAMS="$PARAMS --accessLoggerClassName=winstone.accesslog.SimpleAccessLogger --simpleAccessLogger.format=combined --simpleAccessLogger.file=/var/log/jenkins/access_log" -fi -set -x -exec $JAVA_CMD $PARAMS diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/bin/jenkins-util-lib.sh b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/bin/jenkins-util-lib.sh deleted file mode 100644 index 5b813a9ae..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/bin/jenkins-util-lib.sh +++ /dev/null @@ -1,41 +0,0 @@ -function generate_passwd_file() { - USER_ID=$(id -u) - GROUP_ID=$(id -g) - - if [ x"$USER_ID" != x"0" -a x"$USER_ID" != x"997" ]; then - echo "default:x:${USER_ID}:${GROUP_ID}:Default Application User:${HOME}:/sbin/nologin" >> /etc/passwd - fi -} - -function generate_jenkins_user() { - local username="$1" - local password="$2" - -local password_hash=`java -classpath "$(find $JENKINS_WEBROOT_DIR/WEB-INF/lib/ -name acegi-security-*.jar):$(find $JENKINS_WEBROOT_DIR/WEB-INF/lib/ -name commons-codec-*.jar):/opt/openshift/password-encoder.jar" com.redhat.openshift.PasswordEncoder superSecret pvKndZ` - -cat >$JENKINS_HOME/users/$username/config.xml < - - ${username} - - - - - 
- All - false - false - - - - - - ${password_hash} - - - changeme@changeme.com - - - -EOF -} diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/init.groovy.d/000-github-access-token.groovy b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/init.groovy.d/000-github-access-token.groovy deleted file mode 100644 index 8a595e959..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/init.groovy.d/000-github-access-token.groovy +++ /dev/null @@ -1,105 +0,0 @@ -/** - -Test from Jenkins Script Console: -evaluate(new File("/var/lib/jenkins/init.groovy.d/000-github-access-token.groovy")) -**/ -import jenkins.model.Jenkins; - -import com.cloudbees.plugins.credentials.*; -import com.cloudbees.plugins.credentials.impl.*; -import com.cloudbees.plugins.credentials.domains.*; -import org.jenkinsci.plugins.plaincredentials.impl.*; - -import hudson.util.Secret; -import com.cloudbees.jenkins.GitHubWebHook; -import com.cloudbees.jenkins.*; -import org.kohsuke.github.*; - -import org.jose4j.jws.JsonWebSignature; -import org.jose4j.jws.AlgorithmIdentifiers; -import org.jose4j.jwx.HeaderParameterNames; -import org.jose4j.jwt.JwtClaims; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.security.KeyFactory; -import java.security.PrivateKey; -import java.security.spec.PKCS8EncodedKeySpec; -import groovy.json.JsonSlurper; - -import java.util.Properties; -import org.apache.commons.io.IOUtils - - - -def createAccessToken = { - String openshiftPodNamespace=new File('/var/run/pod/namespace').getText('UTF-8').trim() - String githubSecretName = new File('/var/run/secrets/github/metadata.name').getText('UTF-8').trim() - String githubAppName = new File('/var/run/secrets/github/app-name').getText('UTF-8').trim() - String githubAppId = new File('/var/run/secrets/github/app-id').getText('UTF-8').trim() - String githubInstallationId = new File('/var/run/secrets/github/app-installation-id').getText('UTF-8').trim() - String githubAppPrivateKey = new File('/var/run/secrets/github/app-private-key').getText('UTF-8').trim() - - // convert from PKCS#1 (SSLeay format) to PKCS#8 - //openssl pkcs8 -topk8 -inform PEM -outform PEM -nocrypt -in cvarjao-bot.2019-02-21.private-key.pem - - if (!(githubAppPrivateKey.startsWith("-----BEGIN PRIVATE KEY-----") && githubAppPrivateKey.endsWith("-----END PRIVATE KEY-----"))){ - throw new Exception("Invalid private key format. 
") - } - - githubAppPrivateKey = githubAppPrivateKey.replaceAll("\\n", "").replace("-----BEGIN PRIVATE KEY-----", "").replace("-----END PRIVATE KEY-----", "").replaceAll("\\s+", ""); - - byte[] encoded = Base64.getDecoder().decode(githubAppPrivateKey) - KeyFactory kf = KeyFactory.getInstance("RSA"); - PKCS8EncodedKeySpec keySpecPKCS8 = new PKCS8EncodedKeySpec(encoded); - PrivateKey privKey = kf.generatePrivate(keySpecPKCS8); - JwtClaims claims = new JwtClaims(); - claims.setIssuer(githubAppId); - claims.setIssuedAtToNow(); - claims.setExpirationTimeMinutesInTheFuture(10); - - String payload = claims.toJson(); - - //println "PAYLOAD:${payload}" - - JsonWebSignature jsonWebSignature = new JsonWebSignature(); - jsonWebSignature.setPayload(payload); - jsonWebSignature.setKey(privKey); - jsonWebSignature.setKeyIdHeaderValue("k1"); - jsonWebSignature.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256); - jsonWebSignature.setHeader(HeaderParameterNames.TYPE, "JWT"); - String jwt = jsonWebSignature.getCompactSerialization(); - - URL url = new URL("https://api.github.com/app/installations/${githubInstallationId}/access_tokens"); - HttpURLConnection con = (HttpURLConnection) url.openConnection(); - con.setRequestMethod("POST"); - con.setRequestProperty("Authorization", "Bearer ${jwt}"); - con.setRequestProperty("Accept", "application/vnd.github.machine-man-preview+json"); - int status = con.getResponseCode(); - String content = IOUtils.toString(con.getInputStream(), 'UTF-8') - //reader.close(); - con.disconnect(); - - //println "CONTENT:\n${content}"; - def accessToken = new JsonSlurper().parseText(content) - //println "accessToken:\n${accessToken.get("token")}" - //println "expires_at:\n${accessToken.get("expires_at")}" - //expires_at - //GitHub github = new GitHubBuilder().withOAuthToken(accessToken.get("token"), githubAppName).build(); - ['oc','patch', "secret/${githubSecretName}", '-p', '{"stringData": {"password": "'+accessToken.get("token")+'", "token": "'+accessToken.get("token")+'", "expires_at":"'+accessToken.get("expires_at")+'"}}', '-n', openshiftPodNamespace].execute().waitFor() - - return accessToken -} - -if (new File('/var/run/secrets/github/app-id').exists()){ - try { - createAccessToken() - } catch (Exception exception){ - println("An exception occured during initialization"); - exception.printStackTrace(); - println("Aborting Jenkins ..."); - Jenkins.instance.doExit(null, null); - } - - //3000000 = 50 minutes - new Timer().schedule createAccessToken as TimerTask, 3000000, 3000000 -} \ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/init.groovy.d/001-setup-slave-credential.groovy b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/init.groovy.d/001-setup-slave-credential.groovy deleted file mode 100644 index 33fda2f44..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/init.groovy.d/001-setup-slave-credential.groovy +++ /dev/null @@ -1,43 +0,0 @@ -import com.cloudbees.plugins.credentials.impl.*; -import com.cloudbees.plugins.credentials.*; -import com.cloudbees.plugins.credentials.domains.*; -import org.jenkinsci.plugins.plaincredentials.impl.*; -import jenkins.model.Jenkins -import hudson.model.* -import hudson.util.Secret; - - - -String openshiftPodNamespace=new File('/var/run/pod/namespace').getText('UTF-8').trim() -String openshiftSecretName = new File('/var/run/secrets/jenkins-slave-user/metadata.name').getText('UTF-8').trim() -String username 
= new File('/var/run/secrets/jenkins-slave-user/username').getText('UTF-8').trim() - -User u = User.get(username) -def apiToken=u.getProperty(jenkins.security.ApiTokenProperty.class) - -//BEFORE 2.129+: https://jenkins.io/blog/2018/07/02/new-api-token-system/ -//println "\'${u.getId()}\' API token:${apiToken.getApiTokenInsecure()}" -//['oc','patch', "secret/${openshiftSecretName}", '-p', '{"stringData": {"password": "'+apiToken.getApiTokenInsecure()+'"}}', '-n', openshiftPodNamespace].execute().waitFor() - -//AFTER 2.129+: https://jenkins.io/blog/2018/07/02/new-api-token-system/ - -//Revoke all existing tokens -for (def token:apiToken.getTokenList()){ - def revoked = apiToken.tokenStore.revokeToken(token.uuid) - if(revoked != null){ - apiToken.tokenStats.removeId(revoked.getUuid()); - } -} -def newToken= apiToken.tokenStore.generateNewToken('swarm') -['oc','patch', "secret/${openshiftSecretName}", '-p', '{"stringData": {"password": "'+newToken.plainValue+'"}}', '-n', openshiftPodNamespace].execute().waitFor() -println "\'${u.getId()}\' API token:${newToken.plainValue}" - -if (Jenkins.instance.getAuthorizationStrategy().getClass().getName().equalsIgnoreCase('com.michelin.cio.hudson.plugins.rolestrategy.RoleBasedAuthorizationStrategy')) { - Jenkins.instance.getAuthorizationStrategy().doAssignRole('globalRoles', 'agent', username) -} else { - Jenkins.instance.getAuthorizationStrategy().add(hudson.slaves.SlaveComputer.CREATE, username) - Jenkins.instance.getAuthorizationStrategy().add(hudson.slaves.SlaveComputer.CONNECT, username) - Jenkins.instance.getAuthorizationStrategy().add(Jenkins.READ, username) -} - -u.save(); diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/init.groovy.d/002-setup-bitbucket.groovy b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/init.groovy.d/002-setup-bitbucket.groovy deleted file mode 100644 index b6ddc78d0..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/init.groovy.d/002-setup-bitbucket.groovy +++ /dev/null @@ -1,31 +0,0 @@ -import jenkins.model.Jenkins - -import com.cloudbees.plugins.credentials.*; -import com.cloudbees.plugins.credentials.impl.*; -import com.cloudbees.plugins.credentials.domains.*; -import org.jenkinsci.plugins.plaincredentials.impl.*; - -import hudson.util.Secret; -import com.cloudbees.jenkins.GitHubWebHook; -import com.cloudbees.jenkins.* -import org.kohsuke.github.* - -if (new File('/var/run/secrets/bitbucket/username').exists()){ - String githubUsername = new File('/var/run/secrets/bitbucket/username').getText('UTF-8').trim() - String githubPassword = new File('/var/run/secrets/bitbucket/password').getText('UTF-8').trim() - - Credentials c1 = (Credentials) new UsernamePasswordCredentialsImpl( - CredentialsScope.GLOBAL, - "bitbucket-account", - "BitBucket account", - githubUsername, - githubPassword); - - SystemCredentialsProvider.getInstance().getStore().addCredentials(Domain.global(), c1); - - - println "Configuring BitBucket API" - def bitbucketServerUrl = new File('/var/run/secrets/bitbucket/url').getText('UTF-8').trim() - def bitbucketConfig = Jenkins.getInstance().getDescriptor(com.cloudbees.jenkins.plugins.bitbucket.endpoints.BitbucketEndpointConfiguration); - bitbucketConfig.setEndpoints([new com.cloudbees.jenkins.plugins.bitbucket.endpoints.BitbucketServerEndpoint('BitBucket', bitbucketServerUrl, false, 'bitbucket-account')]) -} \ No newline at end of file diff --git 
a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/init.groovy.d/002-setup-github.groovy b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/init.groovy.d/002-setup-github.groovy deleted file mode 100644 index 7c06282c8..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/init.groovy.d/002-setup-github.groovy +++ /dev/null @@ -1,44 +0,0 @@ -import jenkins.model.Jenkins - -import com.cloudbees.plugins.credentials.*; -import com.cloudbees.plugins.credentials.impl.*; -import com.cloudbees.plugins.credentials.domains.*; -import org.jenkinsci.plugins.plaincredentials.impl.*; - -import hudson.util.Secret; -import com.cloudbees.jenkins.GitHubWebHook; -import com.cloudbees.jenkins.* -import org.kohsuke.github.* - -if (new File('/var/run/secrets/github/username').exists()){ - String githubUsername = new File('/var/run/secrets/github/username').getText('UTF-8').trim() - String githubPassword = new File('/var/run/secrets/github/password').getText('UTF-8').trim() - - Credentials c1 = (Credentials) new UsernamePasswordCredentialsImpl( - CredentialsScope.GLOBAL, - "github-account", - "GitHub account", - githubUsername, - githubPassword); - - SystemCredentialsProvider.getInstance().getStore().addCredentials(Domain.global(), c1); - - Credentials c2 = (Credentials) new StringCredentialsImpl( - CredentialsScope.GLOBAL, - "github-access-token", - "GitHub account (Access Token)", - Secret.fromString(githubPassword)); - - SystemCredentialsProvider.getInstance().getStore().addCredentials(Domain.global(), c2); - - println "Configuring GitHub API" - - def ghCofigs = Jenkins.getInstance().getDescriptor(org.jenkinsci.plugins.github.config.GitHubPluginConfig.class).getConfigs(); - def ghServerConfig = new org.jenkinsci.plugins.github.config.GitHubServerConfig('github-access-token'); - ghServerConfig.setName('GitHub') - ghServerConfig.setApiUrl('https://api.github.com') - ghServerConfig.setManageHooks(true); - ghServerConfig.setClientCacheSize(21) - ghCofigs.clear(); - ghCofigs.add(ghServerConfig); -} \ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/init.groovy.d/003-register-github-webhooks.groovy b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/init.groovy.d/003-register-github-webhooks.groovy deleted file mode 100644 index 3069fd494..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/init.groovy.d/003-register-github-webhooks.groovy +++ /dev/null @@ -1,69 +0,0 @@ -import jenkins.* -import jenkins.model.* -import org.kohsuke.github.* - - -if ('prod'.equalsIgnoreCase(System.getenv('ENV_NAME'))){ - String jenkinsUrl = JenkinsLocationConfiguration.get().getUrl() - String genericWebHookTriggerToken = Jenkins.instance.getItemByFullName('_SYS/ON_GH_EVENT').getTrigger(org.jenkinsci.plugins.gwt.GenericTrigger.class).getToken() - - Jenkins.instance.getAllItems().each { job -> - if (job instanceof jenkins.branch.MultiBranchProject){ - try{ - for (def branchSource:job.getSources()){ - if (branchSource instanceof jenkins.branch.BranchSource){ - if (branchSource.getSource() instanceof org.jenkinsci.plugins.github_branch_source.GitHubSCMSource){ - def scmBranchSource = branchSource.getSource() - //org.jenkinsci.plugins.github_branch_source.GitHubSCMBuilder.uriResolver(job, scmBranchSource.getApiUri()) - com.cloudbees.plugins.credentials.common.StandardCredentials credentials = 
org.jenkinsci.plugins.github_branch_source.Connector.lookupScanCredentials(job, scmBranchSource.getApiUri(), scmBranchSource.getCredentialsId()) - org.kohsuke.github.GitHub github = org.jenkinsci.plugins.github_branch_source.Connector.connect(scmBranchSource.getApiUri(), credentials); - String fullName = scmBranchSource.getRepoOwner() + "/" + scmBranchSource.getRepository(); - //println fullName - org.kohsuke.github.GHRepository ghRepository = github.getRepository(fullName); - Map hooks =[ - 'github-webhook':['url':"${jenkinsUrl}github-webhook/", 'events':[org.kohsuke.github.GHEvent.PULL_REQUEST, org.kohsuke.github.GHEvent.PUSH]], - 'generic-webhook-trigger.0':['url':"${jenkinsUrl}generic-webhook-trigger/invoke?token=${genericWebHookTriggerToken}", 'events':[org.kohsuke.github.GHEvent.PULL_REQUEST, org.kohsuke.github.GHEvent.ISSUE_COMMENT]] - ] - for (def hook:ghRepository.getHooks()){ - //println hook - hooks.each{ String name, Map newHook -> - if (hook.getConfig() == null || hook.getConfig()['url'] == null){ - println "Something is odd .. a hook in '${fullName}' is null: ${hook}" - }else if (hook.getConfig()['url'].startsWith(newHook.url)){ - newHook['_hook']=hook - } - } - } - //Create Hooks - hooks.each{ String name, Map newHook -> - Map hookCfg = ['url':newHook.url] - if (newHook._hook == null){ - if (newHook.qs){ - if (hookCfg.url.contains('?')){ - hookCfg.url=hookCfg.url+'&' - }else{ - hookCfg.url=hookCfg.url+'?' - } - hookCfg.url=hookCfg.url+newHook.qs - } - println "Registering webhook for ${job.name}: ${[new URL(hookCfg.url), newHook.events]}" - ghRepository.createHook("web",["url":new URL(hookCfg.url).toExternalForm()], newHook.events,'prod'.equalsIgnoreCase(System.getenv('ENV_NAME'))) - //ghRepository.createWebHook(new URL(hookCfg.url), newHook.events) - }else{ - println "Webhook already registered for ${job.name}: ${[new URL(hookCfg.url), newHook.events]}" - } - } - } - } - } //for - } catch (ex){ - println "Error registering webhook for ${job.name}" - println(ex.toString()); - println(ex.getMessage()); - println(ex.getStackTrace()); - } - } - } -}else{ - println "SKIPPING (Register GitHub WebHooks): Not running in Production mode (ENV_NAME != 'prod') " -} \ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/install.groovy.d/001.groovy b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/install.groovy.d/001.groovy deleted file mode 100644 index 4bf291a0f..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/install.groovy.d/001.groovy +++ /dev/null @@ -1,24 +0,0 @@ -import java.util.regex.Pattern - -class JenkinsInstall extends Script { - def run() { - println "Running ${args[0]}" - File jenkinsHomeDir = new File(args[0]).getAbsoluteFile().getParentFile().getParentFile() - File configXmlFile = new File(jenkinsHomeDir, 'jenkins.model.JenkinsLocationConfiguration.xml') - File configXmlTemplateFile = new File(jenkinsHomeDir, 'jenkins.model.JenkinsLocationConfiguration.xml.template') - String jenkinsURL = System.getenv()['JENKINS_URL'] - if ( !( jenkinsURL ==~ /^https?:\/\/.*\/$/) ){ - throw new RuntimeException("Invalid JENKINS_URL. 
Expected to match '^https?://.*/\$'") - } - configXmlFile.withWriter { w -> - configXmlTemplateFile.eachLine { line -> - w << line.replaceAll(Pattern.quote('#{JENKINS_URL}'), jenkinsURL) + System.getProperty("line.separator") - } - } - } - - static void main(String[] args) { - org.codehaus.groovy.runtime.InvokerHelper.runScript(JenkinsInstall, args) - } - -} \ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jenkins.model.JenkinsLocationConfiguration.xml.template b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jenkins.model.JenkinsLocationConfiguration.xml.template deleted file mode 100644 index aa2132b2b..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jenkins.model.JenkinsLocationConfiguration.xml.template +++ /dev/null @@ -1,5 +0,0 @@ - - - Sustainment.Team@gov.bc.ca - #{JENKINS_URL} - diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jobs/_SYS/config.xml b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jobs/_SYS/config.xml deleted file mode 100644 index 8e1bc7d3f..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jobs/_SYS/config.xml +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - - - - - - - - All - false - false - - - - - - - - \ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jobs/_SYS/jobs/ON_GH_EVENT/config.xml.install.groovy b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jobs/_SYS/jobs/ON_GH_EVENT/config.xml.install.groovy deleted file mode 100644 index e02fceb5d..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jobs/_SYS/jobs/ON_GH_EVENT/config.xml.install.groovy +++ /dev/null @@ -1,61 +0,0 @@ -import java.util.regex.Pattern - -class JobInstall extends Script { -static Map exec(List args, Appendable stdout=null, Appendable stderr=null, Closure stdin=null){ - ProcessBuilder builder = new ProcessBuilder(args) - def proc = builder.start() - - if (stdin!=null) { - OutputStream out = proc.getOutputStream(); - stdin(out) - out.flush(); - out.close(); - } - - if (stdout == null ){ - stdout = new StringBuffer() - } - - proc.waitForProcessOutput(stdout, stderr) - int exitValue= proc.exitValue() - - Map ret = ['out': stdout, 'err': stderr, 'status':exitValue, 'cmd':args] - - return ret -} - - def run() { - println "Running ${args[0]}" - - String secretToken = UUID.randomUUID() - Map ocGetSecretToken = exec(['sh', '-c', "set -x; oc get \"secret/\$(cat /var/run/secrets/github/metadata.name)\" \"--output=jsonpath={.data['generic-hook\\.token']}\" | base64 --decode"]) - - if (ocGetSecretToken.status != 0 || ocGetSecretToken.out.toString().trim() == ""){ - println "Updating/Creating token" - exec(['sh', '-c', "oc patch \"secret/\$(cat /var/run/secrets/github/metadata.name)\" -p '{\"stringData\": {\"generic-hook.token\": \"${secretToken}\"}}'" as String]) - }else{ - println "Using existing token" - secretToken = ocGetSecretToken.out.toString().trim() - } - - def installFile = args[0] - def configXmlFile = installFile.substring(0, installFile.length() - '.install.groovy'.length()) - def configXmlTemplateFile = configXmlFile + '.template' - - //println "configXmlFile:${configXmlFile}" - //println "configXmlTemplateFile:${configXmlTemplateFile}" - - new File( configXmlFile ).withWriter { w -> - new File(configXmlTemplateFile).eachLine { line 
-> - w << line.replaceAll(Pattern.quote('#{TOKEN}'), secretToken ) + System.getProperty("line.separator") - } - } - - return null - } - - static void main(String[] args) { - org.codehaus.groovy.runtime.InvokerHelper.runScript(JobInstall, args) - } - -} \ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jobs/_SYS/jobs/ON_GH_EVENT/config.xml.template b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jobs/_SYS/jobs/ON_GH_EVENT/config.xml.template deleted file mode 100644 index d5b049500..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jobs/_SYS/jobs/ON_GH_EVENT/config.xml.template +++ /dev/null @@ -1,68 +0,0 @@ - - - - - false - - - - - x_github_event - - - - - payload - - - - - - - - master - true - false - false - false - - - - - - - - payload - - - - - - X-GitHub-Event - - - - false - false - Generic Cause - #{TOKEN} - - - true - - - - - false - - - - - - - - xterm - - - \ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jobs/_SYS/jobs/ON_STARTUP/config.xml b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jobs/_SYS/jobs/ON_STARTUP/config.xml deleted file mode 100644 index d79b18fc9..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jobs/_SYS/jobs/ON_STARTUP/config.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - false - - - true - false - false - false - - - - - 0 - ON_BOTH - - - false - - - - - false - - - - - - - \ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jobs/_SYS/jobs/ON_STARTUP/config.xml.install.groovy b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jobs/_SYS/jobs/ON_STARTUP/config.xml.install.groovy deleted file mode 100644 index 6bd4e4f70..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/jobs/_SYS/jobs/ON_STARTUP/config.xml.install.groovy +++ /dev/null @@ -1,12 +0,0 @@ -import jenkins.* -import jenkins.model.* - -class JobInstall extends Script { - def run() { - println "${args}" - } - static void main(String[] args) { - org.codehaus.groovy.runtime.InvokerHelper.runScript(JobInstall, args) - } - -} \ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/plugins.txt b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/plugins.txt deleted file mode 100644 index 3d0ff3a51..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/plugins.txt +++ /dev/null @@ -1,111 +0,0 @@ -jquery3-api:3.6.1-2 -plugin-util-api:2.20.0 -snakeyaml-api:1.33-95.va_b_a_e3e47b_fa_4 -trilead-api:2.84.v72119de229b_7 -display-url-api:2.3.7 -durable-task:504.vb10d1ae5ba2f -jackson2-api:2.14.2-319.v37853346a_229 -jdk-tool:63.v62d2fd4b_4793 -jenkins-design-language:1.27.3 -mailer:448.v5b_97805e3767 -okhttp-api:4.10.0-132.v7a_7b_91cef39c -pubsub-light:1.17 -script-security:1229.v4880b_b_e905a_6 -simple-theme-plugin:146.v0e67db_a_9052e -sse-gateway:1.26 -structs:324.va_f5d6774f3a_d -swarm:3.39 -workflow-step-api:639.v6eca_cd8c04a_a_ -blueocean-commons:1.27.3 -blueocean-core-js:1.27.3 -blueocean-jwt:1.27.3 -blueocean-rest:1.27.3 -command-launcher:90.v669d7ccb_7c31 -credentials:1189.vf61b_a_5e2f62e -echarts-api:4.8.0-2 -github-api:1.303-417.ve35d9dd78549 -monitoring:1.92.0 -pipeline-model-api:2.2125.vddb_a_44a_d605e 
-plain-credentials:143.v1b_df8b_d3b_e48 -scm-api:631.v9143df5b_e4a_a -ssh-credentials:305.v8f4381501156 -token-macro:321.vd7cc1f2a_52c8 -variant:59.vf075fe829ccb -workflow-api:1208.v0cc7c6e0da_9e -workflow-scm-step:408.v7d5b_135a_b_d49 -workflow-support:839.v35e2736cfd5c -ansicolor:1.0.2 -apache-httpcomponents-client-4-api:4.5.14-150.v7a_b_9d17134a_5 -authentication-tokens:1.4 -blueocean-i18n:1.27.3 -blueocean-web:1.27.3 -bouncycastle-api:2.27 -cloudbees-folder:6.758.vfd75d09eea_a_1 -credentials-binding:523.vd859a_4b_122e6 -generic-webhook-trigger:1.86.2 -pipeline-groovy-lib:629.vb_5627b_ee2104 -groovy:453.vcdb_a_c5c99890 -handy-uri-templates-2-api:2.1.8-22.v77d5b_75e6953 -jsch:0.1.55.61.va_e9ee26616e7 -junit:1189.v1b_e593637fa_e -kubernetes-client-api:6.4.1-215.v2ed17097a_8e9 -matrix-auth:3.1.6 -matrix-project:785.v06b_7f47b_c631 -mercurial:1260.vdfb_723cdcc81 -openshift-login:1.0.29 -pipeline-build-step:487.va_823138eee8b_ -pipeline-input-step:466.v6d0a_5df34f81 -pipeline-milestone-step:111.v449306f708b_7 -pipeline-stage-step:305.ve96d0205c1c6 -pipeline-stage-tags-metadata:2.2125.vddb_a_44a_d605e -rebuild:1.34 -timestamper:1.23 -workflow-durable-task-step:2.35 -workflow-job:1289.vd1c337fd5354 -authorize-project:1.5.1 -blueocean-config:1.27.3 -blueocean-dashboard:1.27.3 -blueocean-personalization:1.27.3 -branch-api:2.1071.v1a_188a_562481 -favorite:2.4.1 -git-client:4.2.0 -git-server:99.va_0826a_b_cdfa_d -htmlpublisher:1.31 -jira:3.9 -lockable-resources:1131.vb_7c3d377e723 -startup-trigger-plugin:2.9.3 -workflow-basic-steps:2.20 -workflow-cps:3653.v07ea_433c90b_4 -workflow-multibranch:733.v109046189126 -basic-branch-build-strategies:71.vc1421f89888e -blueocean-display-url:2.4.1 -blueocean-jira:1.23.2 -blueocean-pipeline-scm-api:1.27.3 -blueocean-rest-impl:1.27.3 -git:5.0.0 -github:1.37.0 -github-branch-source:1703.vd5a_2b_29c6cdc -openshift-client:1.0.38 -pipeline-graph-analysis:202.va_d268e64deb_3 -pipeline-model-extensions:2.2125.vddb_a_44a_d605e -pipeline-rest-api:2.32 -pipeline-stage-view:2.32 -blueocean-autofavorite:1.2.5 -cloudbees-bitbucket-branch-source:800.va_b_b_9a_a_5035c1 -disable-github-multibranch-status:1.2 -docker-commons:419.v8e3cd84ef49c -docker-workflow:1.24 -kubernetes-credentials:0.10.0 -pipeline-model-definition:2.2125.vddb_a_44a_d605e -workflow-aggregator:2.6 -blueocean-pipeline-api-impl:1.23.2 -blueocean-pipeline-editor:1.23.2 -kubernetes:3900.va_dce992317b_4 -blueocean-bitbucket-pipeline:1.23.2 -blueocean-events:1.23.2 -blueocean-git-pipeline:1.23.2 -blueocean-github-pipeline:1.23.2 -blueocean:1.27.3 -configuration-as-code:1616.v11393eccf675 -role-strategy:587.588.v850a_20a_30162 -oic-auth:2.5 diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/scripts.groovy.d/generate-plugins-list.groovy b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/scripts.groovy.d/generate-plugins-list.groovy deleted file mode 100644 index 85be41d40..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/scripts.groovy.d/generate-plugins-list.groovy +++ /dev/null @@ -1,42 +0,0 @@ -def plugins1=[] -def plugins2=[] - - -def includePlugin={ plugin -> - plugins2.add(plugin) - def updateInfo=plugin.getUpdateInfo() - if (updateInfo){ - println("${plugin.getShortName()}:${updateInfo.version}") - }else{ - println("${plugin.getShortName()}:${plugin.getVersion()}") - } -} - -plugins1.addAll(Jenkins.instance.pluginManager.plugins); -plugins1=plugins1.sort({it.getShortName()}) - -def previousSize=-1; -def 
step=1; -while (plugins1.size()>0 && previousSize!=plugins1.size()){ - //println "##Step ${step}: ${plugins1.size()} - ${plugins2.size()}" - previousSize=plugins1.size(); - def it2 = plugins1.iterator(); - while (it2.hasNext()) { - def plugin = it2.next(); - if ((step ==1 && plugin.getDependencies().size()==0) || plugin.getDependencies().find({dependency -> plugins2.find({dependency.shortName.equals(it.getShortName())})==null})==null){ - it2.remove(); - includePlugin(plugin); - } - } - step++; -} - -// add all leftover (if any) -def it2 = plugins1.iterator(); -while (it2.hasNext()) { - def plugin = it2.next(); - it2.remove(); - includePlugin(plugin); -} - -return; \ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/scripts.groovy.d/on-startup.groovy b/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/scripts.groovy.d/on-startup.groovy deleted file mode 100644 index 6d5edafa4..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/docker/contrib/jenkins/configuration/scripts.groovy.d/on-startup.groovy +++ /dev/null @@ -1,12 +0,0 @@ -import groovy.json.* - -class OnStartup extends Script { - def run() { - println "On Startup" - return null; - } //end run - - static void main(String[] args) { - org.codehaus.groovy.runtime.InvokerHelper.runScript(OnStartup, args) - } -} \ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/basic/jenkins-basic-build.yaml b/gwells/openshift/ocp4/jenkins/basic/jenkins-basic-build.yaml deleted file mode 100644 index fb2fa8e98..000000000 --- a/gwells/openshift/ocp4/jenkins/basic/jenkins-basic-build.yaml +++ /dev/null @@ -1,65 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: null - name: jenkins-basic -objects: -- apiVersion: v1 - kind: BuildConfig - metadata: - creationTimestamp: null - name: ${NAME}${SUFFIX} - spec: - failedBuildsHistoryLimit: 2 - output: - to: - kind: ImageStreamTag - name: ${NAME}:${VERSION} - postCommit: {} - resources: - limits: - cpu: "2" - memory: 2Gi - requests: - cpu: "1" - memory: 1Gi - runPolicy: SerialLatestOnly - source: - contextDir: openshift/ocp4/jenkins/basic/docker - git: - ref: ${SOURCE_REPOSITORY_REF} - uri: ${SOURCE_REPOSITORY_URL} - type: Git - strategy: - dockerStrategy: - from: - kind: DockerImage - name: registry.redhat.io/ubi8/ubi:8.7 - type: Docker - successfulBuildsHistoryLimit: 2 - triggers: - - type: ConfigChange - - imageChange: {} - type: ImageChange -parameters: -- description: A name used for all objects - displayName: Name - name: NAME - required: true - value: jenkins-basic -- description: A name suffix used for all objects - displayName: Suffix - name: SUFFIX - required: false - value: "" -- description: A version used for the image tags - displayName: version - name: VERSION - required: true - value: v2-latest -- name: SOURCE_REPOSITORY_URL - required: true - value: https://github.com/bcgov/gwells.git -- name: SOURCE_REPOSITORY_REF - required: false - value: master \ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/docker/Dockerfile b/gwells/openshift/ocp4/jenkins/docker/Dockerfile deleted file mode 100644 index 571f591fd..000000000 --- a/gwells/openshift/ocp4/jenkins/docker/Dockerfile +++ /dev/null @@ -1,24 +0,0 @@ -FROM BuildConfig -ARG NODE_VERSION=v18.15.0 -ARG SONAR_VERSION=4.8.0.2856 -USER 0 -RUN fix_permission() { while [[ $# > 0 ]] ; do chgrp -R 0 "$1" && chmod -R g=u "$1"; shift; done } && \ - set -x && \ - curl -sSL -o 
/tmp/sonar-scanner-cli.zip https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${SONAR_VERSION}-linux.zip && \ - unzip -q /tmp/sonar-scanner-cli.zip -d /tmp/sonar-scanner-cli && \ - mv /tmp/sonar-scanner-cli/sonar-scanner-${SONAR_VERSION}-linux /opt/sonar-scanner && \ - ln -s /opt/sonar-scanner/bin/sonar-scanner /usr/local/bin && \ - rm -rf /tmp/sonar-scanner-cli.zip && \ - rm -rf /tmp/sonar-scanner-cli && \ - mkdir /opt/node && \ - curl -sSL https://nodejs.org/dist/${NODE_VERSION}/node-${NODE_VERSION}-linux-x64.tar.gz | tar zxf - --strip-components=1 -C /opt/node && \ - fix_permission '/opt/sonar-scanner' '/opt/node' && \ - find $JENKINS_REF_HOME -maxdepth 1 -type f -name '*.xml' -delete - -ENV NODE_HOME=/opt/node \ - PATH=$PATH:/opt/node/bin \ - CASC_JENKINS_CONFIG=/var/lib/jenkins/casc_configs - -COPY ./contrib/jenkins $JENKINS_REF_HOME - -USER 1001 diff --git a/gwells/openshift/ocp4/jenkins/docker/contrib/jenkins/casc_configs/100_main.yaml b/gwells/openshift/ocp4/jenkins/docker/contrib/jenkins/casc_configs/100_main.yaml deleted file mode 100644 index da17a755d..000000000 --- a/gwells/openshift/ocp4/jenkins/docker/contrib/jenkins/casc_configs/100_main.yaml +++ /dev/null @@ -1,71 +0,0 @@ -jenkins: - mode: EXCLUSIVE - numExecutors: 0 - slaveAgentPort: 50000 - agentProtocols: - - "JNLP4-connect" - - "Ping" - disabledAdministrativeMonitors: - - "jenkins.security.QueueItemAuthenticatorMonitor" - clouds: - - kubernetes: - containerCap: 10 - containerCapStr: "10" - jenkinsTunnel: "${JENKINS_TUNNEL}" - jenkinsUrl: "${JENKINS_URL}" - name: "openshift" - securityRealm: "openShiftOAuth2" -security: - apiToken: - creationOfLegacyTokenEnabled: false - tokenGenerationOnCreationEnabled: false - usageStatisticsEnabled: true - queueItemAuthenticator: - authenticators: - - global: - strategy: "systemAuthorizationStrategy" - sSHD: - port: -1 - scriptApproval: - approvedSignatures: - - "method hudson.model.Run delete" - - "method hudson.model.Run getCauses" - - "method org.jenkinsci.plugins.workflow.support.steps.build.RunWrapper getRawBuild" -unclassified: - gitHubPluginConfig: - configs: - - clientCacheSize: 21 - credentialsId: "github-access-token" - name: "GitHub" - hookUrl: "${JENKINS_URL}/github-webhook/" - globalLibraries: - libraries: - - defaultVersion: "master" - implicit: true - name: "bcdevops-jenkins-shared-library" - retriever: - modernSCM: - scm: - git: - credentialsId: "github-account" - id: "fd48bed5-56bd-4fa0-b477-acfce5bc4929" - remote: "https://github.com/BCDevOps/jenkins-pipeline-shared-lib.git" - traits: - - "gitBranchDiscovery" - - "gitTagDiscovery" - location: - adminAddress: "Sustainment.Team@gov.bc.ca" - url: "${JENKINS_URL}" - mailer: - authentication: - password: "{AQAAABAAAAAgStO5EunJg6DPC+IaKAg8OUJFoAA9MgKF3wdZ+a02caLqCg2DvA7YYov885tVSaK5}" - username: "Sustainment.Team@gov.bc.ca" - charset: "UTF-8" - smtpHost: "apps.smtp.gov.bc.ca" - useSsl: false - useTls: false -tool: - git: - installations: - - name: git - home: /usr/bin/git diff --git a/gwells/openshift/ocp4/jenkins/docker/contrib/jenkins/casc_configs/101_authorization.yaml b/gwells/openshift/ocp4/jenkins/docker/contrib/jenkins/casc_configs/101_authorization.yaml deleted file mode 100644 index 3dd8b8aff..000000000 --- a/gwells/openshift/ocp4/jenkins/docker/contrib/jenkins/casc_configs/101_authorization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -jenkins: - authorizationStrategy: - projectMatrix: - permissions: - - "Overall/Read:authenticated" \ No newline at end of file diff --git 
a/gwells/openshift/ocp4/jenkins/docker/contrib/jenkins/jobs/gwells/config.xml b/gwells/openshift/ocp4/jenkins/docker/contrib/jenkins/jobs/gwells/config.xml deleted file mode 100644 index f8bcde387..000000000 --- a/gwells/openshift/ocp4/jenkins/docker/contrib/jenkins/jobs/gwells/config.xml +++ /dev/null @@ -1,63 +0,0 @@ - - - - Groundwater Wells and Aquifers application for the Ministry of Environment - GWELLS - - - - - - - - - - - - - - - false - - - - - - - true - 1 - 1 - - - false - - - - - e7d5a06d-b84c-4b3a-b71a-aa75b10fb83f - https://api.github.com - github-account - bcgov - gwells - https://github.com/bcgov/gwells.git - - - 1 - - - - - - - - - - - - - - - - Jenkinsfile.ocp4 - - \ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/docker/contrib/jenkins/scripts.groovy.d/on-gh-event.groovy b/gwells/openshift/ocp4/jenkins/docker/contrib/jenkins/scripts.groovy.d/on-gh-event.groovy deleted file mode 100644 index 5f9bdfd47..000000000 --- a/gwells/openshift/ocp4/jenkins/docker/contrib/jenkins/scripts.groovy.d/on-gh-event.groovy +++ /dev/null @@ -1,67 +0,0 @@ -// note: a copy of this file lives in Jenkins config. We need a way to update the Jenkins config -// when changes are made to this file. - -import groovy.json.JsonSlurper - -String TOOLS_PROJECT = "26e83e-tools" -String DEV_PROJECT = "26e83e-dev" - -def jsonSlurper = new JsonSlurper() - -// the webhook trigger comes from GitHub as a POST request with a "payload" object in the body -String ghEventType = build.buildVariableResolver.resolve("x_github_event") -def payload = jsonSlurper.parseText(build.buildVariableResolver.resolve("payload")) -def prNum = payload['number'] - - -// this script is triggered on all events, but we are specifically interested in pull requests that are closed -// pull requests come with actions like "opened", "closed". Merged and closed are the same event (there is an -// additional "merged: true" property) -if (ghEventType == 'pull_request' && payload['action'] == 'closed' && prNum) { - - def sout = new StringBuilder(), serr = new StringBuilder() - - // delete all the objects in the DEV namespace labeled with this PR number - // todo: there are several labels that need to be targeted and hardcoding them is fragile. - // a future task should focus on creating a label that applies to all resources associated with one pull request. - - // these objects were created as part of deploying an app (e.g. 
replication controller) - def deleteAllAppObjects = "oc delete all,pvc,secret,configmap -n ${DEV_PROJECT} -l app=gwells-dev-pr-${prNum}".execute() - deleteAllAppObjects.consumeProcessOutput(sout, serr) - deleteAllAppObjects.waitForOrKill(25000) - println "out> $sout err> $serr" - - // these objects were created by our templates during the pipeline runs - sout = new StringBuilder() - serr = new StringBuilder() - def deleteCreatedObjects = "oc delete all,pvc,secret,configmap -n ${DEV_PROJECT} -l appver=gwells-dev-pr-${prNum}".execute() - deleteCreatedObjects.consumeProcessOutput(sout, serr) - deleteCreatedObjects.waitForOrKill(25000) - println "out> $sout err> $serr" - - // these objects were generated by openshift for PVC provisioning - sout = new StringBuilder() - serr = new StringBuilder() - def deleteGeneratedObjects = "oc delete all,pvc,secret,configmap -n ${DEV_PROJECT} -l gluster.kubernetes.io/provisioned-for-pvc=gwells-pg12-dev-pr-${prNum}".execute() - deleteGeneratedObjects.consumeProcessOutput(sout, serr) - deleteGeneratedObjects.waitForOrKill(25000) - println "out> $sout err> $serr" - - // delete the objects in the tools project (this is primarily the build configs, - // the imagestream is not unique to each pull request). - sout = new StringBuilder() - serr = new StringBuilder() - def deleteAllBuilds = "oc delete all -n ${TOOLS_PROJECT} -l appver=gwells-dev-pr-${prNum}".execute() - deleteAllBuilds.consumeProcessOutput(sout, serr) - deleteAllBuilds.waitForOrKill(25000) - println "out> $sout err> $serr" - - // untag the images tagged with this PR number - sout = new StringBuilder() - serr = new StringBuilder() - def untagImages = "oc tag -n ${TOOLS_PROJECT} -d gwells-application:pr-${prNum}".execute() - untagImages.consumeProcessOutput(sout, serr) - untagImages.waitForOrKill(25000) - println "out> $sout err> $serr" - -} diff --git a/gwells/openshift/ocp4/jenkins/jenkins-deploy.yaml b/gwells/openshift/ocp4/jenkins/jenkins-deploy.yaml deleted file mode 100644 index 4b2a0d765..000000000 --- a/gwells/openshift/ocp4/jenkins/jenkins-deploy.yaml +++ /dev/null @@ -1,476 +0,0 @@ ---- -apiVersion: template.openshift.io/v1 -kind: Template -labels: - app: jenkins -metadata: - creationTimestamp: null - name: jenkins -objects: -- apiVersion: v1 - kind: Secret - metadata: - annotations: - as-copy-of: template.${NAME}-slave-user - as-copy-of/preserve: password - name: ${NAME}-slave-user - stringData: - metadata.name: ${NAME}-slave-user - password: ${SLAVE_USER_PASSWORD} - username: jenkins-slave - type: kubernetes.io/basic-auth -- apiVersion: v1 - kind: Secret - metadata: - annotations: - as-copy-of: template.${NAME}-github - name: ${NAME}-github - stringData: - metadata.name: ${NAME}-github - password: ${GH_PASSWORD} - username: ${GH_USERNAME} - type: kubernetes.io/basic-auth -- apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - annotations: - volume.beta.kubernetes.io/storage-class: netapp-file-standard - name: ${NAME}${SUFFIX} - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi -- apiVersion: v1 - kind: ServiceAccount - metadata: - annotations: - serviceaccounts.openshift.io/oauth-redirectreference.jenkins: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"${NAME}-controller${SUFFIX}"}}' - name: ${NAME}${SUFFIX} -- apiVersion: v1 - groupNames: null - kind: RoleBinding - metadata: - name: ${NAME}${SUFFIX}_admin - roleRef: - name: admin - subjects: - - kind: ServiceAccount - name: ${NAME}${SUFFIX} -- apiVersion: v1 - kind: 
DeploymentConfig - metadata: - annotations: - template.alpha.openshift.io/wait-for-ready: "true" - creationTimestamp: null - name: ${NAME}-controller${SUFFIX} - spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - deploymentconfig: ${NAME}-controller${SUFFIX} - strategy: - activeDeadlineSeconds: 21600 - recreateParams: - timeoutSeconds: 600 - resources: {} - type: Recreate - template: - metadata: - creationTimestamp: null - labels: - deploymentconfig: ${NAME}-controller${SUFFIX} - spec: - containers: - - command: - - /usr/local/bin/container-entrypoint - - /usr/local/bin/jenkins-run - env: - - name: USE_JAVA_DIAGNOSTICS - value: "true" - - name: JENKINS_URL - value: "https://gwells-${NAME}${SUFFIX}.${ROUTE_HOST_SUFFIX}${ROUTE_PATH}" - - name: JENKINS_TUNNEL - value: "${NAME}-controller${SUFFIX}:50000" - - name: ENV_NAME - value: ${ENV_NAME} - - name: ENV_ID - value: ${ENV_ID} - - name: APP_TOOLS_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.tools - - name: APP_DEV_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.dev - - name: APP_STAGING_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.test - - name: APP_PROD_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.prod - - name: OCP_PLATFORM - value: '4' - - name: JENKINS_DATA - value: '/var/jenkins-data' - image: ' ' - imagePullPolicy: Always - livenessProbe: - failureThreshold: 3 - httpGet: - path: /login - port: 8080 - initialDelaySeconds: 420 - periodSeconds: 360 - timeoutSeconds: 240 - name: jenkins - ports: - - containerPort: 50000 - protocol: TCP - - containerPort: 8080 - protocol: TCP - readinessProbe: - httpGet: - path: /login - port: 8080 - initialDelaySeconds: 3 - timeoutSeconds: 240 - resources: - limits: - cpu: "${CPU_LIMIT}" - memory: "${MEMORY_LIMIT}" - requests: - cpu: "${CPU_REQUEST}" - memory: "${MEMORY_REQUEST}" - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /var/jenkins-data - name: jenkins-data - readOnly: false - - mountPath: /var/run/pod - name: pod-metadata - - mountPath: /run/secrets/jenkins-slave-user - name: jenkins-slave-user - readOnly: true - - mountPath: /run/secrets/github - name: github - readOnly: true - - mountPath: /run/configs/ns - name: ns-config - readOnly: true - - mountPath: /run/configs/jobs - name: jobs-config - readOnly: true - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - serviceAccount: ${NAME}${SUFFIX} - serviceAccountName: ${NAME}${SUFFIX} - terminationGracePeriodSeconds: 30 - volumes: - - name: jenkins-data - persistentVolumeClaim: - claimName: ${NAME}${SUFFIX} - - downwardAPI: - items: - - fieldRef: - fieldPath: metadata.name - name: name - path: name - - fieldRef: - fieldPath: metadata.namespace - name: namespace - path: namespace - - fieldRef: - fieldPath: metadata.labels - name: labels - path: labels - - fieldRef: - fieldPath: metadata.annotations - name: annotations - path: annotations - name: pod-metadata - - name: jenkins-slave-user - secret: - defaultMode: 420 - secretName: ${NAME}-slave-user - - name: github - secret: - defaultMode: 420 - secretName: ${NAME}-github - - name: ns-config - configMap: - defaultMode: 420 - name: ns-config - - name: jobs-config - configMap: - defaultMode: 420 - name: jobs-config - test: false - triggers: - - imageChangeParams: - automatic: true - containerNames: - - jenkins - from: - kind: ImageStreamTag - name: 
${JENKINS_IMAGE_STREAM_NAME} - type: ImageChange - - type: ConfigChange -- apiVersion: v1 - kind: DeploymentConfig - metadata: - creationTimestamp: null - name: ${NAME}-agent${SUFFIX} - spec: - replicas: "${{AGENT_REPLICAS}}" - revisionHistoryLimit: 10 - selector: - deploymentconfig: ${NAME}-agent${SUFFIX} - strategy: - activeDeadlineSeconds: 21600 - recreateParams: - timeoutSeconds: 600 - resources: {} - type: Recreate - template: - metadata: - creationTimestamp: null - labels: - deploymentconfig: ${NAME}-agent${SUFFIX} - spec: - containers: - - command: - - bash - - -c - - cd $HOME && java -XshowSettings:vm -version && exec java -jar /usr/lib/jenkins/swarm-client.jar - -name "$(cat /etc/hostname)" -deleteExistingClients -fsroot "$JENKINS_HOME/$(cat - /etc/hostname)" -master http://$JENKINS_MASTER_SERVICE:8080 -disableSslVerification - -username "$(cat /var/run/secrets/jenkins-slave-user/username)" -passwordFile - /var/run/secrets/jenkins-slave-user/password -description "$(cat /etc/hostname)" - -executors ${AGENT_EXECUTORS} -labels '${AGENT_LABELS}' -mode - 'normal' -retry 10 -tunnel $JENKINS_MASTER_SERVICE:50000 -disableClientsUniqueId - env: - - name: JENKINS_MASTER_SERVICE - value: ${NAME}-controller${SUFFIX} - - name: JAVA_TOOL_OPTIONS - value: -XX:+UnlockExperimentalVMOptions -XX:+UseContainerSupport - -XX:MaxRAMFraction=5 -XX:MaxHeapFreeRatio=20 -XX:MinHeapFreeRatio=10 - -XX:+UseParallelGC -XX:ParallelGCThreads=2 - - name: ENV_NAME - value: ${ENV_NAME} - - name: ENV_ID - value: ${ENV_ID} - - name: APP_TOOLS_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.tools - - name: APP_DEV_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.dev - - name: APP_STAGING_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.test - - name: APP_PROD_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.prod - - name: APP_PROD_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.prod - - name: OCP_PLATFORM - value: '4' - image: ' ' - imagePullPolicy: Always - name: jenkins - ports: - - containerPort: 50000 - protocol: TCP - - containerPort: 8080 - protocol: TCP - resources: - limits: - cpu: ${CPU_LIMIT} - memory: ${MEMORY_LIMIT} - requests: - cpu: ${CPU_REQUEST} - memory: ${MEMORY_REQUEST} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /var/run/pod - name: pod-metadata - - mountPath: /run/secrets/jenkins-slave-user - name: jenkins-slave-user - readOnly: true - - mountPath: /run/secrets/github - name: github - readOnly: true - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - serviceAccount: ${NAME}${SUFFIX} - serviceAccountName: ${NAME}${SUFFIX} - terminationGracePeriodSeconds: 30 - volumes: - - downwardAPI: - items: - - fieldRef: - fieldPath: metadata.name - name: name - path: name - - fieldRef: - fieldPath: metadata.namespace - name: namespace - path: namespace - - fieldRef: - fieldPath: metadata.labels - name: labels - path: labels - - fieldRef: - fieldPath: metadata.annotations - name: annotations - path: annotations - name: pod-metadata - - name: jenkins-slave-user - secret: - defaultMode: 420 - secretName: ${NAME}-slave-user - - name: github - secret: - defaultMode: 420 - secretName: ${NAME}-github - test: false - triggers: - - imageChangeParams: - automatic: true - containerNames: - - jenkins - - init - from: - kind: ImageStreamTag - name: 
${JENKINS_IMAGE_STREAM_NAME} - type: ImageChange - - type: ConfigChange -- apiVersion: v1 - kind: Service - metadata: - creationTimestamp: null - name: ${NAME}-controller${SUFFIX} - spec: - ports: - - name: 8080-tcp - port: 8080 - protocol: TCP - targetPort: 8080 - - name: 50000-tcp - port: 50000 - protocol: TCP - targetPort: 50000 - selector: - deploymentconfig: ${NAME}-controller${SUFFIX} - sessionAffinity: None - type: ClusterIP -- apiVersion: v1 - kind: Route - metadata: - creationTimestamp: null - name: ${NAME}-controller${SUFFIX} - spec: - host: "gwells-${NAME}${SUFFIX}.${ROUTE_HOST_SUFFIX}" - path: ${ROUTE_PATH} - port: - targetPort: 8080-tcp - tls: - termination: edge - to: - kind: Service - name: ${NAME}-controller${SUFFIX} - weight: 100 - wildcardPolicy: None -parameters: -- description: A name used for all objects - displayName: Name - name: NAME - required: true -- name: SUFFIX -- description: A version used for the image tags - displayName: version - name: JENKINS_IMAGE_STREAM_NAME - required: true - value: "jenkins-main:v2-latest" -- description: ROUTE_HOST_SUFFIX - displayName: ROUTE_HOST_SUFFIX - name: ROUTE_HOST_SUFFIX - required: true - value: "apps.silver.devops.gov.bc.ca" -- description: ROUTE_PATH - displayName: ROUTE_PATH - name: ROUTE_PATH - required: true - value: / -- description: Environment Name - displayName: ENV_NAME - name: ENV_NAME - required: true - value: prod -- description: Environment ID - displayName: ENV_ID - name: ENV_ID - required: true - value: prod -- description: SLAVE_USER_PASSWORD - displayName: SLAVE_USER_PASSWORD - from: '[a-zA-Z0-9]{16}' - generate: expression - name: SLAVE_USER_PASSWORD -- description: GitHub Username (Same as github secret) - displayName: GH_USERNAME - name: GH_USERNAME - required: true - value: "" -- description: GitHub Personal Access Token (Same as github secret) - displayName: GH_PASSWORD - name: GH_PASSWORD - required: true -- name: CPU_REQUEST - description: Minimal CPU needed to run - displayName: CPU Request - value: 100m -- name: CPU_LIMIT - description: Maximum CPU allowed to use - displayName: CPU Limit - value: 2000m -- name: MEMORY_REQUEST - description: Minimal amount of memory needed to run - displayName: Memory Request - value: 1Gi -- name: MEMORY_LIMIT - description: Maximum amount of memory allowed to use - displayName: Memory Limit - value: 4Gi -- name: AGENT_REPLICAS - value: "2" -- name: AGENT_EXECUTORS - value: "3" -- name: AGENT_LABELS - value: "Linux rhel rhel7 main build test deploy light" \ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/jenkins-main-build.yaml b/gwells/openshift/ocp4/jenkins/jenkins-main-build.yaml deleted file mode 100644 index 138d8d67c..000000000 --- a/gwells/openshift/ocp4/jenkins/jenkins-main-build.yaml +++ /dev/null @@ -1,78 +0,0 @@ ---- -apiVersion: template.openshift.io/v1 -kind: Template -labels: - app: jenkins -metadata: - creationTimestamp: null - name: jenkins -objects: -- apiVersion: v1 - kind: ImageStream - metadata: - creationTimestamp: null - labels: - shared: "true" - name: ${NAME}-main - spec: - lookupPolicy: - local: false -- apiVersion: v1 - kind: BuildConfig - metadata: - creationTimestamp: null - name: ${NAME}-main${SUFFIX} - spec: - completionDeadlineSeconds: 600 - failedBuildsHistoryLimit: 3 - successfulBuildsHistoryLimit: 3 - output: - to: - kind: ImageStreamTag - name: ${NAME}-main:${VERSION} - postCommit: {} - resources: - limits: - cpu: 2000m - memory: 2Gi - requests: - cpu: 1000m - memory: 1Gi - runPolicy: SerialLatestOnly - 
source: - contextDir: openshift/ocp4/jenkins/docker - git: - ref: ${SOURCE_REPOSITORY_REF} - uri: ${SOURCE_REPOSITORY_URL} - type: Git - strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: ${SOURCE_IMAGE_STREAM_TAG} - namespace: ${SOURCE_IMAGE_STREAM_NAMESPACE} - type: Docker - triggers: - - imageChange: {} - type: ImageChange -parameters: -- name: NAME - description: A name used for all objects - displayName: Name - required: true - value: jenkins -- name: SUFFIX -- name: VERSION - description: A version used for the image tags - displayName: version - required: true - value: v1.0.0 -- name: SOURCE_IMAGE_STREAM_NAMESPACE - value: "26e83e-tools" -- name: SOURCE_IMAGE_STREAM_TAG - required: true - value: "jenkins-basic:v2-latest" -- name: SOURCE_REPOSITORY_URL - value: https://github.com/bcgov/gwells.git -- name: SOURCE_REPOSITORY_REF - value: release \ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/jenkins-prereq.yaml b/gwells/openshift/ocp4/jenkins/jenkins-prereq.yaml deleted file mode 100644 index 6d1f738a3..000000000 --- a/gwells/openshift/ocp4/jenkins/jenkins-prereq.yaml +++ /dev/null @@ -1,70 +0,0 @@ ---- -kind: Template -apiVersion: v1 -labels: - app: jenkins-prod -metadata: - name: jenkins -objects: -- apiVersion: v1 - data: - project.dev: "${DEV}" - project.test: "${TEST}" - project.prod: "${PROD}" - project.tools: "${TOOLS}" - kind: ConfigMap - metadata: - name: ns-config -- apiVersion: v1 - data: - app.name: "${APP_NAME}" - app.domain: "${APP_DOMAIN}" - repo.name: "${REPO_NAME}" - repo.owner: "${REPO_OWNER}" - kind: ConfigMap - metadata: - name: jobs-config -parameters: -- name: DEV - displayName: Dev Project Namespace - description: Project/Namespace for Dev. Environment - required: true - value: 26e83e-dev -- name: TEST - displayName: Test Project Namespace - description: Project/Namespace for Test Environment - required: true - value: 26e83e-test -- name: PROD - displayName: Prod Project Namespace - description: Project/Namespace for Prod. 
Environment - required: true - value: 26e83e-prod -- name: TOOLS - displayName: Tools Project Namespace - description: Project/Namespace for Tools Environment - required: true - value: 26e83e-tools -- name: NAME - displayName: Name - description: A name used for all objects - required: true - value: jenkins -- name: REPO_OWNER - displayName: Application Repository Owner - description: A name of the github repo owner - required: true - value: bcgov -- name: REPO_NAME - displayName: Application Repository Name - description: Name of the application repository (code to build) - required: true - value: gwells -- name: APP_NAME - description: Short name (one word, lowercase) of the application - required: true - value: gwells -- name: APP_DOMAIN - description: Internet domain for the application - required: true - value: gwells.apps.silver.devops.gov.bc.ca diff --git a/gwells/openshift/ocp4/jenkins/jenkins-secondary-build.yaml b/gwells/openshift/ocp4/jenkins/jenkins-secondary-build.yaml deleted file mode 100644 index 6d8db4d60..000000000 --- a/gwells/openshift/ocp4/jenkins/jenkins-secondary-build.yaml +++ /dev/null @@ -1,88 +0,0 @@ ---- -apiVersion: template.openshift.io/v1 -kind: Template -labels: - app: jenkins -metadata: - creationTimestamp: null - name: jenkins -objects: -- apiVersion: v1 - kind: ImageStream - metadata: - creationTimestamp: null - labels: - shared: "true" - name: ${NAME}-secondary - spec: - lookupPolicy: - local: false -- apiVersion: v1 - kind: BuildConfig - metadata: - creationTimestamp: null - name: ${NAME}-secondary - spec: - completionDeadlineSeconds: 600 - failedBuildsHistoryLimit: 3 - successfulBuildsHistoryLimit: 3 - output: - to: - kind: ImageStreamTag - name: ${NAME}-secondary:${VERSION} - postCommit: {} - resources: - limits: - cpu: 2000m - memory: 2Gi - requests: - cpu: 1000m - memory: 1Gi - runPolicy: SerialLatestOnly - source: - dockerfile: | - FROM BuildConfig - ARG NODE_VERSION=v10.16.0 - ARG SONAR_VERSION=3.3.0.1492 - USER 0 - RUN fix_permission() { while [[ $# > 0 ]] ; do chgrp -R 0 "$1" && chmod -R g=u "$1"; shift; done } && \ - set -x && \ - curl -sSL -o /tmp/sonar-scanner-cli.zip https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${SONAR_VERSION}-linux.zip && \ - unzip /tmp/sonar-scanner-cli.zip -d /tmp/sonar-scanner-cli && \ - mv /tmp/sonar-scanner-cli/sonar-scanner-${SONAR_VERSION}-linux /opt/sonar-scanner && \ - ln -s /opt/sonar-scanner/bin/sonar-scanner /usr/local/bin && \ - rm -rf /tmp/sonar-scanner-cli.zip && \ - rm -rf /tmp/sonar-scanner-cli && \ - curl -sSL https://nodejs.org/dist/${NODE_VERSION}/node-${NODE_VERSION}-linux-x64.tar.xz | tar -Jx -C /opt && \ - mv /opt/node-${NODE_VERSION}-linux-x64 /opt/node && \ - fix_permission '/opt/sonar-scanner' '/opt/node' - ENV NODE_HOME=/opt/node \ - PATH=$PATH:/opt/node/bin - USER 1001 - type: Dockerfile - strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: ${SOURCE_IMAGE_STREAM_TAG} - namespace: ${SOURCE_IMAGE_STREAM_NAMESPACE} - type: Docker - triggers: - - imageChange: {} - type: ImageChange -parameters: -- name: NAME - description: A name used for all objects - displayName: Name - required: true - value: jenkins -- name: VERSION - description: A version used for the image tags - displayName: version - required: true - value: v1.0.0 -- name: SOURCE_IMAGE_STREAM_NAMESPACE - value: "26e83e-tools" -- name: SOURCE_IMAGE_STREAM_TAG - required: true - value: "jenkins-basic:v1.0.0" diff --git a/gwells/openshift/ocp4/jenkins/jenkins-secondary-deploy.yaml 
b/gwells/openshift/ocp4/jenkins/jenkins-secondary-deploy.yaml deleted file mode 100644 index 063b037dd..000000000 --- a/gwells/openshift/ocp4/jenkins/jenkins-secondary-deploy.yaml +++ /dev/null @@ -1,188 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: null - name: jenkins -objects: -- apiVersion: v1 - kind: DeploymentConfig - metadata: - creationTimestamp: null - name: ${NAME}-${SECONDARY_NAME} - spec: - replicas: "${{REPLICAS}}" - revisionHistoryLimit: 10 - selector: - deploymentconfig: ${NAME}-${SECONDARY_NAME} - strategy: - activeDeadlineSeconds: 21600 - recreateParams: - timeoutSeconds: 600 - resources: {} - type: Recreate - template: - metadata: - creationTimestamp: null - labels: - deploymentconfig: ${NAME}-${SECONDARY_NAME} - spec: - initContainers: - - name: init - image: " " - command: - - "curl" - - "-sSf" - - "http://${NAME}:8080/login" - containers: - - command: - - bash - - -c - - cd $HOME && java -XshowSettings:vm -version && exec java -jar /usr/lib/jenkins/swarm-client.jar - -name "$(cat /etc/hostname)" -deleteExistingClients -fsroot "$JENKINS_HOME/$(cat - /etc/hostname)" -master http://$JENKINS_MASTER_SERVICE:8080 -disableSslVerification - -username "$(cat /var/run/secrets/jenkins-slave-user/username)" -passwordFile - /var/run/secrets/jenkins-slave-user/password -description "$(cat /etc/hostname)" - -executors ${SECONDARY_EXECUTORS} -labels '${SECONDARY_LABELS}' -mode - 'normal' -retry 10 -tunnel $JENKINS_MASTER_SERVICE:50000 -disableClientsUniqueId - env: - - name: JENKINS_MASTER_SERVICE - value: ${NAME} - - name: JAVA_TOOL_OPTIONS - value: -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap - -XX:MaxRAMFraction=5 -XX:MaxHeapFreeRatio=20 -XX:MinHeapFreeRatio=10 - -XX:+UseParallelGC -XX:ParallelGCThreads=2 - - name: ENV_NAME - value: ${ENV_NAME} - - name: ENV_ID - value: ${ENV_ID} - - name: APP_TOOLS_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.tools - - name: APP_DEV_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.dev - - name: APP_STAGING_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.test - - name: APP_PROD_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.prod - - name: APP_PROD_NAMESPACE - valueFrom: - configMapKeyRef: - name: ns-config - key: project.prod - - name: OCP_PLATFORM - value: '4' - image: ' ' - imagePullPolicy: Always - name: jenkins - ports: - - containerPort: 50000 - protocol: TCP - - containerPort: 8080 - protocol: TCP - resources: - limits: - cpu: ${CPU_LIMIT} - memory: ${MEMORY_LIMIT} - requests: - cpu: ${CPU_REQUEST} - memory: ${MEMORY_REQUEST} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /var/run/pod - name: pod-metadata - - mountPath: /run/secrets/jenkins-slave-user - name: jenkins-slave-user - readOnly: true - - mountPath: /run/secrets/github - name: github - readOnly: true - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - serviceAccount: ${NAME} - serviceAccountName: ${NAME} - terminationGracePeriodSeconds: 30 - volumes: - - name: jenkins-home - persistentVolumeClaim: - claimName: ${NAME} - - downwardAPI: - items: - - fieldRef: - fieldPath: metadata.name - name: name - path: name - - fieldRef: - fieldPath: metadata.namespace - name: namespace - path: namespace - - fieldRef: - fieldPath: metadata.labels - name: labels - path: labels - - fieldRef: - fieldPath: 
metadata.annotations - name: annotations - path: annotations - name: pod-metadata - - name: jenkins-slave-user - secret: - defaultMode: 420 - secretName: ${NAME}-slave-user - - name: github - secret: - defaultMode: 420 - secretName: ${NAME}-github - test: false - triggers: - - imageChangeParams: - automatic: true - containerNames: - - jenkins - - init - from: - kind: ImageStreamTag - name: jenkins-basic:${VERSION} - type: ImageChange - - type: ConfigChange -parameters: -- description: A name used for all objects - displayName: Name - name: NAME - required: true - value: jenkins -- description: A version used for the image tags - displayName: version - name: VERSION - required: true - value: v1.0.0 -- name: SECONDARY_NAME - required: true - value: secondary -- name: SECONDARY_LABELS - value: "Linux rhel rhel7 build test deploy light" -- name: SECONDARY_EXECUTORS - value: "3" -- name: REPLICAS - value: "1" -- name: CPU_REQUEST - value: "300m" -- name: CPU_LIMIT - value: "2000m" -- name: MEMORY_REQUEST - value: "1Gi" -- name: MEMORY_LIMIT - value: "4Gi" \ No newline at end of file diff --git a/gwells/openshift/ocp4/jenkins/jenkins.nsp.yaml b/gwells/openshift/ocp4/jenkins/jenkins.nsp.yaml deleted file mode 100644 index 162eea229..000000000 --- a/gwells/openshift/ocp4/jenkins/jenkins.nsp.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -parameters: -- name: NAMESPACE -objects: -- apiVersion: security.devops.gov.bc.ca/v1alpha1 - kind: NetworkSecurityPolicy - metadata: - name: egress-internet - spec: - description: "allow ${NAMESPACE} namespace to talk to the internet." - source: - - - $namespace=${NAMESPACE} - destination: - - - ext:network=any -- apiVersion: security.devops.gov.bc.ca/v1alpha1 - kind: NetworkSecurityPolicy - metadata: - name: intra-namespace-comms - spec: - description: "allow ${NAMESPACE} namespace to talk to itself" - source: - - - $namespace=${NAMESPACE} - destination: - - - $namespace=${NAMESPACE} -- apiVersion: security.devops.gov.bc.ca/v1alpha1 - kind: NetworkSecurityPolicy - metadata: - name: int-cluster-k8s-api-comms - spec: - description: "allow ${NAMESPACE} pods to talk to the k8s api" - destination: - - - int:network=internal-cluster-api-endpoint - source: - - - $namespace=${NAMESPACE} diff --git a/gwells/openshift/ocp4/jobs/export-databc/export.cj.json b/gwells/openshift/ocp4/jobs/export-databc/export.cj.json deleted file mode 100644 index c8ff9905d..000000000 --- a/gwells/openshift/ocp4/jobs/export-databc/export.cj.json +++ /dev/null @@ -1,195 +0,0 @@ -{ - "kind": "Template", - "apiVersion": "v1", - "metadata": {}, - "parameters": [ - { - "name": "ENV_NAME", - "required": true - }, - { - "name": "PROJECT", - "required": true - }, - { - "name": "TAG", - "required": false, - "value": "${ENV_NAME}" - }, - { - "name": "NAME", - "required": true - }, - { - "name": "COMMAND", - "required": true - }, - { - "name": "SCHEDULE", - "required": true - } - ], - "objects": [ - { - "apiVersion": "batch/v1", - "kind": "CronJob", - "metadata": { - "name": "${NAME}" - }, - "spec": { - "schedule": "${SCHEDULE}", - "concurrencyPolicy": "Forbid", - "jobTemplate": { - "spec": { - "template": { - "spec": { - "containers": [ - { - "name": "${NAME}", - "image": "image-registry.openshift-image-registry.svc:5000/${PROJECT}/gwells-${ENV_NAME}:${TAG}", - "imagePullPolicy": "Always", - "command": [ - "python", - "backend/manage.py", - "${COMMAND}" - ], - "env": [ - { - "name": "DATABASE_SERVICE_NAME", - "value": "gwells-pg12-${ENV_NAME}" - }, - { - 
"name": "DATABASE_ENGINE", - "value": "postgresql" - }, - { - "name": "DATABASE_NAME", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pg12-${ENV_NAME}", - "key": "database-name" - } - } - }, - { - "name": "DATABASE_USER", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pg12-${ENV_NAME}", - "key": "database-user" - } - } - }, - { - "name": "DATABASE_PASSWORD", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pg12-${ENV_NAME}", - "key": "database-password" - } - } - }, - { - "name": "DATABASE_SCHEMA", - "value": "public" - }, - { - "name": "MINIO_ACCESS_KEY", - "valueFrom": { - "secretKeyRef": { - "name": "minio-access-parameters-${ENV_NAME}", - "key": "MINIO_ACCESS_KEY" - } - } - }, - { - "name": "MINIO_SECRET_KEY", - "valueFrom": { - "secretKeyRef": { - "name": "minio-access-parameters-${ENV_NAME}", - "key": "MINIO_SECRET_KEY" - } - } - }, - { - "name": "S3_PUBLIC_ACCESS_KEY", - "valueFrom": { - "secretKeyRef": { - "name": "minio-access-parameters-${ENV_NAME}", - "key": "S3_PUBLIC_ACCESS_KEY" - } - } - }, - { - "name": "S3_PUBLIC_SECRET_KEY", - "valueFrom": { - "secretKeyRef": { - "name": "minio-access-parameters-${ENV_NAME}", - "key": "S3_PUBLIC_SECRET_KEY" - } - } - }, - { - "name": "S3_HOST", - "valueFrom": { - "secretKeyRef": { - "name": "minio-access-parameters-${ENV_NAME}", - "key": "S3_HOST" - } - } - }, - { - "name": "S3_ROOT_BUCKET", - "valueFrom": { - "secretKeyRef": { - "name": "minio-access-parameters-${ENV_NAME}", - "key": "S3_ROOT_BUCKET" - } - } - }, - { - "name": "S3_PRIVATE_HOST", - "valueFrom": { - "configMapKeyRef": { - "key": "S3_PRIVATE_HOST", - "name": "gwells-global-config-${ENV_NAME}" - } - } - }, - { - "name": "S3_WELL_EXPORT_BUCKET", - "valueFrom": { - "configMapKeyRef": { - "key": "S3_WELL_EXPORT_BUCKET", - "name": "gwells-global-config-${ENV_NAME}" - } - } - }, - { - "name": "S3_PRIVATE_BUCKET", - "valueFrom": { - "configMapKeyRef": { - "key": "S3_PRIVATE_BUCKET", - "name": "gwells-global-config-${ENV_NAME}" - } - } - } - ], - "envFrom": [ - { - "configMapRef": { - "name": "gwells-global-config-${ENV_NAME}" - } - } - ] - } - ], - "restartPolicy": "OnFailure" - } - } - } - } - } - } - ] -} \ No newline at end of file diff --git a/gwells/openshift/ocp4/jobs/import-licences/import-licences.cj.json b/gwells/openshift/ocp4/jobs/import-licences/import-licences.cj.json deleted file mode 100644 index e1455a3eb..000000000 --- a/gwells/openshift/ocp4/jobs/import-licences/import-licences.cj.json +++ /dev/null @@ -1,110 +0,0 @@ -{ - "kind": "Template", - "apiVersion": "v1", - "metadata": {}, - "parameters": [ - { - "name": "ENV_NAME", - "required": true - }, - { - "name": "PROJECT", - "required": true - }, - { - "name": "TAG", - "required": false, - "value": "${ENV_NAME}" - }, - { - "name": "NAME", - "required": true - }, - { - "name": "COMMAND", - "required": true - }, - { - "name": "SCHEDULE", - "required": true - } - ], - "objects": [ - { - "apiVersion": "batch/v1", - "kind": "CronJob", - "metadata": { - "name": "${NAME}" - }, - "spec": { - "schedule": "${SCHEDULE}", - "concurrencyPolicy": "Forbid", - "jobTemplate": { - "spec": { - "template": { - "spec": { - "containers": [ - { - "name": "${NAME}", - "image": "image-registry.openshift-image-registry.svc:5000/${PROJECT}/gwells-${ENV_NAME}:${TAG}", - "imagePullPolicy": "Always", - "command": [ - "python", - "backend/manage.py", - "${COMMAND}" - ], - "env": [ - { - "name": "DATABASE_SERVICE_NAME", - "value": "gwells-pg12-${ENV_NAME}" - }, - { - "name": "DATABASE_ENGINE", - "value": "postgresql" - }, - 
{ - "name": "DATABASE_NAME", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pg12-${ENV_NAME}", - "key": "database-name" - } - } - }, - { - "name": "DATABASE_USER", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pg12-${ENV_NAME}", - "key": "database-user" - } - } - }, - { - "name": "DATABASE_PASSWORD", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pg12-${ENV_NAME}", - "key": "database-password" - } - } - } - ], - "envFrom": [ - { - "configMapRef": { - "name": "gwells-global-config-${ENV_NAME}" - } - } - ] - } - ], - "restartPolicy": "OnFailure" - } - } - } - } - } - } - ] -} diff --git a/gwells/openshift/ocp4/jobs/minio-backup/Dockerfile b/gwells/openshift/ocp4/jobs/minio-backup/Dockerfile deleted file mode 100644 index 9624ac23f..000000000 --- a/gwells/openshift/ocp4/jobs/minio-backup/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM alpine:latest -USER root -RUN apk add --update \ - curl rsync \ - && rm -rf /var/cache/apk/* - -# install restic -RUN curl -Lo restic.bz2 https://github.com/restic/restic/releases/download/v0.9.4/restic_0.9.4_linux_amd64.bz2 \ - && bzip2 -d restic.bz2 \ - && mv restic /usr/bin/restic \ - && chmod +x /usr/bin/restic - -COPY ./entrypoint.sh / -ENTRYPOINT ["/entrypoint.sh"] -USER 1001 diff --git a/gwells/openshift/ocp4/jobs/minio-backup/README.md b/gwells/openshift/ocp4/jobs/minio-backup/README.md deleted file mode 100644 index 89f7a84d5..000000000 --- a/gwells/openshift/ocp4/jobs/minio-backup/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# S3/minio document backup - -This job connects to a PVC containing files to backup and makes a copy to a second PVC. It then syncs the files to NFS storage using restic (see https://restic.net/). - -This leaves 3 copies of the data: the original (in-use) PVC that is mounted by the minio service, -a backup PVC in the cluster that is only mounted by the backup job pods during backup (e.g. gwells-documents-staging-backup-vol), and the provisioned NFS storage (restic repository). - - -Example usage in Jenkins pipeline: - -```groovy -def docBackupCronjob = openshift.process("-f", - "openshift/jobs/minio-backup.cj.yaml", - - // values for the environment that this job will run in - "NAME_SUFFIX=${prodSuffix}", - "NAMESPACE=${prodProject}", - - // this is the backup image version created by the build config in this folder (minio-backup.bc.yaml) - "VERSION=v1.0.0", - "SCHEDULE='15 12 * * *'", - - // the name of the target backup PVC for the restic repository. This will be the 3rd backup. - // the 2nd backup will be a PVC created by the minio-backup.cj.yaml template. - // GWELLS uses a provisioned NFS storage claim for this value. - "DEST_PVC=${backupPVC}", - "SOURCE_PVC=${minioDataPVC}", // the name of the minio data PVC - "PVC_SIZE=40Gi" // you may need enough space to hold a few copies of files on-disk. -) -``` diff --git a/gwells/openshift/ocp4/jobs/minio-backup/entrypoint.sh b/gwells/openshift/ocp4/jobs/minio-backup/entrypoint.sh deleted file mode 100755 index ac21d14f3..000000000 --- a/gwells/openshift/ocp4/jobs/minio-backup/entrypoint.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/sh -set -euo pipefail -IFS=$'\n\t' -[ "${VERBOSE:-}" != true ]|| set -x - -# PVC mount and folder variables, removing any trailing slashes (%/) -# -DEST_MNT=${DEST_MNT:-/backup} -DEST_MNT=${DEST_MNT%/} -# -DEST_DIR=${DEST_MNT}/documents -TMP_BK=${NEW_BK:-${DEST_DIR}/bk-tmp} -NEW_BK=${NEW_BK:-${DEST_DIR}/bk} - -du -hd 1 ${DEST_MNT} - -# Check if NFS repository is initialized. If not, initialize it. -# RESTIC_PASSWORD is required. -if ! 
restic -r /mnt/dest/gwells-documents snapshots > /dev/null 2>&1; then - restic -r /mnt/dest/gwells-documents init ; fi - -# Backup files using delta (de-duplicate) and encryption -restic --cache-dir ${DEST_DIR}/.cache -r /mnt/dest/gwells-documents backup ${NEW_BK} - -# Clean up old snapshots. -# As an example, the following arguments: -# --keep-daily 7 --keep-weekly 5 --keep-monthly 12 --keep-yearly 2 -# will keep the most recent 7 daily snapshots, 5 weekly, 12 monthly, and 2 yearly snapshots. -# The rest will be pruned. -restic -r /mnt/dest/gwells-documents forget --keep-daily 7 --keep-weekly 5 --keep-monthly 12 --keep-yearly 10 --prune - -# check repository integrity before exiting -restic -r /mnt/dest/gwells-documents check diff --git a/gwells/openshift/ocp4/jobs/minio-backup/minio-backup.bc.yaml b/gwells/openshift/ocp4/jobs/minio-backup/minio-backup.bc.yaml deleted file mode 100644 index c037639a5..000000000 --- a/gwells/openshift/ocp4/jobs/minio-backup/minio-backup.bc.yaml +++ /dev/null @@ -1,77 +0,0 @@ -apiVersion: v1 -kind: Template -metadata: {} -parameters: - - name: VERSION - value: v1.0.0 - - name: NAMESPACE - value: 26e83e-tools -objects: - - apiVersion: image.openshift.io/v1 - kind: ImageStream - metadata: - annotations: - openshift.io/generated-by: OpenShiftNewBuild - creationTimestamp: null - labels: - name: gwells-documents-backup - component: backups - part-of: gwells - name: gwells-documents-backup - namespace: "${NAMESPACE}" - spec: - lookupPolicy: - local: false - status: - dockerImageRepository: "" - - apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - annotations: - openshift.io/generated-by: OpenShiftNewBuild - creationTimestamp: null - labels: - name: "gwells-documents-backup-${VERSION}" - component: backups - version: ${VERSION} - part-of: gwells - name: "gwells-documents-backup-${VERSION}" - namespace: "${NAMESPACE}" - spec: - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: gwells-documents-backup:${VERSION} - postCommit: {} - resources: {} - source: - git: - uri: https://github.com/bcgov/gwells.git - ref: steve/backups - contextDir: openshift/jobs/minio-backup - dockerfile: | - FROM alpine:latest - USER root - RUN apk add --update \ - curl rsync \ - && rm -rf /var/cache/apk/* - RUN curl -Lo restic.bz2 https://github.com/restic/restic/releases/download/v0.9.4/restic_0.9.4_linux_amd64.bz2 \ - && bzip2 -d restic.bz2 \ - && mv restic /usr/bin/restic \ - && chmod +x /usr/bin/restic - COPY ./entrypoint.sh / - ENTRYPOINT ["/entrypoint.sh"] - USER 1001 - type: Dockerfile - strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: alpine:3.7 - namespace: openshift - type: Docker - triggers: - - type: ConfigChange - status: - lastVersion: 0 diff --git a/gwells/openshift/ocp4/jobs/minio-backup/minio-backup.cj.yaml b/gwells/openshift/ocp4/jobs/minio-backup/minio-backup.cj.yaml deleted file mode 100644 index d0c725335..000000000 --- a/gwells/openshift/ocp4/jobs/minio-backup/minio-backup.cj.yaml +++ /dev/null @@ -1,91 +0,0 @@ -apiVersion: v1 -kind: Template -metadata: {} -parameters: - - name: NAME_SUFFIX - required: true - - name: NAMESPACE - required: true - value: 26e83e-tools - - name: PVC_SIZE - value: "15Gi" - - name: DEST_PVC - required: true - - name: VERSION - value: v1.0.0 - - name: SCHEDULE - value: "15 3 * * *" - required: false - - name: SOURCE_PVC - required: true -objects: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - finalizers: - - kubernetes.io/pvc-protection - labels: - app: "gwells-${NAME_SUFFIX}" 
- name: "gwells-documents-${NAME_SUFFIX}-backup" - component: backups - part-of: gwells - name: gwells-documents-${NAME_SUFFIX}-backup - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: ${PVC_SIZE} - storageClassName: netapp-file-backup - - apiVersion: batch/v1 - kind: CronJob - metadata: - name: gwells-documents-${NAME_SUFFIX}-backup - namespace: "${NAMESPACE}" - spec: - concurrencyPolicy: Forbid - failedJobsHistoryLimit: 3 - jobTemplate: - metadata: - labels: - app: "gwells-${NAME_SUFFIX}" - name: "gwells-documents-${NAME_SUFFIX}-backup" - component: backups - part-of: gwells - spec: - backoffLimit: 10 - template: - spec: - activeDeadlineSeconds: 3600 - containers: - - image: image-registry.openshift-image-registry.svc:5000/${NAMESPACE}/gwells-documents-backup:${VERSION} - imagePullPolicy: Always - name: minio-backup - env: - - name: RESTIC_PASSWORD - valueFrom: - secretKeyRef: - key: RESTIC_PASSWORD - name: minio-access-parameters-${NAME_SUFFIX} - volumeMounts: - - mountPath: /mnt/dest/ - name: dest - - mountPath: /backup - name: backup - - mountPath: /mnt/source/ - name: source - readOnly: true - restartPolicy: OnFailure - terminationGracePeriodSeconds: 30 - volumes: - - name: source - persistentVolumeClaim: - claimName: "${SOURCE_PVC}" - - name: dest - persistentVolumeClaim: - claimName: "${DEST_PVC}" - - name: backup - persistentVolumeClaim: - claimName: "gwells-documents-${NAME_SUFFIX}-backup" - schedule: ${SCHEDULE} - successfulJobsHistoryLimit: 3 diff --git a/gwells/openshift/ocp4/jobs/postgres-backup-nfs/README.md b/gwells/openshift/ocp4/jobs/postgres-backup-nfs/README.md deleted file mode 100644 index 475e0365f..000000000 --- a/gwells/openshift/ocp4/jobs/postgres-backup-nfs/README.md +++ /dev/null @@ -1 +0,0 @@ -This job backs up the GWELLS database to a provisioned NFS volume using https://github.com/BCDevOps/backup-container. diff --git a/gwells/openshift/ocp4/jobs/postgres-backup-nfs/postgres-backup.bc.yaml b/gwells/openshift/ocp4/jobs/postgres-backup-nfs/postgres-backup.bc.yaml deleted file mode 100644 index 2e893e84d..000000000 --- a/gwells/openshift/ocp4/jobs/postgres-backup-nfs/postgres-backup.bc.yaml +++ /dev/null @@ -1,86 +0,0 @@ ---- -kind: Template -apiVersion: v1 -metadata: - name: "${NAME}-build-template" - creationTimestamp: -parameters: -- name: NAME - displayName: Name - description: The name assigned to all of the resources defined in this template. - required: true - value: backup -- name: GIT_REPO_URL - displayName: Git Repo URL - description: The URL to your GIT repo. - required: true - value: https://github.com/BCDevOps/backup-container.git -- name: GIT_REF - displayName: Git Reference - description: The git reference or branch. - required: true - value: master -- name: SOURCE_CONTEXT_DIR - displayName: Source Context Directory - description: The source context directory. - required: false - value: "/docker" -- name: SOURCE_IMAGE_KIND - displayName: Source Image Kind - description: The 'kind' (type) of the source image; typically ImageStreamTag, or - DockerImage. - required: true - value: DockerImage -- name: SOURCE_IMAGE_NAME - displayName: Source Image Name - description: The name of the source image. - required: true - value: image-registry.openshift-image-registry.svc:5000/26e83e-tools/crunchy-postgres-gis -- name: SOURCE_IMAGE_TAG - displayName: Source Image Tag - description: The tag of the source image. 
- required: true - value: centos7-12.4-3.0-4.5.0 -- name: DOCKER_FILE_PATH - displayName: Docker File Path - description: The path to the docker file defining the build. - required: false - value: Dockerfile -- name: OUTPUT_IMAGE_TAG - displayName: Output Image Tag - description: The tag given to the built image. - required: true - value: latest -objects: -- kind: ImageStream - apiVersion: v1 - metadata: - name: "${NAME}" -- kind: BuildConfig - apiVersion: v1 - metadata: - name: "${NAME}" - labels: - app: "${NAME}" - spec: - triggers: - - type: ImageChange - - type: ConfigChange - runPolicy: Serial - source: - type: Git - git: - uri: "${GIT_REPO_URL}" - ref: "${GIT_REF}" - contextDir: "${SOURCE_CONTEXT_DIR}" - strategy: - type: Docker - dockerStrategy: - from: - kind: "${SOURCE_IMAGE_KIND}" - name: "${SOURCE_IMAGE_NAME}:${SOURCE_IMAGE_TAG}" - dockerfilePath: "${DOCKER_FILE_PATH}" - output: - to: - kind: ImageStreamTag - name: "${NAME}:${OUTPUT_IMAGE_TAG}" \ No newline at end of file diff --git a/gwells/openshift/ocp4/jobs/postgres-backup-nfs/postgres-backup.cj.yaml b/gwells/openshift/ocp4/jobs/postgres-backup-nfs/postgres-backup.cj.yaml deleted file mode 100644 index 6e9f733ba..000000000 --- a/gwells/openshift/ocp4/jobs/postgres-backup-nfs/postgres-backup.cj.yaml +++ /dev/null @@ -1,252 +0,0 @@ ---- -apiVersion: v1 -kind: Template -metadata: - annotations: - description: "Scheduled Task to perform a Database Backup" - tags: "cronjob,backup" - labels: - app: ${TARGET}-backup - cronjob: ${TARGET}-backup - component: backups - part-of: gwells - template: "${JOB_NAME}-config-template" - name: "${JOB_NAME}-cronjob-template" -parameters: - - name: "NAMESPACE" - required: true - - name: "TARGET" - displayName: "Database name (deployment config, not pod name)" - description: "The name of the database, by deployment config, to be backed up." - required: true - - name: "JOB_NAME" - displayName: "Job Name" - description: "Name of the Scheduled Job to Create." - value: "backup" - required: true - - name: "SCHEDULE" - displayName: "Cron Schedule" - description: "Cron Schedule to Execute the Job (in UTC)" - # 11:00 UTC = 3:00 AM PDT - value: "27 9 * * *" - required: true - - name: "PVC_NAME" - required: true - - name: "SOURCE_IMAGE_NAME" - displayName: "Source Image Name" - description: "The name of the image to use for this resource." - required: false - value: "postgres-backup-container" - - name: "IMAGE_NAMESPACE" - displayName: "Image Namespace" - description: "The namespace of the OpenShift project containing the imagestream for the application." - required: false - value: "26e83e-tools" - - name: "TAG_NAME" - displayName: "Environment TAG name" - description: "The TAG name for this environment, e.g., dev, test, prod" - required: false - value: "v1.0.0" - - name: "DEFAULT_PORT" - displayName: "Database Service Port" - description: "The configured port for the database service" - required: false - value: "5432" - - name: "DATABASE_NAME" - displayName: "Database Name" - description: "The name of the database." - required: false - value: "gwells" - - name: "BACKUP_STRATEGY" - displayName: "Backup Strategy" - description: "The strategy to use for backups; for example daily, or rolling." 
- required: false - value: "rolling" - - name: "BACKUP_DIR" - displayName: "The root backup directory" - description: "The name of the root backup directory" - required: false - value: "/gwells-db-backups/" - - name: "NUM_BACKUPS" - displayName: "The number of backup files to be retained" - description: "The number of backup files to be retained. Used for the `daily` backup strategy. Ignored when using the `rolling` backup strategy." - required: false - value: "5" - - name: "DAILY_BACKUPS" - displayName: "Number of Daily Backups to Retain" - description: "The number of daily backup files to be retained. Used for the `rolling` backup strategy." - required: false - value: "7" - - name: "WEEKLY_BACKUPS" - displayName: "Number of Weekly Backups to Retain" - description: "The number of weekly backup files to be retained. Used for the `rolling` backup strategy." - required: false - value: "4" - - name: "MONTHLY_BACKUPS" - displayName: "Number of Monthly Backups to Retain" - description: "The number of monthly backup files to be retained. Used for the `rolling` backup strategy." - required: false - value: "12" - - name: "SUCCESS_JOBS_HISTORY_LIMIT" - displayName: "Successful Job History Limit" - description: "The number of successful jobs that will be retained." - value: "5" - required: false - - name: "FAILED_JOBS_HISTORY_LIMIT" - displayName: "Failed Job History Limit" - description: "The number of failed jobs that will be retained." - value: "2" - required: false - - name: "JOB_BACKOFF_LIMIT" - displayName: "Job Backoff Limit" - description: "The number of attempts to try for a successful job outcome." - value: "0" - required: false - - name: "JOB_DEADLINE_SECONDS" - displayName: "Job deadline (seconds)" - description: "The maximum amount of time to let this job run." 
- value: "600" - required: false -objects: - - apiVersion: v1 - data: - BACKUP_STRATEGY: ${BACKUP_STRATEGY} - DAILY_BACKUPS: ${DAILY_BACKUPS} - DATABASE_SERVICE_NAME: ${TARGET} - DEFAULT_PORT: ${DEFAULT_PORT} - MONTHLY_BACKUPS: ${MONTHLY_BACKUPS} - NUM_BACKUPS: ${NUM_BACKUPS} - POSTGRESQL_DATABASE: ${DATABASE_NAME} - WEEKLY_BACKUPS: ${WEEKLY_BACKUPS} - kind: ConfigMap - metadata: - labels: - app: ${TARGET}-backup - cronjob: ${TARGET}-backup - template: "${JOB_NAME}-config-template" - name: ${TARGET}-backup - namespace: ${NAMESPACE} - - apiVersion: batch/v1 - kind: CronJob - metadata: - name: ${TARGET}-nfs-backup - namespace: ${NAMESPACE} - labels: - name: ${TARGET}-backup - cronjob: ${TARGET}-backup - part-of: gwells - component: backups - template: "${JOB_NAME}-config-template" - spec: - concurrencyPolicy: Forbid - failedJobsHistoryLimit: ${{FAILED_JOBS_HISTORY_LIMIT}} - jobTemplate: - metadata: - creationTimestamp: null - labels: - app: ${TARGET}-backup - cronjob: ${TARGET}-backup - component: backups - template: "${JOB_NAME}-config-template" - spec: - backoffLimit: ${{JOB_BACKOFF_LIMIT}} - template: - metadata: - creationTimestamp: null - spec: - activeDeadlineSeconds: ${{JOB_DEADLINE_SECONDS}} - containers: - - command: - - /bin/bash - - -c - - /backup.sh -1 - env: - - name: BACKUP_DIR - value: /backups/ - - name: BACKUP_STRATEGY - valueFrom: - configMapKeyRef: - key: BACKUP_STRATEGY - name: ${TARGET}-backup - - name: NUM_BACKUPS - valueFrom: - configMapKeyRef: - key: NUM_BACKUPS - name: ${TARGET}-backup - optional: true - - name: DAILY_BACKUPS - valueFrom: - configMapKeyRef: - key: DAILY_BACKUPS - name: ${TARGET}-backup - optional: true - - name: WEEKLY_BACKUPS - valueFrom: - configMapKeyRef: - key: WEEKLY_BACKUPS - name: ${TARGET}-backup - optional: true - - name: MONTHLY_BACKUPS - valueFrom: - configMapKeyRef: - key: MONTHLY_BACKUPS - name: ${TARGET}-backup - optional: true - - name: DATABASE_SERVICE_NAME - valueFrom: - configMapKeyRef: - key: DATABASE_SERVICE_NAME - name: ${TARGET}-backup - - name: DEFAULT_PORT - valueFrom: - configMapKeyRef: - key: DEFAULT_PORT - name: ${TARGET}-backup - optional: true - - name: POSTGRESQL_DATABASE - valueFrom: - configMapKeyRef: - key: POSTGRESQL_DATABASE - name: ${TARGET}-backup - - name: POSTGRESQL_USER - value: postgres - - name: POSTGRESQL_PASSWORD - valueFrom: - secretKeyRef: - key: PG_ROOT_PASSWORD - name: crunchy-db-credentials - - name: DATABASE_NAME - valueFrom: - configMapKeyRef: - key: POSTGRESQL_DATABASE - name: ${TARGET}-backup - - name: DATABASE_USER - value: postgres - - name: DATABASE_PASSWORD - valueFrom: - secretKeyRef: - key: PG_ROOT_PASSWORD - name: crunchy-db-credentials - image: image-registry.openshift-image-registry.svc:5000/${IMAGE_NAMESPACE}/${SOURCE_IMAGE_NAME}:${TAG_NAME} - imagePullPolicy: Always - name: backup-cronjob - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /backups/ - name: backup - dnsPolicy: ClusterFirst - restartPolicy: Never - schedulerName: default-scheduler - securityContext: {} - serviceAccount: default - serviceAccountName: default - terminationGracePeriodSeconds: 30 - volumes: - - name: backup - persistentVolumeClaim: - claimName: ${PVC_NAME} - schedule: ${SCHEDULE} - successfulJobsHistoryLimit: ${{SUCCESS_JOBS_HISTORY_LIMIT}} - suspend: false diff --git a/gwells/openshift/ocp4/jobs/update-aquifer/update-aquifer.cj.json b/gwells/openshift/ocp4/jobs/update-aquifer/update-aquifer.cj.json deleted file mode 100644 index 
e1455a3eb..000000000 --- a/gwells/openshift/ocp4/jobs/update-aquifer/update-aquifer.cj.json +++ /dev/null @@ -1,110 +0,0 @@ -{ - "kind": "Template", - "apiVersion": "v1", - "metadata": {}, - "parameters": [ - { - "name": "ENV_NAME", - "required": true - }, - { - "name": "PROJECT", - "required": true - }, - { - "name": "TAG", - "required": false, - "value": "${ENV_NAME}" - }, - { - "name": "NAME", - "required": true - }, - { - "name": "COMMAND", - "required": true - }, - { - "name": "SCHEDULE", - "required": true - } - ], - "objects": [ - { - "apiVersion": "batch/v1", - "kind": "CronJob", - "metadata": { - "name": "${NAME}" - }, - "spec": { - "schedule": "${SCHEDULE}", - "concurrencyPolicy": "Forbid", - "jobTemplate": { - "spec": { - "template": { - "spec": { - "containers": [ - { - "name": "${NAME}", - "image": "image-registry.openshift-image-registry.svc:5000/${PROJECT}/gwells-${ENV_NAME}:${TAG}", - "imagePullPolicy": "Always", - "command": [ - "python", - "backend/manage.py", - "${COMMAND}" - ], - "env": [ - { - "name": "DATABASE_SERVICE_NAME", - "value": "gwells-pg12-${ENV_NAME}" - }, - { - "name": "DATABASE_ENGINE", - "value": "postgresql" - }, - { - "name": "DATABASE_NAME", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pg12-${ENV_NAME}", - "key": "database-name" - } - } - }, - { - "name": "DATABASE_USER", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pg12-${ENV_NAME}", - "key": "database-user" - } - } - }, - { - "name": "DATABASE_PASSWORD", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pg12-${ENV_NAME}", - "key": "database-password" - } - } - } - ], - "envFrom": [ - { - "configMapRef": { - "name": "gwells-global-config-${ENV_NAME}" - } - } - ] - } - ], - "restartPolicy": "OnFailure" - } - } - } - } - } - } - ] -} diff --git a/gwells/openshift/ocp4/migration/README.md b/gwells/openshift/ocp4/migration/README.md deleted file mode 100644 index e26b2d863..000000000 --- a/gwells/openshift/ocp4/migration/README.md +++ /dev/null @@ -1,231 +0,0 @@ -# Migration - -## Migration steps -Here are the steps we need to do to migrate from the Openshift Container Platform (OCP) version 3 to version 4. -To avoid confusion, let's call OCP3 Pathfinder and OCP4 Silver. - -### Before you start -You need: -- [ ] Migrator CLI Deployment Config - - Used to migrate the database (gwells and tileserver views) -- [ ] Minio mirror `mc mirror` setup from Silver to Pathfinder - - Make sure we have enough storage in Silver, and turn on `mc mirror` to rsync/mirror the files continuously until migration day - -### Migration checklist -- [x] Make sure all services are down to prevent users from entering data and uploading documents -- [x] [Migrate database](#running-the-database-migration-script) -- [x] Make sure the settings for AWS (public documents) on Silver are working -- [x] Change or copy `gwells-maintenance` reverse proxy config to gwells on Pathfinder -- [x] Stop all scheduled jobs on pathfinder -- [x] Activate minio backups -- [x] Double check everything on Silver. -- [x] Make sure no traffic is getting processed in Pathfinder. - -### Next Steps -- [ ] Create a ticket with INFRA to change their reverse proxy for `apps.nrs.gov.bc.ca/gwells` from Pathfinder to Silver -- [ ] Remove reverse proxy on Pathfinder to Silver and convert into a redirect. This will make it noticeable to anyone who uses the pathfinder URLs (APIs), and give them a chance to update their URLs. 
-- [x] [Remove the extra db user if needed](#remove-extra-user) -- [ ] Bring over db backups from OCP3 -- [ ] Update Jenkinsfile for the URL prod suffix -- [ ] Use NRS object storage (create a ticket) - - -## Migration tools - -### Setting up the Migrator CLI with the database migration scripts -**Migration scripts** -```bash -# Set namespace (will be used in the next script) -NAMESPACE4="26e83e-dev" - -# Create a config map from the migration scripts -oc -n $NAMESPACE4 create configmap migration-scripts \ ---from-file=scripts/ -``` - -**migrator-cli (importer.dc.yaml)** -```bash -# Deploy migrator dc with oc cli -oc process -f importer.dc.yaml -p NAMESPACE=$NAMESPACE4 | oc apply -f - -``` - -### Running the migration script -**NOTE:** You need your Pathfinder auth token and Silver auth token. Have it handy beforehand. - -Inside the `migrator-cli` pod: -```/bin/bash -cd scripts - -# Run the script -# This script does all the migration steps. -# If it fails, you may run one of the smaller migration scripts it calls and continue from there. -# It doesn't accept environment as a first param; instead it asks for it -./do_migration.sh |& tee /tmp/migration.log -``` - -#### Smaller migration scripts -**`migrate_database.sh`** -```bash -# This simply calls the two db migration scripts -# db_dump_and_copy.sh and db_copy_and_restore.sh -# It doesn't accept environment as a first param; instead it asks for it -./migrate_database.sh -``` - -**`db_dump_and_copy.sh`** -```bash -# Run `pg_dump` (custom postgres format) on Silver, and copy the file on this volume -./db_dump_and_copy.sh [test/prod] -``` - -**`db_copy_and_restore.sh`** -```bash -# Copy the dump file from this volume onto the postgres pod volume -# Run `pg_restore` on postgres pod -./db_copy_and_restore.sh [test/prod] -``` - -**`migrate_minio.sh`** -```bash -# Run `mc mirror` on Pathfinder to copy all the buckets and files/objects to Silver. 
-# Also runs an `mc diff` to check if there are discrepancies -./migrate_minio.sh [test/prod] -``` - -**`activate_proxy.sh`** -```bash -# Switch the `gwells-staging` routes on Pathfinder to proxy pass to Silver -./activate_proxy.sh [test/prod] - -# To switch them back, add `--revert` at the end -./activate_proxy.sh [test/prod] --revert -``` - -**`scale_down.sh`** -```bash -# Scales down the `gwells-staging` application on Silver -./scale_down.sh [test/prod] - -# To scale it back up, add `--revert` at the end -./scale_down.sh [test/prod] --revert -``` - -**`scale_up.sh`** -```bash -# Scales up the `gwells-staging` application on Silver -./scale_up.sh [test/prod] - -# To scale it back down, add `--revert` at the end -./scale_up.sh [test/prod] --revert -``` - -#### Issues, tips and tricks -**Log in to the migrator-cli pod terminal quickly** -I use a helper script named `rsh_migrator_cli.sh`: - -```bash -# Make it executable -chmod +x rsh_migrator_cli.sh -``` - -```bash -# RSH into migrator-cli pod, first param is your namespace -./rsh_migrator_cli.sh 26e83e-test -``` - -**If you run into authorization issues while running one of the smaller migration scripts** -```bash -# Sample unauthorized message -error: You must be logged in to the server (Unauthorized) -``` -Just delete the kubeconfig files `/tmp/KUBECONFIG` and `/tmp/KUBECONFIGSILVER`. - -**Database issues** -If there is an existing session in the database, the migration script could fail with the following messages: -```bash -dropdb: error: database removal failed: ERROR: database "gwells" is being accessed by other users -DETAIL: There is 1 other session using the database. - -createdb: error: database creation failed: ERROR: database "gwells" already exists -``` - -You may have to terminate all connections and re-run `./db_copy_and_restore.sh`: -```sql -select pg_terminate_backend(pid) from pg_stat_activity where datname='gwells'; -``` - -**Minio issues** -The Minio alias to Silver must be set up on Pathfinder: -```bash -# Download mc cli to /opt/minio/mc -curl https://dl.min.io/client/mc/release/linux-amd64/mc -o /opt/minio/mc - -# Set execute permission, may not be needed -chmod +x /opt/minio/mc - -# Set sync target alias -/opt/minio/mc -C /opt/minio/.mc alias set target https://minio-on-silver.com your-access-id your-access-secret - -# Sync Minio data to the target service bucket, assuming Minio data is stored in /data -/opt/minio/mc -C /opt/minio/.mc mirror /data target/ - -``` - -You may see an error like the following while mirroring: - -```bash -mc: Unable to create bucket at `silver/.minio.sys`. Bucket name contains invalid characters -``` - -You may have to move the .minio.sys folder that's in `/opt/minio/s3/data/.minio.sys`. This is just metadata, so you can also delete it. -The script *does* check for this file and moves it, so you may not encounter this issue. - -`./migrate_minio.sh` also does an `mc diff` as a sanity check. Normally, you won't see any output between the lines below: -```bash ------------------------------------------------------------------------------- -Found pod gwells-minio-4-djptd on moe-gwells-test -Starting minio client (mc) mirror... ------------------------------------------------------------------------------- ------------------------------------------------------------------------------- -Minio mirror took 0 minutes and 18 seconds. -Please check the mc diff result (/tmp/mc_diff.log) ------------------------------------------------------------------------------- -``` - -But if you encounter discrepancies with the data, like in the example below, you may need to investigate further: 
-```bash ------------------------------------------------------------------------------- -Found pod gwells-minio-4-djptd on moe-gwells-test -Starting minio client (mc) mirror... ------------------------------------------------------------------------------- -`/opt/minio/s3/data/gwells-export-test/api/v1/gis/wells.json` -> `silver/gwells-export-test/api/v1/gis/wells.json` -`/opt/minio/s3/data/gwells-export-test/api/v1/gis/lithology.json` -> `silver/gwells-export-test/api/v1/gis/lithology.json` -Total: 0 B, Transferred: 960.46 MiB, Speed: 112.39 MiB/s -command terminated with exit code 1 -! https://gwells-docs-staging.apps.silver.devops.gov.bc.ca/gwells-export-test/api/v1/gis/lithology.json -! https://gwells-docs-staging.apps.silver.devops.gov.bc.ca/gwells-export-test/api/v1/gis/wells.json ------------------------------------------------------------------------------- -Minio mirror took 0 minutes and 18 seconds. -Please check the mc diff result (/tmp/mc_diff.log) - -``` - -Here is the `mc diff` legend: -``` -LEGEND: - < - object is only in source. - > - object is only in destination. - ! - newer object is in source. -``` - -If you encounter `!`, you may need to run `mc cp` like the following: -```bash -/opt/minio/mc -C /opt/minio/.mc cp /opt/minio/s3/data/gwells-export-test/api/v1/gis/wells.json silver/gwells-export-test/api/v1/gis/wells.json -``` - -### Remove extra user -If you have two users in the database, remove the extra one by reassigning its privileges to the primary user. Double-check which user is the primary one. -```sql -REASSIGN OWNED BY <old_user> TO <primary_user>; -DROP OWNED BY <old_user>; -DROP USER <old_user>; -``` \ No newline at end of file diff --git a/gwells/openshift/ocp4/migration/importer.dc.yaml b/gwells/openshift/ocp4/migration/importer.dc.yaml deleted file mode 100644 index aaf2d8a25..000000000 --- a/gwells/openshift/ocp4/migration/importer.dc.yaml +++ /dev/null @@ -1,91 +0,0 @@ -apiVersion: v1 -kind: Template -labels: - template: migrator-cli -metadata: - creationTimestamp: null - name: migrator-cli -parameters: - - name: NAMESPACE - required: true - - name: NAME - value: migrator-cli - - description: Storage class for PVCs. - displayName: Storage class for PVCs. - name: STORAGE_CLASS - value: netapp-file-standard - - description: Size of volume. - displayName: Size of volume. 
- name: VOLUME_CAPACITY - value: 10Gi -objects: - - kind: DeploymentConfig - apiVersion: apps.openshift.io/v1 - metadata: - name: ${NAME} - namespace: ${NAMESPACE} - labels: - app: ${NAME} - task: migration - spec: - strategy: - type: Recreate - resources: {} - activeDeadlineSeconds: 21600 - triggers: - - type: ConfigChange - replicas: 1 - revisionHistoryLimit: 10 - selector: - app: ${NAME} - deploymentconfig: ${NAME} - task: migration - template: - metadata: - labels: - app: ${NAME} - deploymentconfig: ${NAME} - task: migration - spec: - volumes: - - name: cli-db-volume - persistentVolumeClaim: - claimName: ${NAME} - - name: migration-scripts - configMap: - name: migration-scripts - defaultMode: 0777 - containers: - - name: ${NAME} - image: >- - image-registry.openshift-image-registry.svc:5000/openshift/cli@sha256:cc4eaab57638fe0b20e449dcc94ae5325dfd9cb69dc631b28420be85deb32e60 - command: - - /bin/bash - - '-c' - - 'trap : TERM INT; sleep 7d & wait' - resources: {} - volumeMounts: - - name: cli-db-volume - mountPath: /mnt - - name: migration-scripts - mountPath: /scripts - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - imagePullPolicy: Always - restartPolicy: Always - terminationGracePeriodSeconds: 30 - dnsPolicy: ClusterFirst - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: ${NAME} - labels: - app: ${NAME} - task: migration - spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: ${VOLUME_CAPACITY} - storageClassName: ${STORAGE_CLASS} diff --git a/gwells/openshift/ocp4/migration/rsh_migrator_cli.sh b/gwells/openshift/ocp4/migration/rsh_migrator_cli.sh deleted file mode 100755 index 89d827e3d..000000000 --- a/gwells/openshift/ocp4/migration/rsh_migrator_cli.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -MIGRATOR_POD=$(oc get pods -n "$1" | grep "migrator-cli" | grep Running | head -1 | awk '{print $1}') -echo "oc rsh -n $1 $MIGRATOR_POD /bin/bash" -oc rsh -n "$1" "$MIGRATOR_POD" /bin/bash \ No newline at end of file diff --git a/gwells/openshift/ocp4/migration/scripts/activate_proxy.sh b/gwells/openshift/ocp4/migration/scripts/activate_proxy.sh deleted file mode 100755 index 481d08ba8..000000000 --- a/gwells/openshift/ocp4/migration/scripts/activate_proxy.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/bin/bash -# Usage: ./activate-proxy.sh [test/prod] - -# add --revert to end (after test/prod argument) to switch the routes back to OCP3 services. -# i.e. ./activate_proxy.sh [test/prod] --revert - -# Get variables from previous scripts or params -ENVIRONMENT=${ENVIRONMENT:-$1} -. ./params.sh "$ENVIRONMENT" -. 
./require_pathfinder_auth.sh - -set -euo pipefail - -echo -echo "Switching main GWELLS route to forward requests to OCP4 ($NAMESPACE ; env: $POD_SUFFIX)" -echo - - -ROUTE_PATCH=$(cat <<-EOF -{ - "spec": { - "port": { - "targetPort": "2015-tcp" - }, - "to": { - "kind": "Service", - "name": "gwells-maintenance-${POD_SUFFIX}" - } - } -} -EOF -) - -REVERT_ROUTE_PATCH_GWELLS=$(cat <<-EOF -{ - "spec": { - "port": { - "targetPort": "web" - }, - "to": { - "kind": "Service", - "name": "gwells-${POD_SUFFIX}" - } - } -} -EOF -) - -REVERT_ROUTE_PATCH_TILESERV=$(cat <<-EOF -{ - "spec": { - "port": { - "targetPort": 8080 - }, - "to": { - "kind": "Service", - "name": "pgtileserv-${POD_SUFFIX}" - } - } -} -EOF -) - -if echo $* | grep -e "--revert" -q -then - # use revert script - oc --kubeconfig="$KUBECONFIG" -n "$NAMESPACE" patch "route/gwells-$POD_SUFFIX" -p "$REVERT_ROUTE_PATCH_GWELLS" - oc --kubeconfig="$KUBECONFIG" -n "$NAMESPACE" patch "route/pgtileserv-$POD_SUFFIX" -p "$REVERT_ROUTE_PATCH_TILESERV" - echo - echo "route/gwells-$POD_SUFFIX patched to direct traffic to OCP3 services (proxy to ocp4 disabled)" - echo -else - oc --kubeconfig="$KUBECONFIG" -n "$NAMESPACE" patch "route/gwells-$POD_SUFFIX" -p "$ROUTE_PATCH" - oc --kubeconfig="$KUBECONFIG" -n "$NAMESPACE" patch "route/pgtileserv-$POD_SUFFIX" -p "$ROUTE_PATCH" - - echo - echo "route/gwells-$POD_SUFFIX patched to direct traffic to OCP4 proxy" - echo -fi - - -# switch to service gwells-maintenance-staging / gwells-maintenance-production ; target port: 2015-tcp - diff --git a/gwells/openshift/ocp4/migration/scripts/db_copy_and_restore.sh b/gwells/openshift/ocp4/migration/scripts/db_copy_and_restore.sh deleted file mode 100644 index f88114ba4..000000000 --- a/gwells/openshift/ocp4/migration/scripts/db_copy_and_restore.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/bash -# Usage: ./db_copy_and_restore.sh [test/prod] -# This script copies the dump from the migrator-cli volume to the db volume and restores the database using pg_restore - -# Get variables from previous scripts or params -ENVIRONMENT=${ENVIRONMENT:-$1} -. ./params.sh "$ENVIRONMENT" -. ./require_silver_auth.sh - -# Start copy to db pod and restore -GWELLS4_DB_POD=$(oc --kubeconfig="$KUBECONFIGSILVER" get pods -n "$NAMESPACE4" | grep "gwells-pg12-$POD_SUFFIX" | grep Running | head -1 | awk '{print $1}') - -echo "------------------------------------------------------------------------------" -echo "Found pod $GWELLS4_DB_POD on $NAMESPACE4" -echo "Starting copy to db pod..." -echo "------------------------------------------------------------------------------" - -# Copy to db pod -SECONDS=0 -oc --kubeconfig="$KUBECONFIGSILVER" -n "$NAMESPACE4" rsync /tmp/backup/ "$GWELLS4_DB_POD":/pgdata/backup/ -c postgresql -duration=$SECONDS -echo "------------------------------------------------------------------------------" -echo "Copy took $((duration / 60)) minutes and $((duration % 60)) seconds." -echo "Starting pg_reload..." 
-echo "------------------------------------------------------------------------------" - - -# Reload database -PG_DUMPFILE="/pgdata/backup/gwells-$ENVIRONMENT-db-latest" -SECONDS=0 -oc --kubeconfig="$KUBECONFIGSILVER" exec -n "$NAMESPACE4" "$GWELLS4_DB_POD" -c postgresql -- bash -c "dropdb gwells" -oc --kubeconfig="$KUBECONFIGSILVER" exec -n "$NAMESPACE4" "$GWELLS4_DB_POD" -c postgresql -- bash -c "createdb --owner=\$PG_USER gwells" -oc --kubeconfig="$KUBECONFIGSILVER" exec -n "$NAMESPACE4" "$GWELLS4_DB_POD" -c postgresql -- bash -c "pg_restore -d gwells $PG_DUMPFILE" - -# Do we need to change the db user? This reassigns the privileges of the old user to $PG_USER -read -r -p 'Do you need to change db user? [Y/n]: ' ASK_CHANGE - -if [[ "$ASK_CHANGE" =~ ^[Yy]$ ]]; then - read -r -p 'Enter old user: ' OLD_USER - - if [[ ${#OLD_USER} -gt 0 ]]; then - echo "Changing user from $OLD_USER to \$PG_USER on pod" - echo "Command is psql -U postgres -d gwells -c \"REASSIGN OWNED BY \\\"$OLD_USER\\\" TO \\\"\$PG_USER\\\"\"" - oc --kubeconfig="$KUBECONFIGSILVER" exec -n "$NAMESPACE4" "$GWELLS4_DB_POD" -c postgresql -- bash -c "psql -U postgres -d gwells -c \"REASSIGN OWNED BY \\\"$OLD_USER\\\" TO \\\"\$PG_USER\\\"\"" - fi -fi -#oc --kubeconfig="$KUBECONFIGSILVER" exec -n "$NAMESPACE4" "$GWELLS4_DB_POD" -c postgresql -- bash -c "pg_restore --no-owner --role=\$PG_USER -d gwells $PG_DUMPFILE" -#oc --kubeconfig="$KUBECONFIGSILVER" exec -n "$NAMESPACE4" "$GWELLS4_DB_POD" -c postgresql -- bash -c "psql -U postgres -d gwells -c \"GRANT USAGE ON SCHEMA postgis_ftw TO ftw_reader\"" -#oc --kubeconfig="$KUBECONFIGSILVER" exec -n "$NAMESPACE4" "$GWELLS4_DB_POD" -c postgresql -- bash -c "psql -U postgres -d gwells -c \"ALTER DEFAULT PRIVILEGES IN SCHEMA postgis_ftw GRANT SELECT ON TABLES TO ftw_reader\"" - -duration=$SECONDS -echo "------------------------------------------------------------------------------" -echo "Reload took $((duration / 60)) minutes and $((duration % 60)) seconds." -echo "Database migration done. Please check the database." -echo "------------------------------------------------------------------------------" \ No newline at end of file diff --git a/gwells/openshift/ocp4/migration/scripts/db_dump_and_copy.sh b/gwells/openshift/ocp4/migration/scripts/db_dump_and_copy.sh deleted file mode 100644 index 753f9838c..000000000 --- a/gwells/openshift/ocp4/migration/scripts/db_dump_and_copy.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -# Usage: ./db_dump_and_copy.sh [test/prod] - -# This script dumps the old database and copies it to the migrator-cli's volume - -# Get variables from previous scripts or params -ENVIRONMENT=${ENVIRONMENT:-$1} -. ./params.sh "$ENVIRONMENT" -. ./require_pathfinder_auth.sh - -# Start dump and copy -GWELLS_DB_POD=$(oc --kubeconfig="$KUBECONFIG" get pods -n "$NAMESPACE" | grep "gwells-pg12-$POD_SUFFIX" | head -1 | awk '{print $1}') -echo "------------------------------------------------------------------------------" -echo "Found pod $GWELLS_DB_POD on $NAMESPACE" -echo "Starting database dump..." -echo "------------------------------------------------------------------------------" - -# On Pathfinder - dump db -DB_DUMPFILE="/tmp/gwells-$ENVIRONMENT-db-latest" -SECONDS=0 -oc --kubeconfig="$KUBECONFIG" exec -n "$NAMESPACE" "$GWELLS_DB_POD" -- bash -c "pg_dump -Fc gwells > $DB_DUMPFILE" -duration=$SECONDS -echo "------------------------------------------------------------------------------" -echo "Dump took $((duration / 60)) minutes and $((duration % 60)) seconds." 
-echo "Starting to copy dumpfile from Pathfinder to this pod's volume..." -echo "------------------------------------------------------------------------------" - - -# On ocp4 - copy file from Pathfinder -mkdir -p /tmp/backup -SECONDS=0 -oc --kubeconfig="$KUBECONFIG" rsync -n "$NAMESPACE" "$GWELLS_DB_POD":"$DB_DUMPFILE" /tmp/backup/ -duration=$SECONDS -echo "------------------------------------------------------------------------------" -echo "Rsync took $((duration / 60)) minutes and $((duration % 60)) seconds." - -# delete dump from source -echo "Cleanup - deleting dump from Pathfinder" -echo "------------------------------------------------------------------------------" - -oc --kubeconfig="$KUBECONFIG" exec -n "$NAMESPACE" "$GWELLS_DB_POD" -- rm -f "$DB_DUMPFILE" - -# Scale down the pathfinder database DC -#oc --kubeconfig="$KUBECONFIG" -n "$NAMESPACE" scale --replicas=0 "dc/gwells-pg12-$POD_SUFFIX" diff --git a/gwells/openshift/ocp4/migration/scripts/do_migration.sh b/gwells/openshift/ocp4/migration/scripts/do_migration.sh deleted file mode 100644 index bc6a4641f..000000000 --- a/gwells/openshift/ocp4/migration/scripts/do_migration.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -# Usage ./do_migration.sh |& tee /tmp/migration.log - -# --------------------------------------------------------------------------------- -# Require all needed input/params for migration -# --------------------------------------------------------------------------------- -# running source on params.sh takes care of all needed parameters -# First thing it does is ask what environment we're doing the migration for -. ./params.sh - -# Require login upfront -. ./require_pathfinder_auth.sh -. ./require_silver_auth.sh - -# Scale down the gwells application on Pathfinder and Silver -. ./scale_down.sh - -# Migrate the database -. ./migrate_database.sh - -# Mirror minio data -. ./migrate_minio.sh - -if [ "$TEST_RUN" == 0 ]; then - # Scale up the gwells application on Silver - . ./scale_up.sh - - # Activate the proxy - . ./activate_proxy.sh -fi \ No newline at end of file diff --git a/gwells/openshift/ocp4/migration/scripts/migrate_database.sh b/gwells/openshift/ocp4/migration/scripts/migrate_database.sh deleted file mode 100644 index ffbb904c2..000000000 --- a/gwells/openshift/ocp4/migration/scripts/migrate_database.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -# Usage: ./db_dump_and_copy.sh - -# Run the database migration scripts - -. ./db_dump_and_copy.sh -ls -alh /tmp/backup - -echo "------------------------------------------------------------------------------" -echo "Copy from Pathfinder successful. Copying to the db pod and restoring the database..." -echo "------------------------------------------------------------------------------" - -. ./db_copy_and_restore.sh diff --git a/gwells/openshift/ocp4/migration/scripts/migrate_minio.sh b/gwells/openshift/ocp4/migration/scripts/migrate_minio.sh deleted file mode 100644 index 220b7a1cd..000000000 --- a/gwells/openshift/ocp4/migration/scripts/migrate_minio.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# Usage: ./migrate_minio.sh [test/prod] - -# This script connects to minio on pathfinder and runs `mc mirror` to the silver minio alias - -# Get variables from previous scripts or params -ENVIRONMENT=${ENVIRONMENT:-$1} -. ./params.sh -. 
./require_pathfinder_auth.sh - -# Start minio migration -GWELLS_MINIO_POD=$(oc --kubeconfig="$KUBECONFIG" get pods -n "$NAMESPACE" | grep "gwells-minio" | head -1 | awk '{print $1}') -echo "------------------------------------------------------------------------------" -echo "Found pod $GWELLS_MINIO_POD on $NAMESPACE" -echo "Starting minio client (mc) mirror..." -echo "------------------------------------------------------------------------------" - - -# Run mc mirror -# Note: The options --remove and --overwrite are there so we can be sure we copy the right data -SECONDS=0 -# The .minio.sys folder causes issues when doing mc mirror, so let's delete it. -# It contains metadata and it won't affect your data. -oc --kubeconfig="$KUBECONFIG" exec -n "$NAMESPACE" "$GWELLS_MINIO_POD" -- bash -c "if [ -d /opt/minio/s3/data/.minio.sys ];then rm -rf /opt/minio/s3/data/.minio.sys; fi" -oc --kubeconfig="$KUBECONFIG" exec -n "$NAMESPACE" "$GWELLS_MINIO_POD" -- bash -c "/opt/minio/mc -C /opt/minio/.mc mirror --remove --overwrite /opt/minio/s3/data/ silver/" -duration=$SECONDS -# Sanity check -oc --kubeconfig="$KUBECONFIG" exec -n "$NAMESPACE" "$GWELLS_MINIO_POD" -- bash -c "/opt/minio/mc -C /opt/minio/.mc diff /opt/minio/s3/data/ silver/" |& tee /tmp/mc_diff.log -echo "------------------------------------------------------------------------------" -echo "Minio mirror took $((duration / 60)) minutes and $((duration % 60)) seconds." -echo "Please check the mc diff result (/tmp/mc_diff.log)" -echo "------------------------------------------------------------------------------" - diff --git a/gwells/openshift/ocp4/migration/scripts/params.sh b/gwells/openshift/ocp4/migration/scripts/params.sh deleted file mode 100644 index e0d58ec58..000000000 --- a/gwells/openshift/ocp4/migration/scripts/params.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -# --------------------------------------------------------------------------------- -# Set variables and get authentication -# --------------------------------------------------------------------------------- - -# get params -ENVIRONMENT=${ENVIRONMENT:-$1} - -# Ask what environment we're migrating -if [[ -z "$ENVIRONMENT" ]]; then - read -r -p 'Namespace [test/prod]: ' ENVIRONMENT -fi - -# Pathfinder namespace -NAMESPACE="moe-gwells-$ENVIRONMENT" - -# Silver namespace -NAMESPACE4="26e83e-$ENVIRONMENT" - -# Pod suffix i.e. gwells-staging, gwells-production -POD_SUFFIX='staging' -if [ "$ENVIRONMENT" == 'prod' ]; then - POD_SUFFIX='production' -fi - -# Ask if this is a test run or not -if [[ -z "$ASK_RUN" ]]; then - read -r -p 'Is this a test run? [Y/n]: ' ASK_RUN -fi - -TEST_RUN=1 -if [[ "$ASK_RUN" =~ ^[Nn]$ ]]; then - echo "Confirm that this migration is NOT a test run." - echo "It will scale down pathfinder, scale up silver, and activate the proxy." - read -r -p "Type PROCEED to confirm and proceed: " CONFIRM - - if [[ "$CONFIRM" == 'PROCEED' ]]; then - TEST_RUN=0 - fi -fi \ No newline at end of file diff --git a/gwells/openshift/ocp4/migration/scripts/require_pathfinder_auth.sh b/gwells/openshift/ocp4/migration/scripts/require_pathfinder_auth.sh deleted file mode 100644 index b37f8e48a..000000000 --- a/gwells/openshift/ocp4/migration/scripts/require_pathfinder_auth.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -KUBECONFIG=/tmp/KUBECONFIG -if [ ! 
-f "$KUBECONFIG" ]; then - read -r -p "Enter Pathfinder auth token: " AUTH_TOKEN - oc --kubeconfig="$KUBECONFIG" login https://console.pathfinder.gov.bc.ca:8443 --token="$AUTH_TOKEN" -fi \ No newline at end of file diff --git a/gwells/openshift/ocp4/migration/scripts/require_silver_auth.sh b/gwells/openshift/ocp4/migration/scripts/require_silver_auth.sh deleted file mode 100644 index bdb294967..000000000 --- a/gwells/openshift/ocp4/migration/scripts/require_silver_auth.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -KUBECONFIGSILVER=/tmp/KUBECONFIGSILVER -if [ ! -f "$KUBECONFIGSILVER" ]; then - read -r -p "Enter Silver auth token: " AUTH_TOKEN - oc --kubeconfig="$KUBECONFIGSILVER" login --token="$AUTH_TOKEN" --server=https://api.silver.devops.gov.bc.ca:6443 -fi \ No newline at end of file diff --git a/gwells/openshift/ocp4/migration/scripts/reset_migration.sh b/gwells/openshift/ocp4/migration/scripts/reset_migration.sh deleted file mode 100644 index e55e8f2c3..000000000 --- a/gwells/openshift/ocp4/migration/scripts/reset_migration.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -# Usage: ./reset_migration.sh [test/prod] - -echo "WARNING! This will not reset the database. This script will only reverse the proxy and pod scaling." -. ./activate_proxy "$1" --revert -. ./scale_up.sh "$1" --revert -. ./scale_down.sh "$1" --revert diff --git a/gwells/openshift/ocp4/migration/scripts/scale_down.sh b/gwells/openshift/ocp4/migration/scripts/scale_down.sh deleted file mode 100755 index 1b2634fd4..000000000 --- a/gwells/openshift/ocp4/migration/scripts/scale_down.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -# Usage: ./scale_down.sh [test/prod] - -# Scales down the gwells application on Pathfinder -# add --revert to end (after test/prod) to scale back to 2 on Pathfinder. -# i.e. ./scale_down.sh [test/prod] --revert - -# Get variables from previous scripts or params -ENVIRONMENT=${ENVIRONMENT:-$1} -. ./params.sh "$ENVIRONMENT" -. 
./require_pathfinder_auth.sh - -set -euo pipefail -if echo $* | grep -e "--revert" -q -then - # revert to 2 replicas - - echo "Scaling back to 2 replicas on Silver ($NAMESPACE4 ; env: $POD_SUFFIX)" - oc --kubeconfig="$KUBECONFIGSILVER" -n "$NAMESPACE4" scale "dc/gwells-$POD_SUFFIX" --replicas=2 - oc --kubeconfig="$KUBECONFIGSILVER" -n "$NAMESPACE4" scale "dc/pgtileserv-$POD_SUFFIX" --replicas=1 - - echo "Scaling back to 2 replicas on Pathfinder ($NAMESPACE ; env: $POD_SUFFIX)" - oc --kubeconfig="$KUBECONFIG" -n "$NAMESPACE" scale "dc/gwells-$POD_SUFFIX" --replicas=2 - echo "Scaled to 2 replicas" - -else - - echo "Scaling down on Silver ($NAMESPACE4 ; env: $POD_SUFFIX)" - oc --kubeconfig="$KUBECONFIGSILVER" -n "$NAMESPACE4" scale "dc/gwells-$POD_SUFFIX" --replicas=0 - oc --kubeconfig="$KUBECONFIGSILVER" -n "$NAMESPACE4" scale "dc/pgtileserv-$POD_SUFFIX" --replicas=0 - - # Scale down only when we're not doing a test run - if [ "$TEST_RUN" == 0 ]; then - echo "Scaling down on Pathfinder ($NAMESPACE ; env: $POD_SUFFIX)" - oc --kubeconfig="$KUBECONFIG" -n "$NAMESPACE" scale "dc/gwells-$POD_SUFFIX" --replicas=0 - REPLICAS=$(oc --kubeconfig="$KUBECONFIG" -n "$NAMESPACE" get dc "gwells-$POD_SUFFIX" -o go-template="{{.status.replicas}}") - while [ "$REPLICAS" != 0 ] - do - echo "Waiting for GWELLS to scale down (current replicas: ${REPLICAS})" - sleep 1 - REPLICAS=$(oc --kubeconfig="$KUBECONFIG" -n "$NAMESPACE" get dc "gwells-$POD_SUFFIX" -o go-template="{{.status.replicas}}") - done - echo "Successfully scaled down (current replicas: ${REPLICAS})" - fi -fi \ No newline at end of file diff --git a/gwells/openshift/ocp4/migration/scripts/scale_up.sh b/gwells/openshift/ocp4/migration/scripts/scale_up.sh deleted file mode 100755 index 16bedfe22..000000000 --- a/gwells/openshift/ocp4/migration/scripts/scale_up.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -# This script scales up the gwells application on Silver -# add --revert to end (after test/prod) to scale back to 2 on Pathfinder. -# i.e. ./scale_up.sh [test/prod] --revert - -ENVIRONMENT=${ENVIRONMENT:-$1} -. ./params.sh "$ENVIRONMENT" -. ./require_silver_auth.sh - -set -euo pipefail - -if echo $* | grep -e "--revert" -q -then - # revert to 2 replicas - echo "Scaling down to 0 replicas on Silver ($NAMESPACE4 ; env: $POD_SUFFIX)" - - oc --kubeconfig="$KUBECONFIGSILVER" -n "$NAMESPACE4" scale "dc/gwells-$POD_SUFFIX" --replicas=0 - oc --kubeconfig="$KUBECONFIGSILVER" -n "$NAMESPACE4" scale "dc/pgtileserv-$POD_SUFFIX" --replicas=0 - - echo "Scaled to 0 replicas" - -else - echo "Scaling up on Silver ($NAMESPACE4 ; env: $POD_SUFFIX)" - - oc --kubeconfig="$KUBECONFIGSILVER" -n "$NAMESPACE4" scale "dc/gwells-$POD_SUFFIX" --replicas=2 - oc --kubeconfig="$KUBECONFIGSILVER" -n "$NAMESPACE4" scale "dc/pgtileserv-$POD_SUFFIX" --replicas=1 - - REPLICAS=$(oc --kubeconfig="$KUBECONFIGSILVER" -n "$NAMESPACE4" get dc "gwells-$POD_SUFFIX" -o go-template="{{.status.readyReplicas}}") - while [ "$REPLICAS" != 2 ] - do - echo "Waiting for GWELLS to scale up..." 
- sleep 3 - REPLICAS=$(oc --kubeconfig="$KUBECONFIGSILVER" -n "$NAMESPACE4" get dc "gwells-$POD_SUFFIX" -o go-template="{{.status.readyReplicas}}") - done - echo "Successfully scaled up (current replicas: ${REPLICAS})" -fi diff --git a/gwells/openshift/ocp4/minio/minio.bc.yaml b/gwells/openshift/ocp4/minio/minio.bc.yaml deleted file mode 100644 index 2f1187800..000000000 --- a/gwells/openshift/ocp4/minio/minio.bc.yaml +++ /dev/null @@ -1,50 +0,0 @@ ---- -kind: Template -apiVersion: v1 -metadata: - name: gwells-minio-bc -parameters: -- name: NAME - value: "gwells-minio" -- name: DEST_IMG_NAME - value: "gwells-minio" -- name: DEST_IMG_TAG - value: "latest" -- name: SRC_REPO_URL - value: "https://github.com/bcgov/gwells" -- name: SRC_REPO_BRANCH - value: "release" -objects: -- kind: ImageStream - apiVersion: v1 - metadata: - name: "${DEST_IMG_NAME}" - labels: - app: "${NAME}" -- apiVersion: v1 - kind: BuildConfig - metadata: - labels: - app: "${NAME}" - name: "${NAME}" - template: "${NAME}" - name: "${NAME}" - spec: - successfulBuildsHistoryLimit: 3 - failedBuildsHistoryLimit: 3 - triggers: - - type: ImageChange - - type: ConfigChange - runPolicy: SerialLatestOnly - source: - contextDir: "/openshift/docker/minio" - git: - ref: "${SRC_REPO_BRANCH}" - uri: "${SRC_REPO_URL}" - type: Git - strategy: - type: Docker - output: - to: - kind: ImageStreamTag - name: "${DEST_IMG_NAME}:${DEST_IMG_TAG}" diff --git a/gwells/openshift/ocp4/minio/minio.dc.yaml b/gwells/openshift/ocp4/minio/minio.dc.yaml deleted file mode 100644 index 7bca2612f..000000000 --- a/gwells/openshift/ocp4/minio/minio.dc.yaml +++ /dev/null @@ -1,152 +0,0 @@ ---- -apiVersion: v1 -kind: Template -metadata: - annotations: - description: "Minio deployment" - labels: - app: "gwells${NAME_SUFFIX}" - template: "gwells-minio-template${NAME_SUFFIX}" - name: "gwells-minio${NAME_SUFFIX}" -parameters: - - name: "NAME_SUFFIX" - required: true - - name: "SECRETS" - displayName: "Secret object to reference" - value: "gwells-minio-secrets" - - name: "SRC_NAMESPACE" - displayName: "Namespace containing the deployment source" - value: "26e83e-tools" - - name: "SRC_IMAGE" - displayName: "Name of source image" - value: "gwells-minio" - - name: "SRC_TAG" - displayName: "Tag of source image" - value: "latest" - - name: "DEST_PVC_SIZE" - displayName: "PVC size" - value: "1Gi" - - name: "DEST_PVC_CLASS" - displayName: "PVC class" - value: "netapp-file-standard" - - name: "DEST_PVC_ACCESS" - displayName: "PVC access mode" - value: "ReadWriteMany" - - name: "HOSTNAME" - displayName: "Hostname for Minio service" - required: true -objects: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - finalizers: - - kubernetes.io/pvc-protection - name: "gwells-minio${NAME_SUFFIX}" - labels: - app: "gwells${NAME_SUFFIX}" - name: gwells-minio${NAME_SUFFIX} - spec: - accessModes: - - ${DEST_PVC_ACCESS} - resources: - requests: - storage: ${DEST_PVC_SIZE} - storageClassName: ${DEST_PVC_CLASS} - - apiVersion: apps.openshift.io/v1 - kind: DeploymentConfig - metadata: - labels: - app: "gwells${NAME_SUFFIX}" - name: "gwells-minio${NAME_SUFFIX}" - name: "gwells-minio${NAME_SUFFIX}" - spec: - replicas: 1 - strategy: - activeDeadlineSeconds: 3000 - recreateParams: - timeoutSeconds: 600 - type: Recreate - template: - metadata: - labels: - app: "gwells${NAME_SUFFIX}" - name: gwells-minio${NAME_SUFFIX} - spec: - containers: - - env: - - name: MINIO_ACCESS_KEY - valueFrom: - secretKeyRef: - name: ${SECRETS} - key: MINIO_ACCESS_KEY - - name: MINIO_SECRET_KEY - valueFrom: - 
secretKeyRef: - name: ${SECRETS} - key: MINIO_SECRET_KEY - - name: MINIO_DATA_DIR - value: /opt/minio/s3/data - image: image-registry.openshift-image-registry.svc:5000/${SRC_NAMESPACE}/${SRC_IMAGE}:${SRC_TAG} - imagePullPolicy: Always - name: "gwells-minio${NAME_SUFFIX}" - command: - - '/bin/sh' - - '-c' - args: - - '/minio-entrypoint.sh server /opt/minio/s3/data' - ports: - - containerPort: 9000 - protocol: TCP - resources: - limits: - cpu: 250m - requests: - cpu: 100m - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /opt/minio/s3/data - name: minio-vol - restartPolicy: Always - terminationGracePeriodSeconds: 30 - volumes: - - name: minio-vol - persistentVolumeClaim: - claimName: "gwells-minio${NAME_SUFFIX}" - triggers: - - type: ConfigChange - - apiVersion: v1 - kind: Service - metadata: - labels: - app: gwells${NAME_SUFFIX} - name: gwells-minio${NAME_SUFFIX} - name: gwells-minio${NAME_SUFFIX} - spec: - selector: - app: gwells${NAME_SUFFIX} - name: gwells-minio${NAME_SUFFIX} - ports: - - name: 9000-tcp - port: 9000 - protocol: TCP - targetPort: 9000 - - apiVersion: route.openshift.io/v1 - kind: Route - metadata: - labels: - app: gwells${NAME_SUFFIX} - name: gwells-minio${NAME_SUFFIX} - name: gwells-minio${NAME_SUFFIX} - spec: - host: ${HOSTNAME} - port: - targetPort: 9000-tcp - to: - kind: Service - name: gwells-minio${NAME_SUFFIX} - weight: 100 - wildcardPolicy: None - tls: - insecureEdgeTerminationPolicy: Redirect - termination: edge diff --git a/gwells/openshift/ocp4/networkpolicy.yaml b/gwells/openshift/ocp4/networkpolicy.yaml deleted file mode 100644 index aebda5f45..000000000 --- a/gwells/openshift/ocp4/networkpolicy.yaml +++ /dev/null @@ -1,97 +0,0 @@ ---- -apiVersion: template.openshift.io/v1 -kind: Template -labels: - template: quickstart-network-security-policy -metadata: - name: quickstart-network-security-policy -objects: - - kind: NetworkPolicy - apiVersion: networking.k8s.io/v1 - metadata: - name: deny-by-default - namespace: ${NAMESPACE} - spec: - # The default posture for a security first namespace is to - # deny all traffic. If not added this rule will be added - # by Platform Services during environment cut-over. - podSelector: {} - ingress: [] - - apiVersion: networking.k8s.io/v1 - kind: NetworkPolicy - metadata: - name: allow-from-openshift-ingress - namespace: ${NAMESPACE} - spec: - # This policy allows any pod with a route & service combination - # to accept traffic from the OpenShift router pods. This is - # required for things outside of OpenShift (like the Internet) - # to reach your pods. - ingress: - - from: - - namespaceSelector: - matchLabels: - network.openshift.io/policy-group: ingress - podSelector: {} - policyTypes: - - Ingress - - kind: NetworkPolicy - apiVersion: networking.k8s.io/v1 - metadata: - name: allow-same-namespace - namespace: ${NAMESPACE} - spec: - # Allow all pods within the current namespace to communicate - # to one another. - podSelector: - ingress: - - from: - - podSelector: {} - - kind: NetworkPolicy - apiVersion: networking.k8s.io/v1 - metadata: - name: allow-jenkins - namespace: ${NAMESPACE} - spec: - # Allow all pods within the current namespace to communicate - # to one another. 
- podSelector: - ingress: - - from: - - namespaceSelector: - matchLabels: - environment: tools - name: 26e83e - podSelector: - matchLabels: - app: jenkins - - apiVersion: security.devops.gov.bc.ca/v1alpha1 - kind: NetworkSecurityPolicy - metadata: - name: any-to-any - namespace: ${NAMESPACE} - spec: - description: | - allow all pods to communicate - source: - - - "$namespace=${NAMESPACE}" - destination: - - - "$namespace=*" - - apiVersion: security.devops.gov.bc.ca/v1alpha1 - kind: NetworkSecurityPolicy - metadata: - name: any-to-external - namespace: ${NAMESPACE} - spec: - description: | - Allow all pods to talk to external systems - source: - - - "$namespace=${NAMESPACE}" - destination: - - - "ext:network=any" -parameters: - - name: NAMESPACE - displayName: Namespace - description: | - The namespace this policy is being deployed to; - required: true \ No newline at end of file diff --git a/gwells/openshift/ocp4/pg_tileserv/README.md b/gwells/openshift/ocp4/pg_tileserv/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/gwells/openshift/ocp4/pg_tileserv/nginx.conf b/gwells/openshift/ocp4/pg_tileserv/nginx.conf deleted file mode 100644 index 248a4acca..000000000 --- a/gwells/openshift/ocp4/pg_tileserv/nginx.conf +++ /dev/null @@ -1,151 +0,0 @@ -# For more information on configuration, see: -# * Official English Documentation: http://nginx.org/en/docs/ -# * Official Russian Documentation: http://nginx.org/ru/docs/ - - -worker_processes auto; -error_log /var/opt/rh/rh-nginx116/log/nginx/error.log; -pid /var/opt/rh/rh-nginx116/run/nginx/nginx.pid; - -# Load dynamic modules. See /opt/rh/rh-nginx116/root/usr/share/doc/README.dynamic. -include /opt/rh/rh-nginx116/root/usr/share/nginx/modules/*.conf; - -events { - worker_connections 1024; -} - -http { - log_format main '$remote_addr - $remote_user [$time_local] ' - '"$request" $status $body_bytes_sent ' - '"$http_referer" "$http_user_agent" ' - 'rt=$request_time uct="$upstream_connect_time" uht="$upstream_header_time" urt="$upstream_response_time"'; - - access_log /var/opt/rh/rh-nginx116/log/nginx/access.log main; - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - types_hash_max_size 2048; - - - # Enable Gzip compressed. - gzip on; - - # Serve pre-compressed files - gzip_static on; - - # Enable compression both for HTTP/1.0 and HTTP/1.1 (required for CloudFront). - gzip_http_version 1.0; - - # Compression level (1-9). - # 5 is a perfect compromise between size and cpu usage, offering about - # 75% reduction for most ascii files (almost identical to level 9). - gzip_comp_level 5; - - # Don't compress anything that's already small and unlikely to shrink much - # if at all (the default is 20 bytes, which is bad as that usually leads to - # larger files after gzipping). - gzip_min_length 256; - - # Compress data even for clients that are connecting to us via proxies, - # identified by the "Via" header (required for CloudFront). - gzip_proxied any; - - # Tell proxies to cache both the gzipped and regular version of a resource - # whenever the client's Accept-Encoding capabilities header varies; - # Avoids the issue where a non-gzip capable client (which is extremely rare - # today) would display gibberish if their proxy gave them the gzipped version. - gzip_vary on; - - # This assigns the number and the size of the compression buffers. - # The default is gzip_buffers 4 4k; - # Increase this value to make sure that big Javascript or - # CSS files can be compressed as well. 
- gzip_buffers 16 8k; - - # Compress all output labeled with one of the following MIME-types. - gzip_types - application/atom+xml - application/javascript - application/x-javascript - text/xml - application/xml+rss - text/javascript - application/json - application/rss+xml - application/vnd.ms-fontobject - application/x-font-ttf - application/x-web-app-manifest+json - application/xhtml+xml - application/xml - application/vnd.mapbox-vector-tile - font/opentype - image/svg+xml - image/x-icon - text/css - text/plain - text/x-component; - # text/html is always compressed by HttpGzipModule - - include /etc/opt/rh/rh-nginx116/nginx/mime.types; - default_type application/octet-stream; - - # Load modular configuration files from the /etc/nginx/conf.d directory. - # See http://nginx.org/en/docs/ngx_core_module.html#include - # for more information. - include /opt/app-root/etc/nginx.d/*.conf; - - - proxy_cache_path /tmp/cache levels=1:2 keys_zone=tile_cache:10m max_size=1g - inactive=15m use_temp_path=off; - - server { - listen 8080 default_server; - listen [::]:8080 default_server; - server_name _; - root /opt/app-root/src; - - # Load configuration files for the default server block. - include /opt/app-root/etc/nginx.default.d/*.conf; - - location /gwells/tiles/ { - proxy_pass http://localhost:7800/; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - - proxy_cache tile_cache; - proxy_cache_valid 200 15m; - proxy_cache_valid 500 0s; - proxy_cache_min_uses 2; - proxy_cache_use_stale error timeout updating http_500 http_502 - http_503 http_504; - proxy_cache_lock on; - add_header X-Cache-Status $upstream_cache_status; - } - - - location /health { - access_log off; - return 200 "healthy\n"; - } - - location / { - } - - # Media: images, icons, video, audio, HTC - location ~* \.(?:jpg|jpeg|gif|png|ico|cur|gz|svg|svgz|mp4|ogg|ogv|webm|htc)$ { - expires 1M; - access_log off; - add_header Cache-Control "public"; - } - - # Javascript and CSS files - location ~* \.(?:css|js)$ { - try_files $uri =404; - expires 1y; - access_log off; - add_header Cache-Control "public"; - } - } -} \ No newline at end of file diff --git a/gwells/openshift/ocp4/pg_tileserv/pg_tileserv.dc.yaml b/gwells/openshift/ocp4/pg_tileserv/pg_tileserv.dc.yaml deleted file mode 100644 index 1a904987a..000000000 --- a/gwells/openshift/ocp4/pg_tileserv/pg_tileserv.dc.yaml +++ /dev/null @@ -1,185 +0,0 @@ -apiVersion: v1 -kind: Template -labels: - template: pgtileserv-template -metadata: - creationTimestamp: null - name: pgtileserv-dc -parameters: - - name: IMAGE_TAG - value: '20201112' - - name: IMAGE_REGISTRY - value: image-registry.openshift-image-registry.svc:5000 - - name: IMAGE_NAMESPACE - value: 26e83e-tools - - name: NAME_SUFFIX - required: true - - name: DATABASE_SERVICE_NAME - required: true - - description: Request for CPU resources measured in cpu units, e.g. 200m - displayName: CPU resource request - name: REQUEST_CPU - required: false - value: 100m - - description: Request for memory resources measured in bytes, e.g. 512Mi, 1Gi. - displayName: Memory resource request - name: REQUEST_MEMORY - required: false - value: 256Mi - - description: Limit for CPU resources measured in cpu units, e.g. 200m - displayName: CPU resource limit - name: LIMIT_CPU - required: false - value: 200m - - description: Limit for memory resources measured in bytes, e.g. 512Mi, 1Gi. 
- displayName: Memory resource limit - name: LIMIT_MEMORY - required: false - value: 512Mi - - name: HOST - required: true -objects: - - - apiVersion: v1 - kind: Route - metadata: - name: pgtileserv${NAME_SUFFIX} - labels: - app: gwells${NAME_SUFFIX} - name: pgtileserv${NAME_SUFFIX} - appver: gwells${NAME_SUFFIX} - component: pgtileserv - spec: - host: ${HOST} - path: /gwells/tiles - to: - kind: Service - name: pgtileserv${NAME_SUFFIX} - weight: 100 - port: - targetPort: 8080 - tls: - insecureEdgeTerminationPolicy: Redirect - termination: edge - wildcardPolicy: None - - apiVersion: v1 - kind: Service - metadata: - name: pgtileserv${NAME_SUFFIX} - spec: - ports: - - port: 8080 - protocol: TCP - selector: - name: pgtileserv${NAME_SUFFIX} - sessionAffinity: None - type: ClusterIP - status: - loadBalancer: {} - - apiVersion: v1 - kind: DeploymentConfig - metadata: - name: pgtileserv${NAME_SUFFIX} - spec: - replicas: 1 - selector: - name: pgtileserv${NAME_SUFFIX} - strategy: - type: Rolling - template: - metadata: - labels: - app: gwells${NAME_SUFFIX} - name: pgtileserv${NAME_SUFFIX} - appver: gwells${NAME_SUFFIX} - spec: - volumes: - containers: - - name: nginx - image: ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/nginx-tilecache:latest - imagePullPolicy: IfNotPresent - readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - httpGet: - path: /health - port: 8080 - timeoutSeconds: 1 - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - httpGet: - path: /health - port: 8080 - timeoutSeconds: 1 - ports: - - containerPort: 8080 - protocol: TCP - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 200m - memory: 256Mi - - capabilities: {} - env: - - name: DATABASE_HOST - value: ${DATABASE_SERVICE_NAME} - - name: PG_DATABASE - valueFrom: - secretKeyRef: - key: database-name - name: ${DATABASE_SERVICE_NAME} - - name: PG_PASSWORD - valueFrom: - secretKeyRef: - key: database-password - name: ${DATABASE_SERVICE_NAME} - - name: PG_USER - value: ftw_reader - - name: DATABASE_URL - value: postgres://$(PG_USER):$(PG_PASSWORD)@$(DATABASE_HOST)/gwells - image: ${IMAGE_REGISTRY}/${IMAGE_NAMESPACE}/pg_tileserv:${IMAGE_TAG} - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 5 - periodSeconds: 30 - successThreshold: 1 - tcpSocket: - port: 7800 - timeoutSeconds: 5 - name: pgtileserv - ports: - - containerPort: 7800 - protocol: TCP - readinessProbe: - httpGet: - path: / - port: 7800 - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 5 - resources: - requests: - cpu: ${REQUEST_CPU} - memory: ${REQUEST_MEMORY} - limits: - cpu: ${LIMIT_CPU} - memory: ${LIMIT_MEMORY} - securityContext: - capabilities: {} - privileged: false - terminationMessagePath: /dev/termination-log - dnsPolicy: ClusterFirst - restartPolicy: Always - triggers: - - type: ConfigChange - status: {} diff --git a/gwells/openshift/pg_tileserv/README.md b/gwells/openshift/pg_tileserv/README.md deleted file mode 100644 index 305d7ae21..000000000 --- a/gwells/openshift/pg_tileserv/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# pg_tileserv - -GWELLS uses pg_tileserv to render vector tiles for displaying aquifers and wells (currently only for Aquifer search and details pages, not for all GWELLS pages). - -https://github.com/CrunchyData/pg_tileserv - -## Prerequisites - -The pg_tileserv config in pg_tileserv.dc.yaml requires a read only database user. 
This user should have limited access only to data that can be rendered on the map. In GWELLS, the read only user only has usage and select privileges in the `postgis_ftw` schema, where views can be created to be made available as vector layers. For more info, see openshift/database.deploy.yml and the GWELLS Django migrations where views are created. - -### nginx (basic tile cache) - -nginx has been deployed as a basic tile cache. To build the nginx server and include the nginx.conf file located in this folder, run (from the GWELLS tools namespace): - -`oc new-build nginx:1.12~https://github.com/bcgov/gwells.git --context-dir=openshift/pg_tileserv --name=nginx-tilecache` - -The template `pg_tileserv.dc.yaml` will now be able to pull the nginx-tilecache container image. This image should be rebuilt when edits are made to the nginx.conf file. diff --git a/gwells/openshift/pg_tileserv/nginx.conf b/gwells/openshift/pg_tileserv/nginx.conf deleted file mode 100644 index 5c68a7eaa..000000000 --- a/gwells/openshift/pg_tileserv/nginx.conf +++ /dev/null @@ -1,151 +0,0 @@ -# For more information on configuration, see: -# * Official English Documentation: http://nginx.org/en/docs/ -# * Official Russian Documentation: http://nginx.org/ru/docs/ - - -worker_processes auto; -error_log /var/opt/rh/rh-nginx112/log/nginx/error.log; -pid /var/opt/rh/rh-nginx112/run/nginx/nginx.pid; - -# Load dynamic modules. See /opt/rh/rh-nginx112/root/usr/share/doc/README.dynamic. -include /opt/rh/rh-nginx112/root/usr/share/nginx/modules/*.conf; - -events { - worker_connections 1024; -} - -http { - log_format main '$remote_addr - $remote_user [$time_local] ' - '"$request" $status $body_bytes_sent ' - '"$http_referer" "$http_user_agent" ' - 'rt=$request_time uct="$upstream_connect_time" uht="$upstream_header_time" urt="$upstream_response_time"'; - - access_log /var/opt/rh/rh-nginx112/log/nginx/access.log main; - - sendfile on; - tcp_nopush on; - tcp_nodelay on; - keepalive_timeout 65; - types_hash_max_size 2048; - - - # Enable Gzip compressed. - gzip on; - - # Serve pre-compressed files - gzip_static on; - - # Enable compression both for HTTP/1.0 and HTTP/1.1 (required for CloudFront). - gzip_http_version 1.0; - - # Compression level (1-9). - # 5 is a perfect compromise between size and cpu usage, offering about - # 75% reduction for most ascii files (almost identical to level 9). - gzip_comp_level 5; - - # Don't compress anything that's already small and unlikely to shrink much - # if at all (the default is 20 bytes, which is bad as that usually leads to - # larger files after gzipping). - gzip_min_length 256; - - # Compress data even for clients that are connecting to us via proxies, - # identified by the "Via" header (required for CloudFront). - gzip_proxied any; - - # Tell proxies to cache both the gzipped and regular version of a resource - # whenever the client's Accept-Encoding capabilities header varies; - # Avoids the issue where a non-gzip capable client (which is extremely rare - # today) would display gibberish if their proxy gave them the gzipped version. - gzip_vary on; - - # This assigns the number and the size of the compression buffers. - # The default is gzip_buffers 4 4k; - # Increase this value to make sure that big Javascript or - # CSS files can be compressed as well. - gzip_buffers 16 8k; - - # Compress all output labeled with one of the following MIME-types. 
- gzip_types - application/atom+xml - application/javascript - application/x-javascript - text/xml - application/xml+rss - text/javascript - application/json - application/rss+xml - application/vnd.ms-fontobject - application/x-font-ttf - application/x-web-app-manifest+json - application/xhtml+xml - application/xml - application/vnd.mapbox-vector-tile - font/opentype - image/svg+xml - image/x-icon - text/css - text/plain - text/x-component; - # text/html is always compressed by HttpGzipModule - - include /etc/opt/rh/rh-nginx112/nginx/mime.types; - default_type application/octet-stream; - - # Load modular configuration files from the /etc/nginx/conf.d directory. - # See http://nginx.org/en/docs/ngx_core_module.html#include - # for more information. - include /opt/app-root/etc/nginx.d/*.conf; - - - proxy_cache_path /tmp/cache levels=1:2 keys_zone=tile_cache:10m max_size=1g - inactive=15m use_temp_path=off; - - server { - listen 8080 default_server; - listen [::]:8080 default_server; - server_name _; - root /opt/app-root/src; - - # Load configuration files for the default server block. - include /opt/app-root/etc/nginx.default.d/*.conf; - - location /gwells/tiles/ { - proxy_pass http://localhost:7800/; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - - proxy_cache tile_cache; - proxy_cache_valid 200 1m; - proxy_cache_valid 500 0s; - proxy_cache_min_uses 2; - proxy_cache_use_stale error timeout updating http_500 http_502 - http_503 http_504; - proxy_cache_lock on; - add_header X-Cache-Status $upstream_cache_status; - } - - - location /health { - access_log off; - return 200 "healthy\n"; - } - - location / { - } - - # Media: images, icons, video, audio, HTC - location ~* \.(?:jpg|jpeg|gif|png|ico|cur|gz|svg|svgz|mp4|ogg|ogv|webm|htc)$ { - expires 1M; - access_log off; - add_header Cache-Control "public"; - } - - # Javascript and CSS files - location ~* \.(?:css|js)$ { - try_files $uri =404; - expires 1y; - access_log off; - add_header Cache-Control "public"; - } - } -} \ No newline at end of file diff --git a/gwells/openshift/pg_tileserv/pg_tileserv.dc.yaml b/gwells/openshift/pg_tileserv/pg_tileserv.dc.yaml deleted file mode 100644 index 122e18b05..000000000 --- a/gwells/openshift/pg_tileserv/pg_tileserv.dc.yaml +++ /dev/null @@ -1,181 +0,0 @@ -apiVersion: v1 -kind: Template -labels: - template: pgtileserv-template -metadata: - creationTimestamp: null - name: pgtileserv-dc -parameters: - - name: IMAGE_TAG - value: '20200610' - - name: NAME_SUFFIX - required: true - - name: DATABASE_SERVICE_NAME - required: true - - description: Request for CPU resources measured in cpu units, e.g. 200m - displayName: CPU resource request - name: REQUEST_CPU - required: false - value: 100m - - description: Request for memory resources measured in bytes, e.g. 512Mi, 1Gi. - displayName: Memory resource request - name: REQUEST_MEMORY - required: false - value: 256Mi - - description: Limit for CPU resources measured in cpu units, e.g. 200m - displayName: CPU resource limit - name: LIMIT_CPU - required: false - value: 200m - - description: Limit for memory resources measured in bytes, e.g. 512Mi, 1Gi. 
- displayName: Memory resource limit - name: LIMIT_MEMORY - required: false - value: 512Mi - - name: HOST - required: true -objects: - - - apiVersion: v1 - kind: Route - metadata: - name: pgtileserv${NAME_SUFFIX} - labels: - app: gwells${NAME_SUFFIX} - name: pgtileserv${NAME_SUFFIX} - appver: gwells${NAME_SUFFIX} - component: pgtileserv - spec: - host: ${HOST} - path: /gwells/tiles - to: - kind: Service - name: pgtileserv${NAME_SUFFIX} - weight: 100 - port: - targetPort: 8080 - tls: - insecureEdgeTerminationPolicy: Redirect - termination: edge - wildcardPolicy: None - - apiVersion: v1 - kind: Service - metadata: - name: pgtileserv${NAME_SUFFIX} - spec: - ports: - - port: 8080 - protocol: TCP - selector: - name: pgtileserv${NAME_SUFFIX} - sessionAffinity: None - type: ClusterIP - status: - loadBalancer: {} - - apiVersion: v1 - kind: DeploymentConfig - metadata: - name: pgtileserv${NAME_SUFFIX} - spec: - replicas: 1 - selector: - name: pgtileserv${NAME_SUFFIX} - strategy: - type: Rolling - template: - metadata: - labels: - app: gwells${NAME_SUFFIX} - name: pgtileserv${NAME_SUFFIX} - appver: gwells${NAME_SUFFIX} - spec: - volumes: - containers: - - name: nginx - image: docker-registry.default.svc:5000/moe-gwells-tools/nginx-tilecache - imagePullPolicy: IfNotPresent - readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - httpGet: - path: /health - port: 8080 - timeoutSeconds: 1 - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - httpGet: - path: /health - port: 8080 - timeoutSeconds: 1 - ports: - - containerPort: 8080 - protocol: TCP - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 200m - memory: 256Mi - - capabilities: {} - env: - - name: DATABASE_HOST - value: ${DATABASE_SERVICE_NAME} - - name: PG_DATABASE - valueFrom: - secretKeyRef: - key: database-name - name: ${DATABASE_SERVICE_NAME} - - name: PG_PASSWORD - valueFrom: - secretKeyRef: - key: database-password - name: ${DATABASE_SERVICE_NAME} - - name: PG_USER - value: ftw_reader - - name: DATABASE_URL - value: postgres://$(PG_USER):$(PG_PASSWORD)@$(DATABASE_HOST)/gwells - image: docker-registry.default.svc:5000/moe-gwells-tools/pg_tileserv:${IMAGE_TAG} - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 5 - periodSeconds: 30 - successThreshold: 1 - tcpSocket: - port: 7800 - timeoutSeconds: 5 - name: pgtileserv - ports: - - containerPort: 7800 - protocol: TCP - readinessProbe: - httpGet: - path: / - port: 7800 - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 5 - resources: - requests: - cpu: ${REQUEST_CPU} - memory: ${REQUEST_MEMORY} - limits: - cpu: ${LIMIT_CPU} - memory: ${LIMIT_MEMORY} - securityContext: - capabilities: {} - privileged: false - terminationMessagePath: /dev/termination-log - dnsPolicy: ClusterFirst - restartPolicy: Always - triggers: - - type: ConfigChange - status: {} diff --git a/gwells/openshift/postgresql.dc.yml b/gwells/openshift/postgresql.dc.yml deleted file mode 100644 index f8c90dcdf..000000000 --- a/gwells/openshift/postgresql.dc.yml +++ /dev/null @@ -1,299 +0,0 @@ -apiVersion: v1 -kind: Template -labels: - template: postgresql-persistent-template -metadata: - creationTimestamp: null - name: gwells-postgresql-dc -parameters: - - description: The OpenShift ImageStream name. 
- displayName: IMAGE_STREAM_NAME - name: IMAGE_STREAM_NAME - value: postgresql-9.6-oracle-fdw - - description: The OpenShift Namespace where the ImageStream resides. - displayName: Namespace - name: IMAGE_STREAM_NAMESPACE - value: bcgov - - description: The name of the OpenShift Service exposed for the database. - displayName: Database Service Name - name: DATABASE_SERVICE_NAME - required: true - value: postgresql - - description: Username for PostgreSQL user that will be used for accessing the database. - displayName: PostgreSQL Connection Username - from: user[A-Z0-9]{3} - generate: expression - name: POSTGRESQL_USER - required: true - - description: Password for the PostgreSQL connection user. - displayName: PostgreSQL Connection Password - from: "[a-zA-Z0-9]{16}" - generate: expression - name: POSTGRESQL_PASSWORD - required: true - - description: Name of the PostgreSQL database accessed. - displayName: PostgreSQL Database Name - name: POSTGRESQL_DATABASE - required: true - value: sampledb - - description: Volume space available for data, e.g. 512Mi, 2Gi. - displayName: Volume Capacity - name: VOLUME_CAPACITY - required: true - value: 1Gi - - description: Request for CPU resources measured in cpu units, e.g. 200m - displayName: CPU resource request - name: REQUEST_CPU - required: false - value: 100m - - description: Request for memory resources measured in bytes, e.g. 512Mi, 1Gi. - displayName: Memory resource request - name: REQUEST_MEMORY - required: false - value: 512Mi - - description: Limit for CPU resources measured in cpu units, e.g. 200m - displayName: CPU resource limit - name: LIMIT_CPU - required: false - value: 200m - - description: Limit for memory resources measured in bytes, e.g. 512Mi, 1Gi. - displayName: Memory resource limit - name: LIMIT_MEMORY - required: false - value: 1Gi - - description: Version of PostgreSQL image to be used (9.5 or 9.6). - displayName: Version of PostgreSQL Image - name: IMAGE_STREAM_VERSION - required: true - value: v1-stable - - name: NAME_SUFFIX - required: true - - description: Storage class for PVCs. - displayName: Storage class for PVCs. 
- name: STORAGE_CLASS - value: gluster-file-db -objects: - - apiVersion: v1 - data: - setup.sql: |- - SET application_name="container_setup"; - - create extension postgis; - create extension postgis_topology; - create extension fuzzystrmatch; - create extension postgis_tiger_geocoder; - create extension pg_stat_statements; - create extension pgaudit; - create extension plr; - create extension "uuid-ossp"; - - alter user postgres password 'PG_ROOT_PASSWORD'; -- these values are automatically replaced by secrets - - create user "PG_PRIMARY_USER" with REPLICATION PASSWORD 'PG_PRIMARY_PASSWORD'; - create user "PG_USER" with password 'PG_PASSWORD'; - create user ftw_reader with password 'PG_PASSWORD'; - - create table primarytable (key varchar(20), value varchar(20)); - grant all on primarytable to "PG_PRIMARY_USER"; - - create database "PG_DATABASE"; - - grant all privileges on database "PG_DATABASE" to "PG_USER"; - - - \c "PG_DATABASE" - - create extension postgis; - create extension postgis_topology; - create extension fuzzystrmatch; - create extension postgis_tiger_geocoder; - create extension pg_stat_statements; - create extension pgaudit; - create extension plr; - create extension "uuid-ossp"; - - \c "PG_DATABASE" "PG_USER"; - - - - -- read only account for generating tiles - - create schema postgis_ftw; - grant usage on schema postgis_ftw to ftw_reader; - ALTER DEFAULT PRIVILEGES IN SCHEMA postgis_ftw GRANT SELECT ON TABLES TO ftw_reader; - - kind: ConfigMap - metadata: - labels: - app: gwells${NAME_SUFFIX} - name: ${DATABASE_SERVICE_NAME}-setupcfg - appver: gwells${NAME_SUFFIX} - name: ${DATABASE_SERVICE_NAME}-setupcfg - - apiVersion: v1 - kind: Secret - metadata: - annotations: - template.openshift.io/expose-database_name: "{.data['database-name']}" - template.openshift.io/expose-password: "{.data['database-password']}" - template.openshift.io/expose-username: "{.data['database-user']}" - as-copy-of: gwells-database-secrets - name: ${DATABASE_SERVICE_NAME} - stringData: - database-name: ${POSTGRESQL_DATABASE} - database-password: ${POSTGRESQL_PASSWORD} - database-user: ${POSTGRESQL_USER} - fdw-database-user: proxy_wells_gwells - fdw-database-password: null - fdw-database-server: "//nrk1-scan.bcgov/envprod1.nrs.bcgov" - fdw-database-schema: WELLS - - apiVersion: v1 - kind: Service - metadata: - annotations: - template.openshift.io/expose-uri: postgres://{.spec.clusterIP}:{.spec.ports[?(.name=="postgresql")].port} - name: ${DATABASE_SERVICE_NAME} - spec: - ports: - - name: postgresql - nodePort: 0 - port: 5432 - protocol: TCP - targetPort: 5432 - selector: - name: ${DATABASE_SERVICE_NAME} - sessionAffinity: None - type: ClusterIP - status: - loadBalancer: {} - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: ${DATABASE_SERVICE_NAME} - annotations: - template.openshift.io.bcgov/create: "true" - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: ${VOLUME_CAPACITY} - storageClassName: ${STORAGE_CLASS} - - apiVersion: v1 - kind: DeploymentConfig - metadata: - annotations: - template.alpha.openshift.io/wait-for-ready: "true" - name: ${DATABASE_SERVICE_NAME} - spec: - replicas: 1 - selector: - name: ${DATABASE_SERVICE_NAME} - strategy: - type: Recreate - template: - metadata: - labels: - name: ${DATABASE_SERVICE_NAME} - appver: gwells${NAME_SUFFIX} - spec: - containers: - - capabilities: {} - env: - - name: PG_PRIMARY_PORT - value: "5432" - - name: CONTAINER_NAME - value: postgresql - - name: PG_PRIMARY_HOST - value: ${DATABASE_SERVICE_NAME} - - name: 
PGDATA_PATH_OVERRIDE - value: userdata - - name: PGHOST - value: /tmp - - name: PG_DATABASE - valueFrom: - secretKeyRef: - key: database-name - name: ${DATABASE_SERVICE_NAME} - - name: PG_PASSWORD - valueFrom: - secretKeyRef: - key: database-password - name: ${DATABASE_SERVICE_NAME} - - name: PG_USER - valueFrom: - secretKeyRef: - key: database-user - name: ${DATABASE_SERVICE_NAME} - envFrom: - - secretRef: - name: crunchy-db-credentials - image: " " - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - tcpSocket: - port: 5432 - timeoutSeconds: 1 - name: postgresql - ports: - - containerPort: 5432 - protocol: TCP - readinessProbe: - exec: - command: - - /usr/bin/env - - bash - - '-c' - - >- - psql -q -d - $PG_DATABASE -c 'SELECT 1' - failureThreshold: 5 - initialDelaySeconds: 30 - periodSeconds: 15 - successThreshold: 1 - timeoutSeconds: 1 - resources: - requests: - cpu: ${REQUEST_CPU} - memory: ${REQUEST_MEMORY} - limits: - cpu: ${LIMIT_CPU} - memory: ${LIMIT_MEMORY} - securityContext: - capabilities: {} - privileged: false - terminationMessagePath: /dev/termination-log - volumeMounts: - - mountPath: /pgdata - name: ${DATABASE_SERVICE_NAME}-data - - mountPath: /var/run/postgresql - name: ${DATABASE_SERVICE_NAME}-run - - mountPath: /pgconf - name: ${DATABASE_SERVICE_NAME}-setupcfg - dnsPolicy: ClusterFirst - restartPolicy: Always - volumes: - - name: ${DATABASE_SERVICE_NAME}-data - persistentVolumeClaim: - claimName: ${DATABASE_SERVICE_NAME} - - name: ${DATABASE_SERVICE_NAME}-run - emptyDir: {} - - configMap: - name: ${DATABASE_SERVICE_NAME}-setupcfg - name: ${DATABASE_SERVICE_NAME}-setupcfg - triggers: - - imageChangeParams: - automatic: true - containerNames: - - postgresql - from: - kind: ImageStreamTag - name: ${IMAGE_STREAM_NAME}:${IMAGE_STREAM_VERSION} - namespace: ${IMAGE_STREAM_NAMESPACE} - lastTriggeredImage: "" - type: ImageChange - - type: ConfigChange - status: {} diff --git a/gwells/openshift/psql-backups.cj.json b/gwells/openshift/psql-backups.cj.json deleted file mode 100644 index 394913cfa..000000000 --- a/gwells/openshift/psql-backups.cj.json +++ /dev/null @@ -1,118 +0,0 @@ -{ - "kind": "List", - "apiVersion": "v1", - "metadata": {}, - "items": [ - { - "kind": "CronJob", - "apiVersion": "batch/v1beta1", - "metadata": { - "name": "gwells-pgsql-backup-production", - "creationTimestamp": null, - "labels": { - "run": "gwells-pgsql-backup-production" - } - }, - "spec": { - "schedule": "45 10 * * *", - "concurrencyPolicy": "Replace", - "suspend": false, - "jobTemplate": { - "metadata": { - "creationTimestamp": null - }, - "spec": { - "activeDeadlineSeconds": 300, - "template": { - "metadata": { - "creationTimestamp": null, - "labels": { - "run": "gwells-pgsql-backup-production" - } - }, - "spec": { - "volumes": [ - { - "name": "backup", - "persistentVolumeClaim": { - "claimName": "gwells-pgsql-production-backup" - } - } - ], - "containers": [ - { - "name": "gwells-pgsql-backup-production", - "image": "docker-registry.default.svc:5000/openshift/postgresql:9.5", - "command": [ - "bash", - "-c", - " - psql --version; \ - mkdir -p /pgsql-backup/gwells-pgsql-production; \ - echo Dump Command: pg_dump -U ${PGUSER} -Fc -f /pgsql-backup/gwells-pgsql-production/$(date +%Y-%m-%d-%H%M).dump --no-privileges --no-tablespaces --schema=public ${PGDATABASE}; \ - pg_dump -U ${PGUSER} -Fc -f /pgsql-backup/gwells-pgsql-production/$(date +%Y-%m-%d-%H%M).dump --no-privileges --no-tablespaces --schema=public 
${PGDATABASE} && \ - ls /pgsql-backup/gwells-pgsql-production/*.dump -1pr | tail -n +11 | xargs -r rm --; \ - ls -lh /pgsql-backup/gwells-pgsql-production/ \ - " - ], - "env": [ - { - "name": "PGHOST", - "value": "gwells-pg12-production" - }, - { - "name": "PGDATABASE", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pgsql-production", - "key": "database-name" - } - } - }, - { - "name": "PGUSER", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pgsql-production", - "key": "database-user" - } - } - }, - { - "name": "PGPASSWORD", - "valueFrom": { - "secretKeyRef": { - "name": "gwells-pgsql-production", - "key": "database-password" - } - } - } - ], - "resources": {}, - "volumeMounts": [ - { - "name": "backup", - "mountPath": "/pgsql-backup" - } - ], - "terminationMessagePath": "/dev/termination-log", - "terminationMessagePolicy": "File", - "imagePullPolicy": "Always" - } - ], - "restartPolicy": "Never", - "terminationGracePeriodSeconds": 30, - "activeDeadlineSeconds": 300, - "dnsPolicy": "ClusterFirst", - "securityContext": {}, - "schedulerName": "default-scheduler" - } - } - } - }, - "successfulJobsHistoryLimit": 3, - "failedJobsHistoryLimit": 3 - } - } - ] -} diff --git a/gwells/openshift/scripts/db-drops.sql b/gwells/openshift/scripts/db-drops.sql deleted file mode 100644 index ff0153213..000000000 --- a/gwells/openshift/scripts/db-drops.sql +++ /dev/null @@ -1,104 +0,0 @@ -drop table if exists activity_submission cascade; -drop table if exists activity_submission_water_quality cascade; -drop table if exists activity_submission_drilling_methods cascade; -drop table if exists activity_submission_development_methods cascade; -drop table if exists aquifer cascade; -drop table if exists aquifer_demand_code cascade; -drop table if exists aquifer_material_code cascade; -drop table if exists aquifer_productivity_code cascade; -drop table if exists aquifer_subtype_code cascade; -drop table if exists aquifer_vulnerability_code cascade; -drop table if exists aquifer_well cascade; -drop table if exists auth_group cascade; -drop table if exists auth_group_permissions cascade; -drop table if exists auth_permission cascade; -drop table if exists auth_user cascade; -drop table if exists auth_user_groups cascade; -drop table if exists auth_user_user_permissions cascade; -drop table if exists bcgs_number cascade; -drop table if exists bedrock_material_code cascade; -drop table if exists bedrock_material_descriptor_code cascade; -drop table if exists casing cascade; -drop table if exists casing_code cascade; -drop table if exists casing_material_code cascade; -drop table if exists decommission_method_code cascade; -drop table if exists development_method_code cascade; -drop table if exists django_admin_log cascade; -drop table if exists django_content_type cascade; -drop table if exists django_migrations cascade; -drop table if exists django_session cascade; -drop table if exists drilling_company cascade; -drop table if exists drilling_method_code cascade; -drop table if exists filter_pack_material_code cascade; -drop table if exists filter_pack_material_size_code cascade; -drop table if exists ground_elevation_method_code cascade; -drop table if exists gwells_survey cascade; -drop table if exists hydraulic_property cascade; -drop table if exists intended_water_use_code cascade; -drop table if exists land_district_code cascade; -drop table if exists licenced_status_code cascade; -drop table if exists liner_material_code cascade; -drop table if exists liner_perforation cascade; -drop table if 
exists lithology_colour_code cascade; -drop table if exists lithology_description cascade; -drop table if exists lithology_description_code cascade; -drop table if exists lithology_hardness_code cascade; -drop table if exists lithology_material_code cascade; -drop table if exists lithology_moisture_code cascade; -drop table if exists lithology_structure_code cascade; -drop table if exists ltsa_owner cascade; -drop table if exists obs_well_status_code cascade; -drop table if exists online_survey cascade; -drop table if exists perforation cascade; -drop table if exists production_data cascade; -drop table if exists profile cascade; -drop table if exists province_state_code cascade; -drop table if exists quality_concern_code cascade; -drop table if exists registries_accredited_certificate_code cascade; -drop table if exists registries_activity_code cascade; -drop table if exists registries_application cascade; -drop table if exists registries_application_status_code cascade; -drop table if exists registries_certifying_authority_code cascade; -drop table if exists registries_organization cascade; -drop table if exists registries_organization_note cascade; -drop table if exists registries_person cascade; -drop table if exists registries_person_note cascade; -drop table if exists registries_proof_of_age_code cascade; -drop table if exists registries_register cascade; -drop table if exists registries_register_note cascade; -drop table if exists registries_removal_reason_code cascade; -drop table if exists registries_subactivity_code cascade; -drop table if exists registries_well_class_code cascade; -drop table if exists registries_well_qualification cascade; -drop table if exists reversion_revision cascade; -drop table if exists reversion_version cascade; -drop table if exists screen cascade; -drop table if exists screen_assembly_type_code cascade; -drop table if exists screen_bottom_code cascade; -drop table if exists screen_intake_method_code cascade; -drop table if exists screen_material_code cascade; -drop table if exists screen_opening_code cascade; -drop table if exists screen_type_code cascade; -drop table if exists surface_seal_material_code cascade; -drop table if exists surface_seal_method_code cascade; -drop table if exists surficial_material_code cascade; -drop table if exists water_quality_characteristic cascade; -drop table if exists water_quality_colour_code cascade; -drop table if exists water_use_code cascade; -drop table if exists well cascade; -drop table if exists well_activity_code cascade; -drop table if exists well_class_code cascade; -drop table if exists well_status_code cascade; -drop table if exists well_subclass_code cascade; -drop table if exists well_water_quality cascade; -drop table if exists well_yield_unit_code cascade; -drop table if exists well_drilling_methods cascade; -drop table if exists well_development_methods cascade; -drop table if exists wells_decommissiondescription cascade; -drop table if exists wells_decommissionmaterialcode cascade; -drop table if exists well_publication_status_code cascade; -drop table if exists xform_aquifers cascade; -drop table if exists yield_estimation_method_code cascade; -drop table if exists coordinate_acquisition_code cascade; - -drop sequence if exists aquifer_aquifer_id_seq; \ No newline at end of file diff --git a/gwells/openshift/scripts/oc-dump.sh b/gwells/openshift/scripts/oc-dump.sh deleted file mode 100755 index 734ee508b..000000000 --- a/gwells/openshift/scripts/oc-dump.sh +++ /dev/null @@ -1,117 +0,0 @@ 
-#!/bin/sh -# -# Dumps from a GWells database and stores locally. Project namespace required. -# -# Based on TFRS' process: -# https://github.com/bcgov/gwells/tree/developer/app/database/xfer-registries-to-openshift -# -# NOTE: You need to be logged in with a token, via: -# https://console.pathfinder.gov.bc.ca:8443/oauth/token/request -# -# EXAMPLE: ./oc-dump.sh moe-gwells-test/gwells-pgsql-staging - - -# Halt conditions, verbosity and field separator -# -set -euo pipefail -[ "${VERBOSE:-x}" != true ]|| set -x -IFS=$'\n\t' - - -# Parameters -# -TARGET=${1:-""} -PROJECT=$( echo ${TARGET} | cut -d "/" -f 1 ) -DC_NAME=$( echo ${TARGET} | cut -d "/" -f 2 ) -SAVE_TO=${2:-./${DC_NAME}-$( date +%Y-%m-%d-%H%M )} - - -# APP and mode variables -# -APP_NAME=${APP_NAME:-gwells} -KEEP_APP_ONLINE=${KEEP_APP_ONLINE:-true} - - -# Show message if passed any params -# -if [ "${#}" -eq 0 ]||[ "${#}" -gt 2 ]||[ "${PROJECT}" == "${DC_NAME}" ] -then - echo - echo "Dumps from a GWells database to store locally" - echo - echo "Provide a target name." - echo " './oc-dump.sh / '" - echo - exit -fi - - -# Check login -# -if ! oc whoami -then - echo - echo "Please obtain an OpenShift API token. A window will open shortly." - sleep 5 - open https://console.pathfinder.gov.bc.ca:8443/oauth/token/request - exit -fi - - -# Check project availability -# -CHECK=$( oc projects | tr -d '*' | grep -v "Using project" | grep "${PROJECT}" | awk '{ print $1 }' || echo ) -if [ "${PROJECT}" != "${CHECK}" ] -then - echo - echo "Unable to access project ${PROJECT}" - echo - exit -fi - - -# Put GWells into maintenance mode and scale down (deployment config) -# -if [ "${KEEP_APP_ONLINE}" != "true" ] -then - APPLICATION_NAME=${APP_NAME} ../maintenance/maintenance.sh ${PROJECT} on - oc scale -n ${PROJECT} --replicas=0 deploymentconfig ${APP_NAME} -fi - - -# Make sure $SAVE_TO ends in .gz -# -[ "$( echo ${SAVE_TO} | tail -c4 )" == ".dump" ]|| SAVE_TO="${SAVE_TO}.dump" - - -# Identify database and take a backup -# -POD_DB=$( oc get pods -n ${PROJECT} -o name | grep -Eo "${DC_NAME}-[[:digit:]]+-[[:alnum:]]+" ) -SAVE_FILE=$( basename ${SAVE_TO} ) -SAVE_PATH=$( dirname ${SAVE_TO} ) -mkdir -p ${SAVE_PATH} -oc exec ${POD_DB} -n ${PROJECT} -- /bin/bash -c '\ - pg_dump -U ${POSTGRESQL_USER} -d ${POSTGRESQL_DATABASE} -Fc -f /tmp/'${SAVE_FILE}' \ - --no-privileges --no-tablespaces --schema=public --exclude-table=spatial_ref_sys \ -' - -oc rsync ${POD_DB}:/tmp/${SAVE_FILE} ${SAVE_PATH} -n ${PROJECT} --progress=true --no-perms=true -oc exec ${POD_DB} -n ${PROJECT} -- /bin/bash -c 'rm /tmp/'${SAVE_FILE} - - -# Take GWells out of maintenance mode and scale back up (deployment config) -# -if [ "${KEEP_APP_ONLINE}" != "true" ] -then - oc scale -n ${PROJECT} --replicas=1 deploymentconfig ${APP_NAME} - sleep 30 - APPLICATION_NAME=${APP_NAME} ../maintenance/maintenance.sh ${PROJECT} off -fi - - -# Summarize -# -echo -echo "Size: $( du -h ${SAVE_TO} | awk '{ print $1 }' )" -echo "Name: ${SAVE_TO}" -echo diff --git a/gwells/openshift/scripts/oc-restore.sh b/gwells/openshift/scripts/oc-restore.sh deleted file mode 100755 index 1399c916a..000000000 --- a/gwells/openshift/scripts/oc-restore.sh +++ /dev/null @@ -1,133 +0,0 @@ -#!/bin/sh -# -# Dumps from a GWells database and stores locally. Project namespace required. 
-# -# Based on TFRS' process: -# https://github.com/bcgov/gwells/tree/developer/database/xfer-registries-to-openshift -# -# NOTE: You need to be logged in with a token, via: -# https://console.pathfinder.gov.bc.ca:8443/oauth/token/request -# -# EXAMPLE: ./oc-restore.sh moe-gwells-prod/gwells-pgsql-production gwells-pgsql-production-2018-12-20-1542.dump - - -# Halt conditions, verbosity and field separator -# -set -euo pipefail -[ "${VERBOSE:-x}" != true ]|| set -x -IFS=$'\n\t' - - -# Parameters -# -TARGET=${1:-} -RESTORE=${2:-} -PROJECT=$( echo ${TARGET} | cut -d "/" -f 1 ) -DC_NAME=$( echo ${TARGET} | cut -d "/" -f 2 ) - - -# APP and mode variables -# -APP_NAME=${APP_NAME:-gwells} -DB_NAME=${DB_NAME:-${APP_NAME}} -KEEP_APP_ONLINE=${KEEP_APP_ONLINE:-true} - - -# Show message if passed any params -# -if [ "${#}" -ne 2 ] -then - echo - echo "Restores a GWells database from a local file" - echo - echo "Provide a target name and backup file to restore." - echo " './oc-restore.sh / '" - echo - exit -fi - - -# Verify ${RESTORE} file -# -if [ ! -f "${RESTORE}" ] -then - echo - echo "Please verify ${RESTORE} exists and is non-empty. Exiting." - echo - exit -fi - - -# Check login -# -if ! oc whoami -then - echo - echo "Please obtain an OpenShift API token. A window will open shortly." - sleep 3 - open https://console.pathfinder.gov.bc.ca:8443/oauth/token/request - exit -fi - - -# Check project availability -# -CHECK=$( oc projects | tr -d '*' | grep -v "Using project" | grep "${PROJECT}" | awk '{ print $1 }' || echo ) -if [ "${PROJECT}" != "${CHECK}" ] -then - echo - echo "Unable to access project ${PROJECT}" - echo - exit -fi - - -# Put GWells into maintenance mode and scale down (deployment config) -# -if [ "${KEEP_APP_ONLINE}" != "true" ] -then - APPLICATION_NAME=${APP_NAME} ../maintenance/maintenance.sh ${PROJECT} on - oc scale -n ${PROJECT} --replicas=0 deploymentconfig ${APP_NAME} -fi - - -# Copy dump into pod -# -RESTORE_PATH=$( dirname ${RESTORE} ) -RESTORE_FILE=$( basename ${RESTORE} ) -POD_DB=$( oc get pods -n ${PROJECT} -o name | grep -Eo "${DC_NAME}-[[:digit:]]+-[[:alnum:]]+" ) -oc cp ${RESTORE} "${POD_DB}":/tmp/ -n ${PROJECT} - - -# Drop tables and functions from ./db-drops.sql -# -while read c -do - oc exec ${POD_DB} -n ${PROJECT} -- /bin/bash -c "psql -d ${DB_NAME} -U \${POSTGRESQL_USER} -c \"${c}\"" -done < db-drops.sql - - -# Restore database dump -# -oc exec ${POD_DB} -n ${PROJECT} -- /bin/bash -c "pg_restore -d ${DB_NAME} -U \${POSTGRESQL_USER} --no-owner /tmp/${RESTORE_FILE}" - - -# Take GWells out of maintenance mode and scale back up (deployment config) -# -if [ "${KEEP_APP_ONLINE}" != "true" ] -then - oc scale -n ${PROJECT} --replicas=1 deploymentconfig ${APP_NAME} - sleep 30 - APPLICATION_NAME=${APP_NAME} ../maintenance/maintenance.sh ${PROJECT} off -fi - - -# Summarize -# -echo -echo "Name: ${DC_NAME}" -echo "Proj: ${PROJECT}" -echo "DB: ${DB_NAME}" -echo "Size: $( du -h ${RESTORE} | awk '{ print $1 }' )" -echo "File: ${RESTORE}" -echo diff --git a/gwells/openshift/sonar-runner/README.md b/gwells/openshift/sonar-runner/README.md deleted file mode 100644 index c54bc2c74..000000000 --- a/gwells/openshift/sonar-runner/README.md +++ /dev/null @@ -1,59 +0,0 @@ -This example demonstrates how to analyze a simple Java project with Gradle. 
- -Prerequisites -============= -* [SonarQube](http://www.sonarqube.org/downloads/) 6.7+ -* [Gradle](http://www.gradle.org/) 2.1 or higher - -Usage -===== -* Analyze the project with SonarQube using Gradle: - - ./gradlew sonarqube [-Dsonar.host.url=... -Dsonar.jdbc.url=... -Dsonar.jdbc.username=... -Dsonar.jdbc.password=...] - -Local Install -============= -To install SonarQube locally do the following: -* Download the version for your OS from [SonarQube](http://www.sonarqube.org/downloads/) -* Install locally following the directions -* Run server: http://localhost:9000 -* Review your build.gradle, you need to add the following property: ```property "sonar.host.url", "http://localhost:9000"``` -* run ./gradlew sonarqube from this directory -* Go to web browser and review result - -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/gate?key=org.sonarqube:bcgov-gwells&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) - -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=lines&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) - -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=ncloc&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) - -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=comment_lines_density&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=public_documented_api_density&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=function_complexity&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=test_errors&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=test_failures&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=skipped_tests&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=test_success_density&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality 
Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=coverage&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=new_coverage&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=it_coverage&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=new_it_coverage&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=overall_coverage&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=new_overall_coverage&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=duplicated_lines_density&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=new_duplicated_lines_density&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=blocker_violations&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=critical_violations&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=new_blocker_violations&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=new_critical_violations&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=code_smells&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality 
Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=new_code_smells&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=bugs&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=new_bugs&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=vulnerabilities&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=new_vulnerabilities&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=sqale_debt_ratio&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=new_sqale_debt_ratio&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=new_maintainability_rating&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=new_reliability_rating&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) -[![Quality Gate](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/api/badges/measure?key=org.sonarqube:bcgov-gwells&metric=new_security_rating&template=FLAT)](https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca/dashboard/index/org.sonarqube:bcgov-gwells) diff --git a/gwells/openshift/sonar-runner/build.gradle b/gwells/openshift/sonar-runner/build.gradle deleted file mode 100644 index c8670bd65..000000000 --- a/gwells/openshift/sonar-runner/build.gradle +++ /dev/null @@ -1,51 +0,0 @@ -// Uses DSL plugins resolution introduced in Gradle 2.1 -plugins { - id "java" - id "jacoco" - id "org.sonarqube" version "2.6.1" -} - -sonarqube { - properties { - //property "sonar.host.url", "https://sonarqube-moe-gwells-tools.pathfinder.gov.bc.ca" - property "sonar.projectName", "GWells" - property "sonar.projectKey", "org.sonarqube:bcgov-gwells" - property "sonar.projectBaseDir", "../app/backend/" - property "sonar.sources", "./gwells,./registries,./frontend" - property "sonar.lanuage", "py" - //property "sonar.tests", "gwells/test*.py" - property "sonar.sourceEncoding", "UTF-8" - property "sonar.python.coveragePlugin","cobertura" - property "sonar.jacoco.reportPath", 
"${project.buildDir}/jacoco/test.exec" - } -} - -allprojects { - def env = System.getenv() - TimeZone.getTimeZone('UTC') - Date date= new Date() - String newdate=date.format("YYYYMMdd") - ext.baseVersion = newdate + "." + env['BUILD_NUMBER'] - ext.snapshotVersion = false - group = "org.sonarqube" - version = "$baseVersion" + (snapshotVersion ? "-SNAPSHOT" : "") -} - -test { - ignoreFailures = true -} - -dependencies { - testCompile 'junit:junit:4.12' -} - -repositories { - repositories { - maven { - url "http://repo1.maven.org/maven2/" - } - maven { - url "https://plugins.gradle.org/m2/" - } - } -} diff --git a/gwells/openshift/sonar-runner/gradle.properties b/gwells/openshift/sonar-runner/gradle.properties deleted file mode 100644 index 2e8d9ceb4..000000000 --- a/gwells/openshift/sonar-runner/gradle.properties +++ /dev/null @@ -1,2 +0,0 @@ -org.gradle.daemon=false -org.gradle.workers.max=4 diff --git a/gwells/openshift/sonar-runner/gradle/wrapper/gradle-wrapper.jar b/gwells/openshift/sonar-runner/gradle/wrapper/gradle-wrapper.jar deleted file mode 100644 index 6ffa23784..000000000 Binary files a/gwells/openshift/sonar-runner/gradle/wrapper/gradle-wrapper.jar and /dev/null differ diff --git a/gwells/openshift/sonar-runner/gradle/wrapper/gradle-wrapper.properties b/gwells/openshift/sonar-runner/gradle/wrapper/gradle-wrapper.properties deleted file mode 100644 index 525012a34..000000000 --- a/gwells/openshift/sonar-runner/gradle/wrapper/gradle-wrapper.properties +++ /dev/null @@ -1,6 +0,0 @@ -#Tue Sep 20 22:02:53 BST 2016 -distributionBase=GRADLE_USER_HOME -distributionPath=wrapper/dists -zipStoreBase=GRADLE_USER_HOME -zipStorePath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-4.2-bin.zip diff --git a/gwells/openshift/sonar-runner/gradlew b/gwells/openshift/sonar-runner/gradlew deleted file mode 100755 index 9aa616c27..000000000 --- a/gwells/openshift/sonar-runner/gradlew +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/env bash - -############################################################################## -## -## Gradle start up script for UN*X -## -############################################################################## - -# Attempt to set APP_HOME -# Resolve links: $0 may be a link -PRG="$0" -# Need this for relative symlinks. -while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG=`dirname "$PRG"`"/$link" - fi -done -SAVED="`pwd`" -cd "`dirname \"$PRG\"`/" >/dev/null -APP_HOME="`pwd -P`" -cd "$SAVED" >/dev/null - -APP_NAME="Gradle" -APP_BASE_NAME=`basename "$0"` - -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS="" - -# Use the maximum available, or set MAX_FD != -1 to use that value. -MAX_FD="maximum" - -warn ( ) { - echo "$*" -} - -die ( ) { - echo - echo "$*" - echo - exit 1 -} - -# OS specific support (must be 'true' or 'false'). -cygwin=false -msys=false -darwin=false -nonstop=false -case "`uname`" in - CYGWIN* ) - cygwin=true - ;; - Darwin* ) - darwin=true - ;; - MINGW* ) - msys=true - ;; - NONSTOP* ) - nonstop=true - ;; -esac - -CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar - -# Determine the Java command to use to start the JVM. -if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" - else - JAVACMD="$JAVA_HOME/bin/java" - fi - if [ ! 
-x "$JAVACMD" ] ; then - die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." - fi -else - JAVACMD="java" - which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." -fi - -# Increase the maximum file descriptors if we can. -if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then - MAX_FD_LIMIT=`ulimit -H -n` - if [ $? -eq 0 ] ; then - if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then - MAX_FD="$MAX_FD_LIMIT" - fi - ulimit -n $MAX_FD - if [ $? -ne 0 ] ; then - warn "Could not set maximum file descriptor limit: $MAX_FD" - fi - else - warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" - fi -fi - -# For Darwin, add options to specify how the application appears in the dock -if $darwin; then - GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" -fi - -# For Cygwin, switch paths to Windows format before running java -if $cygwin ; then - APP_HOME=`cygpath --path --mixed "$APP_HOME"` - CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` - JAVACMD=`cygpath --unix "$JAVACMD"` - - # We build the pattern for arguments to be converted via cygpath - ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` - SEP="" - for dir in $ROOTDIRSRAW ; do - ROOTDIRS="$ROOTDIRS$SEP$dir" - SEP="|" - done - OURCYGPATTERN="(^($ROOTDIRS))" - # Add a user-defined pattern to the cygpath arguments - if [ "$GRADLE_CYGPATTERN" != "" ] ; then - OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" - fi - # Now convert the arguments - kludge to limit ourselves to /bin/sh - i=0 - for arg in "$@" ; do - CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` - CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option - - if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition - eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` - else - eval `echo args$i`="\"$arg\"" - fi - i=$((i+1)) - done - case $i in - (0) set -- ;; - (1) set -- "$args0" ;; - (2) set -- "$args0" "$args1" ;; - (3) set -- "$args0" "$args1" "$args2" ;; - (4) set -- "$args0" "$args1" "$args2" "$args3" ;; - (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; - (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; - (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; - (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; - (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; - esac -fi - -# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules -function splitJvmOpts() { - JVM_OPTS=("$@") -} -eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS -JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" - -# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong -if [[ "$(uname)" == "Darwin" ]] && [[ "$HOME" == "$PWD" ]]; then - cd "$(dirname "$0")" -fi - -exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" diff --git a/gwells/openshift/sonar-runner/gradlew.bat b/gwells/openshift/sonar-runner/gradlew.bat deleted file mode 100644 index f9553162f..000000000 --- 
a/gwells/openshift/sonar-runner/gradlew.bat +++ /dev/null @@ -1,84 +0,0 @@ -@if "%DEBUG%" == "" @echo off -@rem ########################################################################## -@rem -@rem Gradle startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. -set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME% - -@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS= - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto init - -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto init - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:init -@rem Get command-line arguments, handling Windows variants - -if not "%OS%" == "Windows_NT" goto win9xME_args - -:win9xME_args -@rem Slurp the command line arguments. -set CMD_LINE_ARGS= -set _SKIP=2 - -:win9xME_args_slurp -if "x%~1" == "x" goto execute - -set CMD_LINE_ARGS=%* - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar - -@rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% - -:end -@rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd - -:fail -rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! 
-if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega diff --git a/gwells/pics/container_diagram.png b/gwells/pics/container_diagram.png deleted file mode 100644 index 43646f7a5..000000000 Binary files a/gwells/pics/container_diagram.png and /dev/null differ diff --git a/minio/openshift.deploy.old.yml b/minio/openshift.deploy.old.yml deleted file mode 100644 index f2807f266..000000000 --- a/minio/openshift.deploy.old.yml +++ /dev/null @@ -1,139 +0,0 @@ -kind: Template -apiVersion: template.openshift.io/v1 -metadata: - annotations: - description: "Minio deployment" - labels: - app: "${NAME}" - template: "${NAME}-template" - name: "${NAME}" -parameters: - - name: "NAME" - displayName: "App to deploy" - value: "minio" - - name: "SRC_NAMESPACE" - displayName: "Namespace containing the deployment source" - value: "moe-gwells-tools" - - name: "SRC_IMAGE" - displayName: "Name of source image" - value: "minio" - - name: "DEST_PVC_SIZE" - displayName: "PVC size" - value: "1Gi" - - name: "DEST_PVC_ACCESS" - displayName: "PVC access mode" - value: "ReadWriteOnce" - - name: "IMAGE_TAG" - required: true - - name: "MINIO_DATA_DIR" - default: /opt/minio/s3/data - - name: "NAME_SUFFIX" - required: false -objects: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - finalizers: - - kubernetes.io/pvc-protection - name: "gwells-${NAME_SUFFIX}-minio-config-vol-v2" - labels: - app: nr-gwells-${NAME_SUFFIX} - spec: - accessModes: - - ${DEST_PVC_ACCESS} - resources: - requests: - storage: ${DEST_PVC_SIZE} - storageClassName: netapp-file-standard - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - finalizers: - - kubernetes.io/pvc-protection - name: "gwells-${NAME_SUFFIX}-minio-data-vol" - labels: - app: nr-gwells-${NAME_SUFFIX} - spec: - accessModes: - - ${DEST_PVC_ACCESS} - resources: - requests: - storage: ${DEST_PVC_SIZE} - storageClassName: netapp-file-standard - - apiVersion: apps/v1 - kind: Deployment - metadata: - labels: - app: nr-gwells-${NAME_SUFFIX} - name: "gwells-${NAME_SUFFIX}-minio" - spec: - replicas: 1 - selector: - matchLabels: - deployment: "gwells-${NAME_SUFFIX}-minio" - strategy: - activeDeadlineSeconds: 3000 - recreateParams: - timeoutSeconds: 600 - type: Recreate - template: - metadata: - labels: - name: "gwells-${NAME_SUFFIX}-minio" - deployment: "gwells-${NAME_SUFFIX}-minio" - spec: - containers: - - env: - - name: MINIO_ACCESS_KEY - valueFrom: - secretKeyRef: - name: gwells-${NAME_SUFFIX}-minio - key: MINIO_ACCESS_KEY - - name: MINIO_SECRET_KEY - valueFrom: - secretKeyRef: - name: gwells-${NAME_SUFFIX}-minio - key: MINIO_SECRET_KEY - - name: MINIO_DATA_DIR - value: /opt/minio/s3/data - - name: MINIO_CONFIG_DIR - value: /opt/minio/s3/config - image: ghcr.io/bcgov/nr-gwells/minio:${IMAGE_TAG} - imagePullPolicy: Always - name: "gwells-${NAME_SUFFIX}-minio" - ports: - - containerPort: 9000 - protocol: TCP - resources: - limits: - cpu: 150m - requests: - cpu: 100m - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /opt/minio/s3/config - name: config-vol - - mountPath: /opt/minio/s3/data - name: data-vol - restartPolicy: Always - terminationGracePeriodSeconds: 30 - volumes: - - name: config-vol - persistentVolumeClaim: - claimName: "gwells-${NAME_SUFFIX}-minio-config-vol-v2" - - name: data-vol - persistentVolumeClaim: - claimName: "gwells-${NAME_SUFFIX}-minio-data-vol" - - kind: Service - apiVersion: v1 - metadata: - labels: - app: nr-gwells-${NAME_SUFFIX} 
- name: "gwells-${NAME_SUFFIX}-minio" - spec: - ports: - - name: 9000-tcp - port: 9000 - protocol: TCP - targetPort: 9000 diff --git a/minio/openshift.deploy.yml b/minio/openshift.deploy.yml index 6451e1205..610e5a2a9 100644 --- a/minio/openshift.deploy.yml +++ b/minio/openshift.deploy.yml @@ -1,48 +1,39 @@ -apiVersion: template.openshift.io/v1 kind: Template -metadata: - annotations: - description: "Minio deployment" - labels: - app: "gwells-${NAME_SUFFIX}" - template: "gwells-minio-template-${NAME_SUFFIX}" - name: "gwells-minio-${NAME_SUFFIX}" +apiVersion: template.openshift.io/v1 parameters: - - name: "NAME_SUFFIX" + - name: REPO + description: Repository name + value: nr-gwells + - name: COMPONENT + description: Component name + value: minio + - name: ZONE + description: Deployment zone, e.g. pr-### or prod required: true - - name: "SRC_NAMESPACE" - displayName: "Namespace containing the deployment source" - value: "26e83e-tools" - - name: "SRC_IMAGE" - displayName: "Name of source image" - value: "gwells-minio" - - name: "SRC_TAG" - displayName: "Tag of source image" - value: "latest" - - name: "DEST_PVC_SIZE" + - name: TAG + required: true + - name: DEST_PVC_SIZE displayName: "PVC size" value: "1Gi" - - name: "DEST_PVC_CLASS" + - name: DEST_PVC_CLASS displayName: "PVC class" value: "netapp-file-standard" - - name: "DEST_PVC_ACCESS" + - name: DEST_PVC_ACCESS displayName: "PVC access mode" value: "ReadWriteMany" - - name: "IMAGE_TAG" - required: true objects: - apiVersion: v1 kind: PersistentVolumeClaim metadata: finalizers: - - kubernetes.io/pvc-protection - name: "gwells-minio-${NAME_SUFFIX}" + - kubernetes.io/pvc-protection + name: ${REPO}-${ZONE}-${COMPONENT} labels: - app: "nr-gwells-${NAME_SUFFIX}" - name: gwells-minio-${NAME_SUFFIX} + app: ${REPO}-${ZONE} + name: ${REPO}-${ZONE}-${COMPONENT} spec: accessModes: - - ${DEST_PVC_ACCESS} + - ${DEST_PVC_ACCESS} resources: requests: storage: ${DEST_PVC_SIZE} @@ -51,14 +42,14 @@ objects: kind: Deployment metadata: labels: - app: "nr-gwells-${NAME_SUFFIX}" - name: "gwells-minio-${NAME_SUFFIX}" - name: "gwells-minio-${NAME_SUFFIX}" + app: ${REPO}-${ZONE} + name: ${REPO}-${ZONE}-${COMPONENT} + name: ${REPO}-${ZONE}-${COMPONENT} spec: replicas: 1 selector: matchLabels: - app: "nr-gwells-${NAME_SUFFIX}" + app: ${REPO}-${ZONE} strategy: activeDeadlineSeconds: 3000 recreateParams: @@ -67,25 +58,25 @@ objects: template: metadata: labels: - app: "nr-gwells-${NAME_SUFFIX}" - name: gwells-minio-${NAME_SUFFIX} - service: gwells-minio-${NAME_SUFFIX} + app: ${REPO}-${ZONE} + name: ${REPO}-${ZONE}-${COMPONENT} + service: ${REPO}-${ZONE}-${COMPONENT} spec: containers: - env: - name: MINIO_ACCESS_KEY valueFrom: secretKeyRef: - name: gwells-minio-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-${COMPONENT} key: MINIO_ACCESS_KEY - name: MINIO_SECRET_KEY valueFrom: secretKeyRef: - name: gwells-minio-${NAME_SUFFIX} + name: ${REPO}-${ZONE}-${COMPONENT} key: MINIO_SECRET_KEY - name: MINIO_DATA_DIR value: /opt/minio/s3/data - image: ghcr.io/bcgov/nr-gwells/minio:${IMAGE_TAG} + image: ghcr.io/bcgov/${REPO}/${COMPONENT}:${TAG} securityContext: capabilities: add: ["NET_BIND_SERVICE"] @@ -100,7 +91,7 @@ objects: # successThreshold: 1 # failureThreshold: 30 # imagePullPolicy: Always - name: "gwells-minio-${NAME_SUFFIX}" + name: ${REPO}-${ZONE}-${COMPONENT} command: - '/bin/sh' - '-c' @@ -124,19 +115,19 @@ objects: volumes: - name: minio-vol persistentVolumeClaim: - claimName: "gwells-minio-${NAME_SUFFIX}" + claimName: ${REPO}-${ZONE}-${COMPONENT} - apiVersion: v1 kind: 
Service metadata: labels: - app: nr-gwells-${NAME_SUFFIX} - name: gwells-minio-${NAME_SUFFIX} - name: gwells-minio-${NAME_SUFFIX} + app: ${REPO}-${ZONE} + name: ${REPO}-${ZONE}-${COMPONENT} + name: ${REPO}-${ZONE}-${COMPONENT} spec: selector: - app: nr-gwells-${NAME_SUFFIX} - name: gwells-minio-${NAME_SUFFIX} - service: gwells-minio-${NAME_SUFFIX} + app: ${REPO}-${ZONE} + name: ${REPO}-${ZONE}-${COMPONENT} + service: ${REPO}-${ZONE}-${COMPONENT} ports: - name: 9000-tcp port: 9000