diff --git a/api/.pipeline/config.js b/api/.pipeline/config.js index ac506c9645..dc1059f9b4 100644 --- a/api/.pipeline/config.js +++ b/api/.pipeline/config.js @@ -21,6 +21,8 @@ const deployChangeId = (isStaticDeployment && 'deploy') || changeId; const branch = (isStaticDeployment && options.branch) || null; const tag = (branch && `build-${version}-${changeId}-${branch}`) || `build-${version}-${changeId}`; +const prCronSchedule = '* * 31 2 *'; // Cronjob schedule that never runs (Feb 31st is an invalid date) + const staticUrlsAPI = config.staticUrlsAPI; const staticUrls = config.staticUrls; @@ -71,6 +73,8 @@ const phases = { dbName: `${dbName}`, phase: 'dev', changeId: deployChangeId, + telemetryCronjobSchedule: '0 0 * * *', // Daily at midnight + telemetryCronjobDisabled: !isStaticDeployment, suffix: `-dev-${deployChangeId}`, instance: `${name}-dev-${deployChangeId}`, version: `${deployChangeId}-${changeId}`, @@ -114,6 +118,8 @@ const phases = { dbName: `${dbName}`, phase: 'test', changeId: deployChangeId, + telemetryCronjobSchedule: '0 0 * * *', // Daily at midnight + telemetryCronjobDisabled: !isStaticDeployment, suffix: `-test`, instance: `${name}-test`, version: `${version}`, @@ -157,6 +163,8 @@ const phases = { dbName: `${dbName}-spi`, phase: 'test-spi', changeId: deployChangeId, + telemetryCronjobSchedule: '0 0 * * *', // Daily at midnight + telemetryCronjobDisabled: !isStaticDeployment, suffix: `-test-spi`, instance: `${name}-spi-test-spi`, version: `${version}`, @@ -200,6 +208,8 @@ const phases = { dbName: `${dbName}`, phase: 'prod', changeId: deployChangeId, + telemetryCronjobSchedule: '0 0 * * *', // Daily at midnight + telemetryCronjobDisabled: !isStaticDeployment, suffix: `-prod`, instance: `${name}-prod`, version: `${version}`, diff --git a/api/.pipeline/lib/api.deploy.js b/api/.pipeline/lib/api.deploy.js index 08a1266580..ff8eaa088a 100644 --- a/api/.pipeline/lib/api.deploy.js +++ b/api/.pipeline/lib/api.deploy.js @@ -25,12 +25,16 @@ const apiDeploy = 
async (settings) => { objects.push( ...oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/api.dc.yaml`, { param: { + NAMESPACE: phases[phase].namespace, NAME: phases[phase].name, SUFFIX: phases[phase].suffix, VERSION: phases[phase].tag, HOST: phases[phase].host, APP_HOST: phases[phase].appHost, CHANGE_ID: phases.build.changeId || changeId, + // Cronjobs + TELEMETRY_CRONJOB_SCHEDULE: phases[phase].telemetryCronjobSchedule, + TELEMETRY_CRONJOB_DISABLED: phases[phase].telemetryCronjobDisabled, // Node NODE_ENV: phases[phase].nodeEnv, NODE_OPTIONS: phases[phase].nodeOptions, diff --git a/api/.pipeline/templates/README.md b/api/.pipeline/templates/README.md index 5053b7343d..7f7f004327 100644 --- a/api/.pipeline/templates/README.md +++ b/api/.pipeline/templates/README.md @@ -10,3 +10,9 @@ The pipeline code builds and deploys all pods/images/storage/etc needed to deplo - Create ObjectStore Secret The included templates under `prereqs` can be imported via the "Import YAML" page in OpenShift. + +## Telemetry Cronjob + +How to manually trigger cronjob? + +- `oc create job --from=cronjob/biohubbc-telemetry-cronjob- ` diff --git a/api/.pipeline/templates/api.dc.yaml b/api/.pipeline/templates/api.dc.yaml index 82dd6713db..0b5047414a 100644 --- a/api/.pipeline/templates/api.dc.yaml +++ b/api/.pipeline/templates/api.dc.yaml @@ -5,6 +5,12 @@ metadata: labels: build: biohubbc-api parameters: + - name: NAMESPACE + description: Openshift namespace name + value: '' + - name: BASE_IMAGE_REGISTRY_URL + description: The base image registry URL + value: image-registry.openshift-image-registry.svc:5000 - name: NAME value: biohubbc-api - name: SUFFIX @@ -21,6 +27,9 @@ parameters: - name: CHANGE_ID description: Change id of the project. 
This will help to pull image stream required: true + - name: CRONJOB_SCHEDULE + description: The + required: true value: '0' - name: NODE_ENV description: Application Environment type variable @@ -76,6 +85,10 @@ parameters: - name: DB_SERVICE_NAME description: 'Database service name associated with deployment' required: true + - name: DB_PORT + description: 'Database port' + required: true + value: '5432' # Keycloak - name: KEYCLOAK_HOST description: Key clock login url @@ -195,6 +208,22 @@ parameters: value: '1' - name: REPLICAS_MAX value: '1' + # Telemetry + - name: TELEMETRY_CRONJOB_SCHEDULE + description: The schedule for the telemetry cronjob + value: '0 0 * * *' # 12am + - name: TELEMETRY_CRONJOB_DISABLED + description: Boolean flag to disable the cronjob, only static deployments should run on schedule. + value: 'true' + - name: TELEMETRY_SECRET + description: The name of the Openshift Biohubbc telemetry secret + value: biohubbc-telemetry + - name: LOTEK_API_HOST + description: The host URL for Lotek webservice API + value: https://webservice.lotek.com + - name: VECTRONIC_API_HOST + description: The host URL for Vectronic webservice API + value: https://api.vectronic-wildlife.com objects: - kind: ImageStream apiVersion: image.openshift.io/v1 @@ -316,7 +345,7 @@ objects: key: database-name name: ${DB_SERVICE_NAME} - name: DB_PORT - value: '5432' + value: ${DB_PORT} # Keycloak - name: KEYCLOAK_HOST value: ${KEYCLOAK_HOST} @@ -537,6 +566,97 @@ objects: status: ingress: null + - kind: CronJob + apiVersion: batch/v1 + metadata: + name: biohubbc-telemetry-cronjob${SUFFIX} + labels: + role: telemetry-cronjob + spec: + schedule: ${TELEMETRY_CRONJOB_SCHEDULE} + suspend: ${{TELEMETRY_CRONJOB_DISABLED}} + concurrencyPolicy: 'Forbid' + successfulJobsHistoryLimit: 1 + failedJobsHistoryLimit: 1 + jobTemplate: + spec: + backoffLimit: 0 + template: + spec: + containers: + - name: api + image: ${BASE_IMAGE_REGISTRY_URL}/${NAMESPACE}/${NAME}:${VERSION} + imagePullPolicy: 
Always + restartPolicy: 'Never' + terminationGracePeriodSeconds: 30 + activeDeadlineSeconds: 220 + env: + - name: NODE_ENV + value: ${NODE_ENV} + - name: NODE_OPTIONS + value: ${NODE_OPTIONS} + # Database + - name: TZ + value: ${TZ} + - name: DB_HOST + value: ${DB_SERVICE_NAME} + - name: DB_USER_API + valueFrom: + secretKeyRef: + key: database-user-api + name: ${DB_SERVICE_NAME} + - name: DB_USER_API_PASS + valueFrom: + secretKeyRef: + key: database-user-api-password + name: ${DB_SERVICE_NAME} + - name: DB_DATABASE + valueFrom: + secretKeyRef: + key: database-name + name: ${DB_SERVICE_NAME} + - name: DB_PORT + value: ${DB_PORT} + # Telemetry + - name: LOTEK_API_HOST + value: ${LOTEK_API_HOST} + - name: LOTEK_ACCOUNT_USERNAME + valueFrom: + secretKeyRef: + key: lotek_account_username + name: ${TELEMETRY_SECRET} + - name: LOTEK_ACCOUNT_PASSWORD + valueFrom: + secretKeyRef: + key: lotek_account_password + name: ${TELEMETRY_SECRET} + - name: VECTRONIC_API_HOST + value: ${VECTRONIC_API_HOST} + # Logging + - name: LOG_LEVEL + value: ${LOG_LEVEL} + - name: LOG_LEVEL_FILE + value: data/cronjob-logs + - name: LOG_FILE_DIR + value: ${LOG_FILE_DIR} + - name: LOG_FILE_NAME + value: sims-telemetry-cronjob-%DATE%.log + - name: LOG_FILE_DATE_PATTERN + value: ${LOG_FILE_DATE_PATTERN} + - name: LOG_FILE_MAX_SIZE + value: ${LOG_FILE_MAX_SIZE} + - name: LOG_FILE_MAX_FILES + value: ${LOG_FILE_MAX_FILES} + # Api Validation + - name: API_RESPONSE_VALIDATION_ENABLED + value: ${API_RESPONSE_VALIDATION_ENABLED} + - name: DATABASE_RESPONSE_VALIDATION_ENABLED + value: ${DATABASE_RESPONSE_VALIDATION_ENABLED} + command: ["npm", "run"] + #command: ["npm", "run", "telemetry-cronjob", "--", "--batchSize 1000", "--concurrently 100"] + restartPolicy: Never + + # Disable the HPA for now, as it is preferrable to run an exact number of pods (e.g. 
min:2, max:2)
  # - kind: HorizontalPodAutoscaler
  #   apiVersion: autoscaling/v2
diff --git a/api/.pipeline/templates/prereqs/biohubbc-telemetry.yaml b/api/.pipeline/templates/prereqs/biohubbc-telemetry.yaml
new file mode 100644
index 0000000000..85e80f199a
--- /dev/null
+++ b/api/.pipeline/templates/prereqs/biohubbc-telemetry.yaml
@@ -0,0 +1,8 @@
+kind: Secret
+apiVersion: v1
+metadata:
+  name: biohubbc-telemetry
+data:
+  lotek_account_username:
+  lotek_account_password:
+type: Opaque