diff --git a/.github/workflows/chart-lint-publish.yml b/.github/workflows/chart-lint-publish.yml
new file mode 100644
index 0000000000..1b51dddc74
--- /dev/null
+++ b/.github/workflows/chart-lint-publish.yml
@@ -0,0 +1,62 @@
+name: Validate / Publish helm charts
+
+on:
+ release:
+ types: [published]
+ pull_request:
+ types: [opened, reopened, synchronize]
+ paths:
+ - 'helm/**'
+ workflow_dispatch:
+ inputs:
+ IGNORE_CHARTS:
+ description: 'Provide list of charts to be ignored separated by pipe(|)'
+ required: false
+ default: '""'
+ type: string
+ CHART_PUBLISH:
+ description: 'Chart publishing to gh-pages branch'
+ required: false
+ default: 'NO'
+ type: string
+ options:
+          - "YES"
+          - "NO"
+ INCLUDE_ALL_CHARTS:
+ description: 'Include all charts for Linting/Publishing (YES/NO)'
+ required: false
+ default: 'NO'
+ type: string
+ options:
+          - "YES"
+          - "NO"
+ push:
+ branches:
+      - 1.*
+      - 0.*
+      - develop
+      - release*
+      - '!release-branch'
+      - '!master'
+ paths:
+ - 'helm/**'
+
+jobs:
+ chart-lint-publish:
+ uses: mosip/kattu/.github/workflows/chart-lint-publish.yml@master
+ with:
+ CHARTS_DIR: ./helm
+ CHARTS_URL: https://mosip.github.io/mosip-helm
+ REPOSITORY: mosip-helm
+ BRANCH: gh-pages
+ INCLUDE_ALL_CHARTS: "${{ inputs.INCLUDE_ALL_CHARTS || 'NO' }}"
+ IGNORE_CHARTS: "${{ inputs.IGNORE_CHARTS || '\"\"' }}"
+ CHART_PUBLISH: "${{ inputs.CHART_PUBLISH || 'YES' }}"
+ LINTING_CHART_SCHEMA_YAML_URL: "https://raw.githubusercontent.com/mosip/kattu/master/.github/helm-lint-configs/chart-schema.yaml"
+ LINTING_LINTCONF_YAML_URL: "https://raw.githubusercontent.com/mosip/kattu/master/.github/helm-lint-configs/lintconf.yaml"
+ LINTING_CHART_TESTING_CONFIG_YAML_URL: "https://raw.githubusercontent.com/mosip/kattu/master/.github/helm-lint-configs/chart-testing-config.yaml"
+ LINTING_HEALTH_CHECK_SCHEMA_YAML_URL: "https://raw.githubusercontent.com/mosip/kattu/master/.github/helm-lint-configs/health-check-schema.yaml"
+ DEPENDENCIES: "mosip,https://mosip.github.io/mosip-helm;"
+ secrets:
+ TOKEN: ${{ secrets.ACTION_PAT }}
+ SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK }}
\ No newline at end of file
diff --git a/deploy/apitestrig/README.md b/deploy/apitestrig/README.md
new file mode 100644
index 0000000000..ef7619b8ae
--- /dev/null
+++ b/deploy/apitestrig/README.md
@@ -0,0 +1,44 @@
+# APITESTRIG
+
+## Introduction
+ApiTestRig will test the working of APIs of the MOSIP modules.
+
+## Install
+* Review `values.yaml` and, Make sure to enable required modules for apitestrig operation.
+* Install
+```sh
+./install.sh
+```
+* During the execution of the `install.sh` script, a prompt appears requesting information regarding the presence of a public domain and a valid SSL certificate on the server.
+* If the server lacks a public domain and a valid SSL certificate, it is advisable to select the `n` option. Opting for it will enable the `init-container` with an `emptyDir` volume and include it in the deployment process.
+* The init-container will proceed to download the server's self-signed SSL certificate and mount it to the specified location within the container's Java keystore (i.e., `cacerts`) file.
+* This particular functionality caters to scenarios where the script needs to be employed on a server utilizing self-signed SSL certificates.
+
+## Uninstall
+* To uninstall ApiTestRig, run `delete.sh` script.
+```sh
+./delete.sh
+```
+
+## Run apitestrig manually
+
+#### Rancher UI
+* Run apitestrig manually via Rancher UI.
+ ![apitestrig-2.png](../../docs/_images/apitestrig-2.png)
+* There are two modes of apitestrig `smoke` & `smokeAndRegression`.
+* By default, apitestrig will execute with `smokeAndRegression`.
+ If you want to run apitestrig with only `smoke`.
+ You have to update the `apitestrig` configmap and rerun the specific apitestrig job.
+
+#### CLI
+* Download Kubernetes cluster `kubeconfig` file from `rancher dashboard` to your local.
+ ![apitestrig-1.png](../../docs/_images/apitestrig-1.png)
+* Install `kubectl` package to your local machine.
+* Run apitestrig manually via CLI by creating a new job from an existing k8s cronjob.
+ ```
+  kubectl --kubeconfig=<kubeconfig-file> -n apitestrig create job --from=cronjob/<cronjob-name> <job-name>
+ ```
+ example:
+ ```
+ kubectl --kubeconfig=/home/xxx/Downloads/qa4.config -n apitestrig create job --from=cronjob/cronjob-apitestrig-masterdata cronjob-apitestrig-masterdata
+ ```
diff --git a/deploy/apitestrig/copy_cm.sh b/deploy/apitestrig/copy_cm.sh
new file mode 100644
index 0000000000..450e385ca0
--- /dev/null
+++ b/deploy/apitestrig/copy_cm.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copy configmaps from other namespaces
+# DST_NS: Destination namespace
+
+function copying_cm() {
+ UTIL_URL=https://raw.githubusercontent.com/mosip/mosip-infra/master/deployment/v3/utils/copy_cm_func.sh
+ COPY_UTIL=./copy_cm_func.sh
+ DST_NS=apitestrig
+
+ wget -q $UTIL_URL -O copy_cm_func.sh && chmod +x copy_cm_func.sh
+
+ $COPY_UTIL configmap global default $DST_NS
+ $COPY_UTIL configmap keycloak-host keycloak $DST_NS
+ $COPY_UTIL configmap artifactory-share artifactory $DST_NS
+ $COPY_UTIL configmap config-server-share config-server $DST_NS
+ return 0
+}
+
+# set commands for error handling.
+set -e
+set -o errexit ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace # trace ERR through 'time command' and other functions
+set -o pipefail # trace ERR through pipes
+copying_cm # calling function
\ No newline at end of file
diff --git a/deploy/apitestrig/copy_secrets.sh b/deploy/apitestrig/copy_secrets.sh
new file mode 100644
index 0000000000..f0948b5f6d
--- /dev/null
+++ b/deploy/apitestrig/copy_secrets.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copy secrets from other namespaces
+# DST_NS: Destination namespace
+
+function copying_secrets() {
+ UTIL_URL=https://raw.githubusercontent.com/mosip/mosip-infra/master/deployment/v3/utils/copy_cm_func.sh
+ COPY_UTIL=./copy_cm_func.sh
+ DST_NS=apitestrig
+
+ wget -q $UTIL_URL -O copy_cm_func.sh && chmod +x copy_cm_func.sh
+
+ $COPY_UTIL secret keycloak-client-secrets keycloak $DST_NS
+ $COPY_UTIL secret s3 s3 $DST_NS
+ $COPY_UTIL secret postgres-postgresql postgres $DST_NS
+ return 0
+}
+
+# set commands for error handling.
+set -e
+set -o errexit ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace # trace ERR through 'time command' and other functions
+set -o pipefail # trace ERR through pipes
+copying_secrets # calling function
\ No newline at end of file
diff --git a/deploy/apitestrig/delete.sh b/deploy/apitestrig/delete.sh
new file mode 100644
index 0000000000..6a28aa852f
--- /dev/null
+++ b/deploy/apitestrig/delete.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Uninstalls apitestrig
+## Usage: ./delete.sh [kubeconfig]
+
+if [ $# -ge 1 ] ; then
+ export KUBECONFIG=$1
+fi
+
+function deleting_apitestrig() {
+ NS=apitestrig
+ while true; do
+ read -p "Are you sure you want to delete apitestrig helm charts?(Y/n) " yn
+    if [ "$yn" = "Y" ]
+ then
+ helm -n $NS delete apitestrig
+ break
+ else
+ break
+ fi
+ done
+ return 0
+}
+
+# set commands for error handling.
+set -e
+set -o errexit ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace # trace ERR through 'time command' and other functions
+set -o pipefail # trace ERR through pipes
+deleting_apitestrig # calling function
\ No newline at end of file
diff --git a/deploy/apitestrig/install.sh b/deploy/apitestrig/install.sh
new file mode 100644
index 0000000000..cd6d4783af
--- /dev/null
+++ b/deploy/apitestrig/install.sh
@@ -0,0 +1,124 @@
+#!/bin/bash
+# Installs apitestrig
+## Usage: ./install.sh [kubeconfig]
+
+if [ $# -ge 1 ] ; then
+ export KUBECONFIG=$1
+fi
+
+NS=apitestrig
+CHART_VERSION=0.0.1-develop
+
+echo Create $NS namespace
+kubectl create ns $NS
+
+function installing_apitestrig() {
+ echo Istio label
+ kubectl label ns $NS istio-injection=disabled --overwrite
+ helm repo update
+
+ echo Copy configmaps
+ ./copy_cm.sh
+
+ echo Copy secrets
+ ./copy_secrets.sh
+
+ echo "Delete s3, db, & apitestrig configmap if exists"
+ kubectl -n $NS delete --ignore-not-found=true configmap s3
+ kubectl -n $NS delete --ignore-not-found=true configmap db
+ kubectl -n $NS delete --ignore-not-found=true configmap apitestrig
+
+ DB_HOST=$( kubectl -n default get cm global -o json |jq -r '.data."mosip-api-internal-host"' )
+ API_INTERNAL_HOST=$( kubectl -n default get cm global -o json |jq -r '.data."mosip-api-internal-host"' )
+ ENV_USER=$( kubectl -n default get cm global -o json |jq -r '.data."mosip-api-internal-host"' | awk -F '.' '/api-internal/{print $1"."$2}')
+
+ read -p "Please enter the time(hr) to run the cronjob every day (time: 0-23) : " time
+ if [ -z "$time" ]; then
+    echo "ERROR: Time cannot be empty; EXITING;";
+ exit 1;
+ fi
+ if ! [ $time -eq $time ] 2>/dev/null; then
+ echo "ERROR: Time $time is not a number; EXITING;";
+ exit 1;
+ fi
+ if [ $time -gt 23 ] || [ $time -lt 0 ] ; then
+ echo "ERROR: Time should be in range ( 0-23 ); EXITING;";
+ exit 1;
+ fi
+
+ echo "Do you have public domain & valid SSL? (Y/n) "
+ echo "Y: if you have public domain & valid ssl certificate"
+ echo "n: If you don't have a public domain and a valid SSL certificate. Note: It is recommended to use this option only in development environments."
+ read -p "" flag
+
+ if [ -z "$flag" ]; then
+    echo "'flag' was not provided; EXITING;"
+ exit 1;
+ fi
+ ENABLE_INSECURE=''
+ if [ "$flag" = "n" ]; then
+ ENABLE_INSECURE='--set enable_insecure=true';
+ fi
+
+ read -p "Please provide the retention days to remove old reports ( Default: 3 )" reportExpirationInDays
+
+ if [[ -z $reportExpirationInDays ]]; then
+ reportExpirationInDays=3
+ fi
+ if ! [[ $reportExpirationInDays =~ ^[0-9]+$ ]]; then
+ echo "The variable \"reportExpirationInDays\" should contain only number; EXITING";
+ exit 1;
+ fi
+
+ read -p "Please provide slack webhook URL to notify server end issues on your slack channel : " slackWebhookUrl
+
+  if [ -z "$slackWebhookUrl" ]; then
+ echo "slack webhook URL not provided; EXITING;"
+ exit 1;
+ fi
+
+ valid_inputs=("yes" "no")
+ eSignetDeployed=""
+
+ while [[ ! " ${valid_inputs[@]} " =~ " ${eSignetDeployed} " ]]; do
+ read -p "Is the eSignet service deployed? (yes/no): " eSignetDeployed
+ eSignetDeployed=${eSignetDeployed,,} # Convert input to lowercase
+ done
+
+ if [[ $eSignetDeployed == "yes" ]]; then
+ echo "eSignet service is deployed. Proceeding with installation..."
+ else
+ echo "eSignet service is not deployed. hence will be skipping esignet related test-cases..."
+ fi
+
+ echo Installing apitestrig
+ helm -n $NS install apitestrig mosip/apitestrig \
+ --set crontime="0 $time * * *" \
+ -f values.yaml \
+ --version $CHART_VERSION \
+ --set apitestrig.configmaps.s3.s3-host='http://minio.minio:9000' \
+ --set apitestrig.configmaps.s3.s3-user-key='admin' \
+ --set apitestrig.configmaps.s3.s3-region='' \
+ --set apitestrig.configmaps.db.db-server="$DB_HOST" \
+ --set apitestrig.configmaps.db.db-su-user="postgres" \
+ --set apitestrig.configmaps.db.db-port="5432" \
+ --set apitestrig.configmaps.apitestrig.ENV_USER="$ENV_USER" \
+ --set apitestrig.configmaps.apitestrig.ENV_ENDPOINT="https://$API_INTERNAL_HOST" \
+ --set apitestrig.configmaps.apitestrig.ENV_TESTLEVEL="smokeAndRegression" \
+ --set apitestrig.configmaps.apitestrig.reportExpirationInDays="$reportExpirationInDays" \
+ --set apitestrig.configmaps.apitestrig.slack-webhook-url="$slackWebhookUrl" \
+ --set apitestrig.configmaps.apitestrig.eSignetDeployed="$eSignetDeployed" \
+ --set apitestrig.configmaps.apitestrig.NS="$NS" \
+ $ENABLE_INSECURE
+
+ echo Installed apitestrig.
+ return 0
+}
+
+# set commands for error handling.
+set -e
+set -o errexit ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace # trace ERR through 'time command' and other functions
+set -o pipefail # trace ERR through pipes
+installing_apitestrig # calling function
\ No newline at end of file
diff --git a/deploy/apitestrig/values.yaml b/deploy/apitestrig/values.yaml
new file mode 100644
index 0000000000..fd2bca2911
--- /dev/null
+++ b/deploy/apitestrig/values.yaml
@@ -0,0 +1,55 @@
+modules:
+ prereg:
+ enabled: false
+ image:
+ repository: mosipqa/apitest-prereg
+ tag: develop
+ pullPolicy: Always
+ masterdata:
+ enabled: false
+ image:
+ repository: mosipqa/apitest-masterdata
+ tag: develop
+ pullPolicy: Always
+ idrepo:
+ enabled: false
+ image:
+ repository: mosipqa/apitest-idrepo
+ tag: develop
+ pullPolicy: Always
+ partner:
+ enabled: false
+ image:
+ repository: mosipqa/apitest-pms
+ tag: develop
+ pullPolicy: Always
+ pms:
+ enabled: false
+ image:
+ repository: mosipdev/apitest-pms
+ tag: develop
+ pullPolicy: Always
+ resident:
+ enabled: false
+ image:
+ repository: mosipqa/apitest-resident
+ tag: develop
+ pullPolicy: Always
+ auth:
+ enabled: false
+ image:
+ repository: mosipqa/apitest-auth
+ tag: develop
+ pullPolicy: Always
+ esignet:
+ enabled: false
+ image:
+ repository: mosipqa/apitest-esignet
+ tag: develop
+ pullPolicy: Always
+ mimoto:
+ enabled: false
+ image:
+ repository: mosipqa/apitest-mimoto
+ tag: develop
+ pullPolicy: Always
diff --git a/deploy/uitestrig/README.md b/deploy/uitestrig/README.md
new file mode 100644
index 0000000000..c1dc01550b
--- /dev/null
+++ b/deploy/uitestrig/README.md
@@ -0,0 +1,42 @@
+# UITESTRIG
+
+## Introduction
+UITESTRIG will test end-to-end functional flows involving multiple UI modules.
+
+## Install
+* Install
+```sh
+./install.sh
+```
+
+## Uninstall
+* To uninstall UITESTRIG, run `delete.sh` script.
+```sh
+./delete.sh
+```
+
+## Run UITESTRIG manually
+
+#### CLI
+* Download Kubernetes cluster `kubeconfig` file from `rancher dashboard` to your local.
+* Install `kubectl` package to your local machine.
+* Run UITESTRIG manually via CLI by creating a new job from an existing k8s cronjob.
+ ```
+  kubectl --kubeconfig=<kubeconfig-file> -n uitestrig create job --from=cronjob/<cronjob-name> <job-name>
+ ```
+ example:
+ ```
+  kubectl --kubeconfig=/home/xxx/Downloads/qa4.config -n uitestrig create job --from=cronjob/cronjob-uitestrig cronjob-uitestrig
+ ```
+
+## Run ui-testrig using rancher-ui
+
+#### Rancher UI
+* Run ui-testrig manually via Rancher UI.
+ ![ui-testrig.png](../../docs/_images/ui-testrig.png)
+* There are 3 cronjobs for ui-testrig:
+ - adminui
+ - pmp-ui
+ - residentui
+* select the cronjob which you want to initiate and select the `Run Now` button as shown in the above picture.
+* Once the job is completed the reports will be available in `S3/minio`.
diff --git a/deploy/uitestrig/copy_cm.sh b/deploy/uitestrig/copy_cm.sh
new file mode 100644
index 0000000000..828bb6d6de
--- /dev/null
+++ b/deploy/uitestrig/copy_cm.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copy configmaps from other namespaces
+# DST_NS: Destination namespace
+
+function copying_cm() {
+ UTIL_URL=https://raw.githubusercontent.com/mosip/mosip-infra/master/deployment/v3/utils/copy_cm_func.sh
+ COPY_UTIL=./copy_cm_func.sh
+ DST_NS=uitestrig
+
+ wget -q $UTIL_URL -O copy_cm_func.sh && chmod +x copy_cm_func.sh
+
+ $COPY_UTIL configmap global default $DST_NS
+ $COPY_UTIL configmap keycloak-host keycloak $DST_NS
+ $COPY_UTIL configmap artifactory-share artifactory $DST_NS
+ $COPY_UTIL configmap config-server-share config-server $DST_NS
+ return 0
+}
+
+# set commands for error handling.
+set -e
+set -o errexit ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace # trace ERR through 'time command' and other functions
+set -o pipefail # trace ERR through pipes
+copying_cm # calling function
\ No newline at end of file
diff --git a/deploy/uitestrig/copy_secrets.sh b/deploy/uitestrig/copy_secrets.sh
new file mode 100644
index 0000000000..47561e6ade
--- /dev/null
+++ b/deploy/uitestrig/copy_secrets.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copy secrets from other namespaces
+# DST_NS: Destination namespace
+
+function copying_secrets() {
+ UTIL_URL=https://raw.githubusercontent.com/mosip/mosip-infra/master/deployment/v3/utils/copy_cm_func.sh
+ COPY_UTIL=./copy_cm_func.sh
+ DST_NS=uitestrig
+
+ wget -q $UTIL_URL -O copy_cm_func.sh && chmod +x copy_cm_func.sh
+
+ $COPY_UTIL secret keycloak-client-secrets keycloak $DST_NS
+ $COPY_UTIL secret s3 s3 $DST_NS
+ $COPY_UTIL secret postgres-postgresql postgres $DST_NS
+ return 0
+}
+
+# set commands for error handling.
+set -e
+set -o errexit ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace # trace ERR through 'time command' and other functions
+set -o pipefail # trace ERR through pipes
+copying_secrets # calling function
\ No newline at end of file
diff --git a/deploy/uitestrig/delete.sh b/deploy/uitestrig/delete.sh
new file mode 100644
index 0000000000..fac7ba3b56
--- /dev/null
+++ b/deploy/uitestrig/delete.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Uninstalls uitestrig
+## Usage: ./delete.sh [kubeconfig]
+
+if [ $# -ge 1 ] ; then
+ export KUBECONFIG=$1
+fi
+
+function deleting_uitestrig() {
+ NS=uitestrig
+ while true; do
+ read -p "Are you sure you want to delete uitestrig helm charts?(Y/n) " yn
+    if [ "$yn" = "Y" ]
+ then
+ helm -n $NS delete uitestrig
+ break
+ else
+ break
+ fi
+ done
+ return 0
+}
+
+# set commands for error handling.
+set -e
+set -o errexit ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace # trace ERR through 'time command' and other functions
+set -o pipefail # trace ERR through pipes
+deleting_uitestrig # calling function
\ No newline at end of file
diff --git a/deploy/uitestrig/install.sh b/deploy/uitestrig/install.sh
new file mode 100644
index 0000000000..98c0577ff1
--- /dev/null
+++ b/deploy/uitestrig/install.sh
@@ -0,0 +1,91 @@
+#!/bin/bash
+# Installs uitestrig automation
+## Usage: ./install.sh [kubeconfig]
+
+if [ $# -ge 1 ] ; then
+ export KUBECONFIG=$1
+fi
+
+NS=uitestrig
+CHART_VERSION=0.0.1-develop
+
+echo Create $NS namespace
+kubectl create ns $NS
+
+
+function installing_uitestrig() {
+ ENV_NAME=$( kubectl -n default get cm global -o json |jq -r '.data."installation-domain"')
+
+ read -p "Please enter the time(hr) to run the cronjob every day (time: 0-23) : " time
+ if [ -z "$time" ]; then
+ echo "ERROR: Time cannot be empty; EXITING;";
+ exit 1;
+ fi
+ if ! [ $time -eq $time ] 2>/dev/null; then
+ echo "ERROR: Time $time is not a number; EXITING;";
+ exit 1;
+ fi
+ if [ $time -gt 23 ] || [ $time -lt 0 ] ; then
+ echo "ERROR: Time should be in range ( 0-23 ); EXITING;";
+ exit 1;
+ fi
+
+ echo "Do you have public domain & valid SSL? (Y/n) "
+ echo "Y: if you have public domain & valid ssl certificate"
+ echo "n: if you don't have public domain & valid ssl certificate"
+ read -p "" flag
+
+ if [ -z "$flag" ]; then
+    echo "'flag' was not provided; EXITING;"
+ exit 1;
+ fi
+ ENABLE_INSECURE=''
+ if [ "$flag" = "n" ]; then
+ ENABLE_INSECURE='--set uitestrig.configmaps.uitestrig.ENABLE_INSECURE=true';
+ fi
+
+ echo Istio label
+ kubectl label ns $NS istio-injection=disabled --overwrite
+ helm repo update
+
+ echo Copy configmaps
+ ./copy_cm.sh
+
+ echo Copy secrets
+ ./copy_secrets.sh
+
+ DB_HOST=$( kubectl -n default get cm global -o json |jq -r '.data."mosip-api-internal-host"' )
+ PMP_HOST=$(kubectl -n default get cm global -o json |jq -r '.data."mosip-pmp-host"')
+ ADMIN_HOST=$(kubectl -n default get cm global -o json |jq -r '.data."mosip-admin-host"')
+ RESIDENT_HOST=$(kubectl -n default get cm global -o json |jq -r '.data."mosip-resident-host"')
+ API_INTERNAL_HOST=$( kubectl -n default get cm global -o json |jq -r '.data."mosip-api-internal-host"' )
+
+ echo Installing uitestrig
+ helm -n $NS install uitestrig mosip/uitestrig \
+ --set crontime="0 $time * * *" \
+ -f values.yaml \
+ --version $CHART_VERSION \
+ --set uitestrig.configmaps.s3.s3-host='http://minio.minio:9000' \
+ --set uitestrig.configmaps.s3.s3-user-key='admin' \
+ --set uitestrig.configmaps.s3.s3-region='' \
+ --set uitestrig.configmaps.db.db-server="$DB_HOST" \
+ --set uitestrig.configmaps.db.db-su-user="postgres" \
+ --set uitestrig.configmaps.db.db-port="5432" \
+ --set uitestrig.configmaps.uitestrig.apiInternalEndPoint="https://$API_INTERNAL_HOST" \
+ --set uitestrig.configmaps.uitestrig.apiEnvUser="$API_INTERNAL_HOST" \
+ --set uitestrig.configmaps.uitestrig.PmpPortalPath="https://$PMP_HOST" \
+ --set uitestrig.configmaps.uitestrig.adminPortalPath="https://$ADMIN_HOST" \
+ --set uitestrig.configmaps.uitestrig.residentPortalPath="https://$RESIDENT_HOST" \
+ --set uitestrig.configmaps.uitestrig.NS="$NS" \
+ $ENABLE_INSECURE
+
+ return 0
+}
+
+# set commands for error handling.
+set -e
+set -o errexit ## set -e : exit the script if any statement returns a non-true return value
+set -o nounset ## set -u : exit the script if you try to use an uninitialised variable
+set -o errtrace # trace ERR through 'time command' and other functions
+set -o pipefail # trace ERR through pipes
+installing_uitestrig # calling function
\ No newline at end of file
diff --git a/deploy/uitestrig/values.yaml b/deploy/uitestrig/values.yaml
new file mode 100644
index 0000000000..014727441d
--- /dev/null
+++ b/deploy/uitestrig/values.yaml
@@ -0,0 +1,22 @@
+modules:
+ - name: admin-ui
+ enabled: false
+ image:
+ registry: docker.io
+ repository: mosipqa/uitest-admin
+ tag: develop
+ pullPolicy: Always
+ - name: pmp-ui
+ enabled: false
+ image:
+ registry: docker.io
+ repository: mosipqa/uitest-pmp
+ tag: develop
+ pullPolicy: Always
+ - name: resident-ui
+ enabled: false
+ image:
+ registry: docker.io
+ repository: mosipqa/uitest-resident
+ tag: develop
+ pullPolicy: Always
diff --git a/docs/_images/apitestrig-1.png b/docs/_images/apitestrig-1.png
new file mode 100644
index 0000000000..a7f36f6f05
Binary files /dev/null and b/docs/_images/apitestrig-1.png differ
diff --git a/docs/_images/apitestrig-2.png b/docs/_images/apitestrig-2.png
new file mode 100644
index 0000000000..c6e48e63d3
Binary files /dev/null and b/docs/_images/apitestrig-2.png differ
diff --git a/docs/_images/ui-testrig.png b/docs/_images/ui-testrig.png
new file mode 100644
index 0000000000..d2010016f7
Binary files /dev/null and b/docs/_images/ui-testrig.png differ
diff --git a/helm/apitestrig/.gitignore b/helm/apitestrig/.gitignore
new file mode 100644
index 0000000000..ee3892e879
--- /dev/null
+++ b/helm/apitestrig/.gitignore
@@ -0,0 +1 @@
+charts/
diff --git a/helm/apitestrig/.helmignore b/helm/apitestrig/.helmignore
new file mode 100644
index 0000000000..f0c1319444
--- /dev/null
+++ b/helm/apitestrig/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/apitestrig/Chart.yaml b/helm/apitestrig/Chart.yaml
new file mode 100644
index 0000000000..ffa48e31d3
--- /dev/null
+++ b/helm/apitestrig/Chart.yaml
@@ -0,0 +1,19 @@
+apiVersion: v2
+name: apitestrig
+description: A Helm chart to deploy APITESTRIG for MOSIP modules
+type: application
+version: 0.0.1-develop
+appVersion: ""
+dependencies:
+ - name: common
+ repository: https://charts.bitnami.com/bitnami
+ tags:
+ - bitnami-common
+ version: 1.x.x
+home: https://mosip.io
+keywords:
+ - mosip
+ - apitestrig
+maintainers:
+ - email: info@mosip.io
+ name: MOSIP
diff --git a/helm/apitestrig/README.md b/helm/apitestrig/README.md
new file mode 100644
index 0000000000..25c35e3591
--- /dev/null
+++ b/helm/apitestrig/README.md
@@ -0,0 +1,10 @@
+# APITESTRIG
+
+Helm chart to deploy APITESTRIG for `MOSIP` modules
+
+## TL;DR
+
+```console
+$ helm repo add mosip https://mosip.github.io
+$ helm install my-release mosip/apitestrig
+```
diff --git a/helm/apitestrig/templates/NOTES.txt b/helm/apitestrig/templates/NOTES.txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/helm/apitestrig/templates/NOTES.txt
@@ -0,0 +1 @@
+
diff --git a/helm/apitestrig/templates/_helpers.tpl b/helm/apitestrig/templates/_helpers.tpl
new file mode 100644
index 0000000000..d99caf0c43
--- /dev/null
+++ b/helm/apitestrig/templates/_helpers.tpl
@@ -0,0 +1,63 @@
+{{/*
+Return the proper image name
+*/}}
+{{- define "apitestrig.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "apitestrig.volumePermissions.image" -}}
+{{- include "common.images.image" ( dict "imageRoot" .Values.volumePermissions.image "global" .Values.global ) -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "apitestrig.imagePullSecrets" -}}
+{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "apitestrig.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (printf "%s" (include "common.names.fullname" .)) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message.
+*/}}
+{{- define "apitestrig.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "apitestrig.validateValues.foo" .) -}}
+{{- $messages := append $messages (include "apitestrig.validateValues.bar" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return podAnnotations
+*/}}
+{{- define "apitestrig.podAnnotations" -}}
+{{- if .Values.podAnnotations }}
+{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }}
+{{- end }}
+{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }}
+{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }}
+{{- end }}
+{{- end -}}
+
+{{/* Create the name for restart cronjob */}}
+{{- define "apitestrig.cronjob" -}}
+{{ default (printf "cronjob-%s" (include "common.names.fullname" .)) .Values.serviceAccount.name }}
+{{- end -}}
\ No newline at end of file
diff --git a/helm/apitestrig/templates/clusterrole.yaml b/helm/apitestrig/templates/clusterrole.yaml
new file mode 100644
index 0000000000..da268fdf58
--- /dev/null
+++ b/helm/apitestrig/templates/clusterrole.yaml
@@ -0,0 +1,10 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "apitestrig.serviceAccountName" . }}-{{ .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+rules:
+ - apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get","patch","list","watch"]
diff --git a/helm/apitestrig/templates/clusterrolebinding.yaml b/helm/apitestrig/templates/clusterrolebinding.yaml
new file mode 100644
index 0000000000..12594c8d18
--- /dev/null
+++ b/helm/apitestrig/templates/clusterrolebinding.yaml
@@ -0,0 +1,19 @@
+kind: ClusterRoleBinding
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+metadata:
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ name: {{ template "common.names.fullname" . }}-{{ .Release.Namespace }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "apitestrig.serviceAccountName" . }}-{{ .Release.Namespace }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "apitestrig.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
diff --git a/helm/apitestrig/templates/configmaps.yaml b/helm/apitestrig/templates/configmaps.yaml
new file mode 100644
index 0000000000..4925083776
--- /dev/null
+++ b/helm/apitestrig/templates/configmaps.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.apitestrig.configmaps }}
+{{- range $cm_name, $cm_value := .Values.apitestrig.configmaps }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ $cm_name }}
+ namespace: {{ $.Release.Namespace }}
+ labels: {{- include "common.labels.standard" $ | nindent 8 }}
+ {{- if $.Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if $.Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 8 }}
+ {{- end }}
+data:
+ {{- range $key, $value := $cm_value }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+{{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/helm/apitestrig/templates/cronjob.yaml b/helm/apitestrig/templates/cronjob.yaml
new file mode 100644
index 0000000000..cb3ce9a2cc
--- /dev/null
+++ b/helm/apitestrig/templates/cronjob.yaml
@@ -0,0 +1,108 @@
+{{- range $modulename, $module := $.Values.modules }}
+{{- if $module.enabled }}
+---
+apiVersion: {{ include "common.capabilities.cronjob.apiVersion" $ }}
+kind: CronJob
+metadata:
+ name: {{ template "apitestrig.cronjob" $ }}-{{ $modulename }}
+ namespace: {{ $.Release.Namespace }}
+ annotations:
+ {{- if $.Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ labels: {{- include "common.labels.standard" $ | nindent 4 }}
+ {{- if $.Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+
+spec:
+ concurrencyPolicy: Forbid
+ successfulJobsHistoryLimit: 1 # remove jobs which are successfully executed
+ failedJobsHistoryLimit: 1 # except 1 recent failed job, remove jobs which are not successfully executed
+ #schedule: '*/3 * * * *' # cron spec of time, here, 8 o'clock
+  schedule: {{ $.Values.crontime | quote }}
+ jobTemplate:
+ spec:
+ backoffLimit: 0 # this has very low chance of failing, as all this does
+ # is prompt kubernetes to schedule new replica set for
+ # the deployment
+ # activeDeadlineSeconds: 600 # timeout, makes most sense with
+ # "waiting for rollout" variant specified below
+ template:
+ spec:
+ # account configured above
+ restartPolicy: Never
+ serviceAccountName: {{ template "apitestrig.serviceAccountName" $ }}
+ initContainers:
+ {{- if $.Values.enable_insecure }}
+ {{- include "common.tplvalues.render" (dict "value" $.Values.initContainers "context" $) | nindent 12 }}
+ {{- end }}
+ containers:
+ - name: {{ template "apitestrig.serviceAccountName" $ }}-{{ $modulename }}
+ image: {{ $module.image.repository }}:{{ $module.image.tag }}
+ imagePullPolicy: {{ $module.image.pullPolicy }}
+ {{- if $.Values.lifecycleHooks }}
+            lifecycle: {{- include "common.tplvalues.render" (dict "value" $.Values.lifecycleHooks "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if $.Values.containerSecurityContext.enabled }}
+ securityContext: {{- omit $.Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if $.Values.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" $.Values.command "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if $.Values.args }}
+ args: {{- include "common.tplvalues.render" (dict "value" $.Values.args "context" $) | nindent 12 }}
+ {{- end }}
+ env:
+ - name: container_user
+              value: "{{ $.Values.containerSecurityContext.runAsUser }}"
+ - name: JDK_JAVA_OPTIONS
+              value: "{{ $.Values.additionalResources.javaOpts }}"
+ - name: MODULES
+ value: {{ $modulename }}
+ {{- if $.Values.extraEnvVars }}
+            {{- include "common.tplvalues.render" (dict "value" $.Values.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+ envFrom:
+ {{- if $.Values.extraEnvVarsCM }}
+ {{- range $.Values.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ . }}
+ {{- end }}
+ {{- end }}
+ {{- if $.Values.extraEnvVarsSecret }}
+ {{- range $.Values.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ . }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: spring-service
+ containerPort: {{ $.Values.springServicePort }}
+ volumeMounts:
+ {{- if $.Values.enable_insecure }}
+ - mountPath: /usr/local/openjdk-11/lib/security/cacerts
+ name: cacerts
+ subPath: cacerts
+ {{- end }}
+ {{- if $.Values.apitestrig.volumes }}
+ {{- range $volume_name, $volume_value := $.Values.apitestrig.volumes.configmaps }}
+ - name: {{ $volume_name }}
+ mountPath: {{ $volume_value.volumeMounts.mountPath }}
+ {{- end }}
+ {{- end }}
+ volumes:
+ {{- if $.Values.enable_insecure }}
+ - name: cacerts
+ emptyDir: {}
+ {{- end }}
+ {{- if $.Values.apitestrig.volumes }}
+ {{- range $volume_name, $volume_value := $.Values.apitestrig.volumes.configmaps }}
+ - name: {{ $volume_name }}
+ configMap:
+ defaultMode: {{ $volume_value.defaultMode }}
+ name: {{ $volume_name }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/helm/apitestrig/templates/extra-list.yaml b/helm/apitestrig/templates/extra-list.yaml
new file mode 100644
index 0000000000..9ac65f9e16
--- /dev/null
+++ b/helm/apitestrig/templates/extra-list.yaml
@@ -0,0 +1,4 @@
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
+{{- end }}
diff --git a/helm/apitestrig/templates/secrets.yaml b/helm/apitestrig/templates/secrets.yaml
new file mode 100644
index 0000000000..1ef8dc9893
--- /dev/null
+++ b/helm/apitestrig/templates/secrets.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.apitestrig.secrets }}
+{{- range $secret_name, $secret_value := .Values.apitestrig.secrets }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ $secret_name }}
+ namespace: {{ $.Release.Namespace }}
+ labels: {{- include "common.labels.standard" $ | nindent 8 }}
+ {{- if $.Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if $.Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 8 }}
+ {{- end }}
+type: Opaque
+data:
+ {{- range $key, $value := $secret_value }}
+ {{ $key }}: {{ $value | b64enc | quote }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/helm/apitestrig/templates/service-account.yaml b/helm/apitestrig/templates/service-account.yaml
new file mode 100644
index 0000000000..466590df49
--- /dev/null
+++ b/helm/apitestrig/templates/service-account.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ name: {{ template "apitestrig.serviceAccountName" . }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ namespace: {{ .Release.Namespace }}
diff --git a/helm/apitestrig/values.yaml b/helm/apitestrig/values.yaml
new file mode 100644
index 0000000000..8b5beb6f8f
--- /dev/null
+++ b/helm/apitestrig/values.yaml
@@ -0,0 +1,545 @@
+## Global Docker image parameters
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+# global:
+# imageRegistry: myRegistryName
+# imagePullSecrets:
+# - myRegistryKeySecretName
+# storageClass: myStorageClass
+
+## Add labels to all the deployed resources
+##
+commonLabels:
+ app.kubernetes.io/component: mosip
+
+## Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+
+## Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+
+## Extra objects to deploy (value evaluated as a template)
+##
+extraDeploy: []
+
+## Number of nodes
+##
+replicaCount: 1
+
+service:
+ type: ClusterIP
+ port: 80
+ ## loadBalancerIP for the SuiteCRM Service (optional, cloud specific)
+ ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
+ ##
+ ## loadBalancerIP:
+ ##
+ ## nodePorts:
+ ## http:
+ ## https:
+ ##
+ nodePorts:
+ http: ""
+ https: ""
+ ## Enable client source IP preservation
+ ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+
+## Port on which this particular spring service module is running.
+springServicePort: 8083
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+##
+
+##
+# existingConfigmap:
+
+## Command and args for running the container (set to default if not set). Use array form
+##
+command: ['/bin/bash']
+args: ['-c', "/home/${container_user}/scripts/fetch_docker_image_hash_ids.sh"]
+
+## Deployment pod host aliases
+## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+##
+hostAliases: []
+
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+##
+resources:
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ limits:
+ cpu: 1500m
+ memory: 3500Mi
+ requests:
+ cpu: 1000m
+ memory: 3500Mi
+
+additionalResources:
+ ## Specify any JAVA_OPTS string here. These typically will be specified in conjunction with above resources
+ ## Example: java_opts: "-Xms500M -Xmx500M"
+ javaOpts: "-Xms2600M -Xmx2600M"
+
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+## Clamav container already runs as 'mosip' user, so we may not need to enable this
+containerSecurityContext:
+ enabled: false
+ runAsUser: mosip
+ runAsNonRoot: true
+
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+##
+podSecurityContext:
+ enabled: false
+ fsGroup: 1001
+
+## Pod affinity preset
+## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+## Allowed values: soft, hard
+##
+podAffinityPreset: ""
+
+## Pod anti-affinity preset
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+## Allowed values: soft, hard
+##
+podAntiAffinityPreset: soft
+
+## Node affinity preset
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+## Allowed values: soft, hard
+##
+nodeAffinityPreset:
+ ## Node affinity type
+ ## Allowed values: soft, hard
+ ##
+ type: ""
+ ## Node label key to match
+ ## E.g.
+ ## key: "kubernetes.io/e2e-az-name"
+ ##
+ key: ""
+ ## Node label values to match
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+
+## Affinity for pod assignment. Evaluated as a template.
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+##
+affinity: {}
+
+## Node labels for pod assignment. Evaluated as a template.
+## ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+
+## Tolerations for pod assignment. Evaluated as a template.
+## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+
+## Pod extra labels
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+##
+podLabels: {}
+
+## Annotations for server pods.
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+podAnnotations: {}
+
+## pods' priority.
+## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+##
+# priorityClassName: ""
+
+## lifecycleHooks for the container to automate configuration before or after startup.
+##
+lifecycleHooks: {}
+
+## Custom Liveness probes for
+##
+customLivenessProbe: {}
+
+## Custom Rediness probes
+##
+customReadinessProbe: {}
+
+## Update strategy - only really applicable for deployments with RWO PVs attached
+## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
+## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
+## terminate the single previous pod, so that the new, incoming pod can attach to the PV
+##
+updateStrategy:
+ type: RollingUpdate
+
+## Additional environment variables to set
+## Example:
+## extraEnvVars:
+## - name: FOO
+## value: "bar"
+##
+extraEnvVars: []
+
+## ConfigMap with extra environment variables
+##
+extraEnvVarsCM:
+ - global
+ - s3
+ - keycloak-host
+ - db
+ - apitestrig
+ - config-server-share
+ - artifactory-share
+## Secret with extra environment variables
+##
+extraEnvVarsSecret:
+ - apitestrig
+ - s3
+ - keycloak-client-secrets
+ - postgres-postgresql
+
+## Extra volumes to add to the deployment
+##
+extraVolumes: []
+
+## Extra volume mounts to add to the container
+##
+extraVolumeMounts: []
+
+## Add init containers to the pods.
+## Example:
+## initContainers:
+## - name: your-image-name
+## image: your-image
+## imagePullPolicy: Always
+## ports:
+## - name: portname
+## containerPort: 1234
+##
+initContainers:
+ - command:
+ - /bin/bash
+ - -c
+ - if [ "$ENABLE_INSECURE" = "true" ]; then HOST=$( env | grep "mosip-api-internal-host"
+ |sed "s/mosip-api-internal-host=//g"); if [ -z "$HOST" ]; then echo "HOST
+ $HOST is empty; EXITING"; exit 1; fi; openssl s_client -servername "$HOST"
+ -connect "$HOST":443 > "$HOST.cer" 2>/dev/null & sleep 2 ; sed -i -ne '/-BEGIN
+ CERTIFICATE-/,/-END CERTIFICATE-/p' "$HOST.cer"; cat "$HOST.cer"; /usr/local/openjdk-11/bin/keytool
+ -delete -alias "$HOST" -keystore $JAVA_HOME/lib/security/cacerts -storepass
+ changeit; /usr/local/openjdk-11/bin/keytool -trustcacerts -keystore "$JAVA_HOME/lib/security/cacerts"
+ -storepass changeit -noprompt -importcert -alias "$HOST" -file "$HOST.cer"
+      ; if [ $? -gt 0 ]; then echo "Failed to add SSL certificate for host $HOST;
+ EXITING"; exit 1; fi; cp /usr/local/openjdk-11/lib/security/cacerts /cacerts;
+ fi
+ env:
+ - name: ENABLE_INSECURE
+ value: "true"
+ envFrom:
+ - configMapRef:
+ name: global
+ image: docker.io/openjdk:11-jre
+ imagePullPolicy: Always
+ name: cacerts
+ resources: {}
+ securityContext:
+ runAsUser: 0
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /cacerts
+ name: cacerts
+
+## Add sidecars to the pods.
+## Example:
+## sidecars:
+## - name: your-image-name
+## image: your-image
+## imagePullPolicy: Always
+## ports:
+## - name: portname
+## containerPort: 1234
+##
+sidecars: {}
+
+persistence:
+ enabled: false
+ ## If defined, storageClassName:
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack).
+ ##
+ # storageClass: "-"
+ ##
+ ## If you want to reuse an existing claim, you can pass the name of the PVC using
+ ## the existingClaim variable
+ # existingClaim: your-claim
+ ## ReadWriteMany not supported by AWS gp2
+ storageClass:
+ accessModes:
+ - ReadWriteOnce
+ size: 10M
+ existingClaim:
+ # Dir where config and keys are written inside container
+ mountDir:
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
+##
+volumePermissions:
+ enabled: false
+ image:
+ registry: docker.io
+ repository: bitnami/bitnami-shell
+ tag: "10"
+ pullPolicy: Always
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ pullSecrets: []
+ ## - myRegistryKeySecretName
+ ## Init containers' resource requests and limits
+ ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ ##
+ resources:
+ ## We usually recommend not to specify default resources and to leave this as a conscious
+ ## choice for the user. This also increases chances charts run on environments with little
+ ## resources, such as Minikube. If you do want to specify resources, uncomment the following
+ ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ ##
+ limits: {}
+ ## cpu: 100m
+ ## memory: 128Mi
+ ##
+ requests: {}
+ ## cpu: 100m
+ ## memory: 128Mi
+ ##
+
+## Specifies whether RBAC resources should be created
+##
+rbac:
+ create: true
+
+## Specifies whether a ServiceAccount should be created
+##
+serviceAccount:
+ create: true
+ ## The name of the ServiceAccount to use.
+ ## If not set and create is true, a name is generated using the fullname template
+ ##
+ name:
+
+## Prometheus Metrics
+##
+metrics:
+ enabled: false
+ ## Prometheus pod annotations
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+ ##
+ podAnnotations:
+ prometheus.io/scrape: "true"
+
+ endpointPath:
+
+ ## Prometheus Service Monitor
+ ## ref: https://github.com/coreos/prometheus-operator
+ ##
+ serviceMonitor:
+ ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry
+ ##
+ enabled: true
+ ## Specify the namespace in which the serviceMonitor resource will be created
+ ##
+ # namespace: ""
+ ## Specify the interval at which metrics should be scraped
+ ##
+ interval: 10s
+ ## Specify the timeout after which the scrape is ended
+ ##
+ # scrapeTimeout: 30s
+ ## Specify Metric Relabellings to add to the scrape endpoint
+ ##
+ # relabellings:
+ ## Specify honorLabels parameter to add the scrape endpoint
+ ##
+ honorLabels: false
+ ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
+ ##
+ additionalLabels: {}
+
+ ## Custom PrometheusRule to be defined
+ ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+ ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+ ##
+ prometheusRule:
+ enabled: false
+ additionalLabels: {}
+ namespace: ''
+ ## List of rules, used as template by Helm.
+ ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html
+ # rules:
+ # - alert: RabbitmqDown
+ # expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0
+ # for: 5m
+ # labels:
+ # severity: error
+ rules: []
+
+## Admin swagger should have only internal access. Hence linked to internal gateway
+istio:
+ enabled: false
+ gateways:
+ - istio-system/internal
+ prefix:
+ corsPolicy:
+ allowOrigins:
+ - prefix: https://api-internal.sandbox.xyz.net
+ allowCredentials: true
+ allowHeaders:
+ - Accept
+ - Accept-Encoding
+ - Accept-Language
+ - Connection
+ - Content-Type
+ - Cookie
+ - Host
+ - Referer
+ - Sec-Fetch-Dest
+ - Sec-Fetch-Mode
+ - Sec-Fetch-Site
+ - Sec-Fetch-User
+ - Origin
+ - Upgrade-Insecure-Requests
+ - User-Agent
+ - sec-ch-ua
+ - sec-ch-ua-mobile
+ - sec-ch-ua-platform
+ - x-xsrf-token
+ - xsrf-token
+ allowMethods:
+ - GET
+ - POST
+ - PATCH
+ - PUT
+ - DELETE
+
+modules:
+ prereg:
+ enabled: true
+ image:
+ repository: mosipqa/apitest-prereg
+ tag: develop
+ pullPolicy: Always
+ masterdata:
+ enabled: true
+ image:
+ repository: mosipqa/apitest-masterdata
+ tag: develop
+ pullPolicy: Always
+ idrepo:
+ enabled: true
+ image:
+ repository: mosipqa/apitest-idrepo
+ tag: develop
+ pullPolicy: Always
+ partner:
+ enabled: true
+ image:
+ repository: mosipqa/apitest-pms
+ tag: develop
+ pullPolicy: Always
+ pms:
+ enabled: true
+ image:
+ repository: mosipdev/apitest-pms
+ tag: develop
+ pullPolicy: Always
+ resident:
+ enabled: true
+ image:
+ repository: mosipqa/apitest-resident
+ tag: develop
+ pullPolicy: Always
+ auth:
+ enabled: true
+ image:
+ repository: mosipqa/apitest-auth
+ tag: develop
+ pullPolicy: Always
+ esignet:
+ enabled: true
+ image:
+ repository: mosipqa/apitest-esignet
+ tag: develop
+ pullPolicy: Always
+ mimoto:
+ enabled: true
+ image:
+ repository: mosipqa/apitest-mimoto
+ tag: develop
+ pullPolicy: Always
+
+crontime: "0 3 * * *" ## run cronjob every day at 3 AM (time hr: 0-23 )
+
+apitestrig:
+ configmaps:
+ s3:
+ s3-host: 'http://minio.minio:9000'
+ s3-user-key: 'admin'
+ s3-region: ''
+ db:
+ db-port: '5432'
+ db-su-user: 'postgres'
+ db-server: 'api-internal.sandbox.xyz.net'
+ apitestrig:
+ ENV_USER: 'api-internal.sandbox'
+ ENV_ENDPOINT: 'https://api-internal.sandbox.xyz.net'
+ ENV_TESTLEVEL: 'smokeAndRegression'
+ authDemoServiceBaseURL: http://authdemo.authdemo
+ authDemoServicePort: 80
+    eSignetDeployed: 'yes or no' ## set to 'yes' or 'no'; keep the quotes so YAML does not coerce yes/no to booleans
+ push-reports-to-s3: 'yes'
+ authCertsPath: '/home/mosip/authcerts'
+ scripts:
+ fetch_docker_image_hash_ids.sh: |
+ #!/bin/bash
+ sleep 5
+ export DOCKER_HASH_ID=$( kubectl get pod "$HOSTNAME" -n "$NS" -o jsonpath='{.status.containerStatuses[*].imageID}' | sed 's/ /\n/g' | grep -v 'istio' | sed 's/docker\-pullable\:\/\///g' )
+ export DOCKER_IMAGE=$( kubectl get pod "$HOSTNAME" -n "$NS" -o jsonpath='{.status.containerStatuses[*].image}' | sed 's/ /\n/g' | grep -v 'istio' | sed 's/docker\-pullable\:\/\///g' )
+ if [[ -z $DOCKER_HASH_ID ]]; then
+ echo "DOCKER_HASH_ID IS EMPTY;EXITING";
+ exit 1;
+ fi
+ echo "DOCKER_HASH_ID ; $DOCKER_HASH_ID"
+ echo "DOCKER_IMAGE : $DOCKER_IMAGE"
+ kubectl get pods -A -o=jsonpath='{range .items[*]}{.metadata.namespace}{","}{.metadata.labels.app\.kubernetes\.io\/name}{","}{.status.containerStatuses[?(@.name!="istio-proxy")].image}{","}{.status.containerStatuses[?(@.name!="istio-proxy")].imageID}{","}{.metadata.creationTimestamp}{"\n"}' | sed 's/ /\n/g' | grep -vE 'istio*|longhorn*|cattle*|rancher|kube' | sed 's/docker\-pullable\:\/\///g' | sort -u | sed '/,,,/d' | awk -F ',' 'BEGIN {print "{ \"POD_NAME\": \"'$(echo $HOSTNAME)'\", \"DOCKER_IMAGE\": \"'$(echo $DOCKER_IMAGE)'\", \"DOCKER_HASH_ID\": \"'$(echo $DOCKER_HASH_ID)'\", \"k8s-cluster-image-list\": ["} {print "{"} {print "\"namespace\": \"" $1 "\","} {print "\"app_name\": \"" $2 "\","} {print "\"docker_image_name\": \"" $3 "\","} {print "\"docker_image_id\": \"" $4 "\","} {print "\"creation_timestamp\": \"" $5 "\"" } {print "},"} END {print "]}"}' | sed -z 's/},\n]/}\n]/g' | jq -r . | tee -a images-list.json
+ ## run entrypoint script
+ sleep 5
+ cd /home/${container_user}/
+ bash ./entrypoint.sh
+ secrets:
+ apitestrig:
+ volumes:
+ configmaps:
+ scripts:
+ defaultMode: 0777
+ volumeMounts:
+ mountPath: '/home/mosip/scripts/'
+
+enable_insecure: false
diff --git a/helm/uitestrig/.helmignore b/helm/uitestrig/.helmignore
new file mode 100644
index 0000000000..f0c1319444
--- /dev/null
+++ b/helm/uitestrig/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/helm/uitestrig/Chart.yaml b/helm/uitestrig/Chart.yaml
new file mode 100644
index 0000000000..8763cc302a
--- /dev/null
+++ b/helm/uitestrig/Chart.yaml
@@ -0,0 +1,20 @@
+apiVersion: v2
+name: uitestrig
+description: A Helm chart to deploy uitestrig to test working of MOSIP modules
+type: application
+version: 0.0.1-develop
+appVersion: ""
+dependencies:
+ - name: common
+ repository: https://charts.bitnami.com/bitnami
+ tags:
+ - bitnami-common
+ version: 1.x.x
+home: https://mosip.io
+keywords:
+ - mosip
+ - uitestrig
+ - testrig
+maintainers:
+ - email: info@mosip.io
+ name: MOSIP
diff --git a/helm/uitestrig/README.md b/helm/uitestrig/README.md
new file mode 100644
index 0000000000..c313265108
--- /dev/null
+++ b/helm/uitestrig/README.md
@@ -0,0 +1,10 @@
+# UITESTRIG
+
+Helm chart to deploy UITESTRIG for `MOSIP` modules
+
+## TL;DR
+
+```console
+$ helm repo add mosip https://mosip.github.io
+$ helm install my-release mosip/uitestrig
+```
diff --git a/helm/uitestrig/templates/NOTES.txt b/helm/uitestrig/templates/NOTES.txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/helm/uitestrig/templates/NOTES.txt
@@ -0,0 +1 @@
+
diff --git a/helm/uitestrig/templates/_helpers.tpl b/helm/uitestrig/templates/_helpers.tpl
new file mode 100644
index 0000000000..4a344cd5ff
--- /dev/null
+++ b/helm/uitestrig/templates/_helpers.tpl
@@ -0,0 +1,63 @@
+{{/*
+Return the proper image name
+*/}}
+{{- define "uitestrig.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "uitestrig.volumePermissions.image" -}}
+{{- include "common.images.image" ( dict "imageRoot" .Values.volumePermissions.image "global" .Values.global ) -}}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "uitestrig.imagePullSecrets" -}}
+{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "uitestrig.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (printf "%s" (include "common.names.fullname" .)) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message.
+*/}}
+{{- define "uitestrig.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "uitestrig.validateValues.foo" .) -}}
+{{- $messages := append $messages (include "uitestrig.validateValues.bar" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return podAnnotations
+*/}}
+{{- define "uitestrig.podAnnotations" -}}
+{{- if .Values.podAnnotations }}
+{{ include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) }}
+{{- end }}
+{{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }}
+{{ include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) }}
+{{- end }}
+{{- end -}}
+
+{{/* Create the name for restart cronjob */}}
+{{- define "uitestrig.cronjob" -}}
+{{ default (printf "cronjob-%s" (include "common.names.fullname" .)) .Values.serviceAccount.name }}
+{{- end -}}
diff --git a/helm/uitestrig/templates/clusterrole.yaml b/helm/uitestrig/templates/clusterrole.yaml
new file mode 100644
index 0000000000..9ba5e13433
--- /dev/null
+++ b/helm/uitestrig/templates/clusterrole.yaml
@@ -0,0 +1,10 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ template "uitestrig.serviceAccountName" . }}-{{ .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+rules:
+ - apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["get","patch","list","watch"]
diff --git a/helm/uitestrig/templates/clusterrolebinding.yaml b/helm/uitestrig/templates/clusterrolebinding.yaml
new file mode 100644
index 0000000000..13f43a28ab
--- /dev/null
+++ b/helm/uitestrig/templates/clusterrolebinding.yaml
@@ -0,0 +1,19 @@
+kind: ClusterRoleBinding
+apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }}
+metadata:
+ labels: {{- include "common.labels.standard" . | nindent 4 }}
+ {{- if .Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ name: {{ template "common.names.fullname" . }}-{{ .Release.Namespace }}
+ {{- if .Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ template "uitestrig.serviceAccountName" . }}-{{ .Release.Namespace }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ template "uitestrig.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
diff --git a/helm/uitestrig/templates/configmaps.yaml b/helm/uitestrig/templates/configmaps.yaml
new file mode 100644
index 0000000000..30d26167fe
--- /dev/null
+++ b/helm/uitestrig/templates/configmaps.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.uitestrig.configmaps }}
+{{- range $cm_name, $cm_value := .Values.uitestrig.configmaps }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ $cm_name }}
+ namespace: {{ $.Release.Namespace }}
+ labels: {{- include "common.labels.standard" $ | nindent 8 }}
+ {{- if $.Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if $.Values.commonAnnotations }}
+ annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 8 }}
+ {{- end }}
+data:
+ {{- range $key, $value := $cm_value }}
+ {{ $key }}: {{ $value | quote }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/helm/uitestrig/templates/cronjob.yaml b/helm/uitestrig/templates/cronjob.yaml
new file mode 100644
index 0000000000..02bdf3a7e0
--- /dev/null
+++ b/helm/uitestrig/templates/cronjob.yaml
@@ -0,0 +1,106 @@
+{{- range $module := $.Values.modules }}
+{{- if $module.enabled }}
+---
+apiVersion: {{ include "common.capabilities.cronjob.apiVersion" $ }}
+kind: CronJob
+metadata:
+ name: {{ template "uitestrig.cronjob" $ }}-{{ $module.name }}
+ namespace: {{ $.Release.Namespace }}
+ annotations:
+ {{- if $.Values.commonAnnotations }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ labels: {{- include "common.labels.standard" $ | nindent 4 }}
+ {{- if $.Values.commonLabels }}
+ {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+spec:
+ concurrencyPolicy: Forbid
+ successfulJobsHistoryLimit: 1 # remove jobs which are successfully executed
+ failedJobsHistoryLimit: 1 # except 1 recent failed job, remove jobs which are not successfully executed
+ #schedule: '*/3 * * * *' # cron spec of time
+  schedule: {{ $.Values.crontime | quote }}
+ jobTemplate:
+ spec:
+ backoffLimit: 0 # this has very low chance of failing, as all this does
+ # is prompt kubernetes to schedule new replica set for
+ # the deployment
+ # activeDeadlineSeconds: 600 # timeout, makes most sense with
+ # "waiting for rollout" variant specified below
+ template:
+ spec:
+ # account configured above
+ serviceAccountName: {{ template "uitestrig.serviceAccountName" $ }}
+ restartPolicy: Never
+ containers:
+ - name: {{ $module.name }}
+ image: {{ $module.image.registry }}/{{ $module.image.repository }}:{{ $module.image.tag }}
+ imagePullPolicy: {{ $module.image.pullPolicy }}
+ {{- if $.Values.lifecycleHooks }}
+            lifecycle: {{- include "common.tplvalues.render" (dict "value" $.Values.lifecycleHooks "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if $.Values.containerSecurityContext.enabled }}
+ securityContext: {{- omit $.Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+ {{- end }}
+ {{- if $.Values.command }}
+ command: {{- include "common.tplvalues.render" (dict "value" $.Values.command "context" $) | nindent 12 }}
+ {{- end }}
+ {{- if $.Values.args }}
+ args: {{- include "common.tplvalues.render" (dict "value" $.Values.args "context" $) | nindent 12 }}
+ {{- end }}
+ env:
+ - name: container_user
+ value: "{{ $.Values.containerSecurityContext.runAsUser }}"
+ - name: JDK_JAVA_OPTIONS
+ value: "{{ $.Values.additionalResources.javaOpts }}"
+ - name: modules
+ value: "{{ $module.name }}"
+ {{- if $.Values.extraEnvVars }}
+            {{- include "common.tplvalues.render" (dict "value" $.Values.extraEnvVars "context" $) | nindent 12 }}
+ {{- end }}
+
+ envFrom:
+ {{- if $.Values.extraEnvVarsCM }}
+ {{- range $.Values.extraEnvVarsCM }}
+ - configMapRef:
+ name: {{ . }}
+ {{- end }}
+ {{- end }}
+ {{- if $.Values.extraEnvVarsSecret }}
+ {{- range $.Values.extraEnvVarsSecret }}
+ - secretRef:
+ name: {{ . }}
+ {{- end }}
+ {{- end }}
+ ports:
+ - name: spring-service
+ containerPort: {{ $.Values.springServicePort }}
+ volumeMounts:
+ {{- if $.Values.enable_insecure }}
+ - mountPath: /usr/local/openjdk-11/lib/security/cacerts
+ name: cacerts
+ subPath: cacerts
+ {{- end }}
+ {{- if $.Values.uitestrig.volumes }}
+ {{- range $volume_name, $volume_value := $.Values.uitestrig.volumes.configmaps }}
+ - name: {{ $volume_name }}
+ mountPath: {{ $volume_value.volumeMounts.mountPath }}
+ {{- end }}
+ {{- end }}
+ volumes:
+ {{- if $.Values.enable_insecure }}
+ - name: cacerts
+ emptyDir: {}
+ {{- end }}
+ {{- if $.Values.uitestrig.volumes }}
+ {{- range $volume_name, $volume_value := $.Values.uitestrig.volumes.configmaps }}
+ - name: {{ $volume_name }}
+ configMap:
+ defaultMode: {{ $volume_value.defaultMode }}
+ name: {{ $volume_name }}
+ {{- end }}
+ {{- end }}
+{{- end }}
+{{- end }}
+
+
diff --git a/helm/uitestrig/templates/extra-list.yaml b/helm/uitestrig/templates/extra-list.yaml
new file mode 100644
index 0000000000..9ac65f9e16
--- /dev/null
+++ b/helm/uitestrig/templates/extra-list.yaml
@@ -0,0 +1,4 @@
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
+{{- end }}
diff --git a/helm/uitestrig/templates/secrets.yaml b/helm/uitestrig/templates/secrets.yaml
new file mode 100644
index 0000000000..8200c6701d
--- /dev/null
+++ b/helm/uitestrig/templates/secrets.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.uitestrig.secrets }}
+{{- range $secret_name, $secret_value := .Values.uitestrig.secrets }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ $secret_name }}
+  namespace: {{ $.Release.Namespace }}
+  labels: {{- include "common.labels.standard" $ | nindent 4 }}
+    {{- if $.Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if $.Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  {{- range $key, $value := $secret_value }}
+  {{ $key }}: {{ $value | b64enc | quote }}
+  {{- end }}
+{{- end }}
+{{- end }}
diff --git a/helm/uitestrig/templates/service-account.yaml b/helm/uitestrig/templates/service-account.yaml
new file mode 100644
index 0000000000..28bff8af43
--- /dev/null
+++ b/helm/uitestrig/templates/service-account.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  name: {{ template "uitestrig.serviceAccountName" . }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+  namespace: {{ .Release.Namespace }}
diff --git a/helm/uitestrig/values.yaml b/helm/uitestrig/values.yaml
new file mode 100644
index 0000000000..75e39c80a1
--- /dev/null
+++ b/helm/uitestrig/values.yaml
@@ -0,0 +1,511 @@
+## Global Docker image parameters
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+# global:
+# imageRegistry: myRegistryName
+# imagePullSecrets:
+# - myRegistryKeySecretName
+# storageClass: myStorageClass
+
+## Add labels to all the deployed resources
+##
+commonLabels:
+ app.kubernetes.io/component: mosip
+
+## Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+
+## Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+
+## Extra objects to deploy (value evaluated as a template)
+##
+extraDeploy: []
+
+## Number of nodes
+##
+replicaCount: 1
+
+service:
+ type: ClusterIP
+ port: 80
+ ## loadBalancerIP for the SuiteCRM Service (optional, cloud specific)
+ ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer
+ ##
+ ## loadBalancerIP:
+ ##
+ ## nodePorts:
+ ## http:
+ ## https:
+ ##
+ nodePorts:
+ http: ""
+ https: ""
+ ## Enable client source IP preservation
+ ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+ ##
+ externalTrafficPolicy: Cluster
+
+## Port on which this particular spring service module is running.
+springServicePort: 8083
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+##
+
+##
+# existingConfigmap:
+
+## Command and args for running the container (set to default if not set). Use array form
+##
+command: ['/bin/bash']
+args: ['-c', "/home/${container_user}/scripts/fetch_docker_image_hash_ids.sh"]
+
+## Deployment pod host aliases
+## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+##
+hostAliases: []
+
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+##
+resources:
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ limits:
+ cpu: 1000m
+ memory: 3500Mi
+ requests:
+ cpu: 1000m
+ memory: 3500Mi
+
+additionalResources:
+ ## Specify any JAVA_OPTS string here. These typically will be specified in conjunction with above resources
+ ## Example: java_opts: "-Xms500M -Xmx500M"
+ javaOpts: "-Xms3500M -Xmx3500M"
+
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+## The uitestrig container already runs as the 'mosip' user, so this may not need to be enabled
+containerSecurityContext:
+  enabled: false
+  runAsUser: mosip  # NOTE(review): Kubernetes runAsUser expects a numeric UID; this string is also consumed as the 'container_user' env value (home-dir name) by the deployment template — confirm a valid UID before setting enabled: true
+  runAsNonRoot: true
+
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+##
+podSecurityContext:
+ enabled: false
+ fsGroup: 1001
+
+## Pod affinity preset
+## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+## Allowed values: soft, hard
+##
+podAffinityPreset: ""
+
+## Pod anti-affinity preset
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+## Allowed values: soft, hard
+##
+podAntiAffinityPreset: soft
+
+## Node affinity preset
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+## Allowed values: soft, hard
+##
+nodeAffinityPreset:
+ ## Node affinity type
+ ## Allowed values: soft, hard
+ ##
+ type: ""
+ ## Node label key to match
+ ## E.g.
+ ## key: "kubernetes.io/e2e-az-name"
+ ##
+ key: ""
+ ## Node label values to match
+ ## E.g.
+ ## values:
+ ## - e2e-az1
+ ## - e2e-az2
+ ##
+ values: []
+
+## Affinity for pod assignment. Evaluated as a template.
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+##
+affinity: {}
+
+## Node labels for pod assignment. Evaluated as a template.
+## ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+
+## Tolerations for pod assignment. Evaluated as a template.
+## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+
+## Pod extra labels
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+##
+podLabels: {}
+
+## Annotations for server pods.
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+podAnnotations: {}
+
+## pods' priority.
+## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+##
+# priorityClassName: ""
+
+## lifecycleHooks for the container to automate configuration before or after startup.
+##
+lifecycleHooks: {}
+
+## Custom liveness probes for the container
+##
+customLivenessProbe: {}
+
+## Custom readiness probes
+##
+customReadinessProbe: {}
+
+## Update strategy - only really applicable for deployments with RWO PVs attached
+## If replicas = 1, an update can get "stuck", as the previous pod remains attached to the
+## PV, and the "incoming" pod can never start. Changing the strategy to "Recreate" will
+## terminate the single previous pod, so that the new, incoming pod can attach to the PV
+##
+updateStrategy:
+ type: RollingUpdate
+
+## Additional environment variables to set
+## Example:
+## extraEnvVars:
+## - name: FOO
+## value: "bar"
+##
+extraEnvVars: []
+
+## ConfigMap with extra environment variables
+##
+extraEnvVarsCM:
+ - global
+ - s3
+ - keycloak-host
+ - db
+ - uitestrig
+ - config-server-share
+ - artifactory-share
+## Secret with extra environment variables
+##
+extraEnvVarsSecret:
+ - s3
+ - keycloak-client-secrets
+ - postgres-postgresql
+
+## Extra volumes to add to the deployment
+##
+extraVolumes: []
+
+## Extra volume mounts to add to the container
+##
+extraVolumeMounts: []
+
+## Add init containers to the pods.
+## Example:
+## initContainers:
+## - name: your-image-name
+## image: your-image
+## imagePullPolicy: Always
+## ports:
+## - name: portname
+## containerPort: 1234
+##
+initContainers:
+  - command:
+      - /bin/bash
+      - -c
+      - if [ "$ENABLE_INSECURE" = "true" ]; then HOST=$( env | grep "mosip-api-internal-host"
+        |sed "s/mosip-api-internal-host=//g"); if [ -z "$HOST" ]; then echo "HOST
+        $HOST is empty; EXITING"; exit 1; fi; openssl s_client -servername "$HOST"
+        -connect "$HOST":443 > "$HOST.cer" 2>/dev/null & sleep 2 ; sed -i -ne '/-BEGIN
+        CERTIFICATE-/,/-END CERTIFICATE-/p' "$HOST.cer"; cat "$HOST.cer"; /usr/local/openjdk-11/bin/keytool
+        -delete -alias "$HOST" -keystore $JAVA_HOME/lib/security/cacerts -storepass
+        changeit; /usr/local/openjdk-11/bin/keytool -trustcacerts -keystore "$JAVA_HOME/lib/security/cacerts"
+        -storepass changeit -noprompt -importcert -alias "$HOST" -file "$HOST.cer"
+        ; if [ $? -gt 0 ]; then echo "Failed to add SSL certificate for host $HOST;
+        EXITING"; exit 1; fi; cp /usr/local/openjdk-11/lib/security/cacerts /cacerts;
+        fi
+    env:
+      - name: ENABLE_INSECURE
+        value: "true"
+    envFrom:
+      - configMapRef:
+          name: global
+      - configMapRef:
+          name: uitestrig
+    image: docker.io/openjdk:11-jre
+    imagePullPolicy: Always
+    name: cacerts
+    resources: {}
+    securityContext:
+      runAsUser: 0
+    terminationMessagePath: /dev/termination-log
+    terminationMessagePolicy: File
+    volumeMounts:
+      - mountPath: /cacerts
+        name: cacerts
+
+## Add sidecars to the pods.
+## Example:
+## sidecars:
+## - name: your-image-name
+## image: your-image
+## imagePullPolicy: Always
+## ports:
+## - name: portname
+## containerPort: 1234
+##
+sidecars: {}
+
+persistence:
+ enabled: true
+ ## If defined, storageClassName:
+ ## If set to "-", storageClassName: "", which disables dynamic provisioning
+ ## If undefined (the default) or set to null, no storageClassName spec is
+ ## set, choosing the default provisioner. (gp2 on AWS, standard on
+ ## GKE, AWS & OpenStack).
+ ##
+ # storageClass: "-"
+ ##
+ ## If you want to reuse an existing claim, you can pass the name of the PVC using
+ ## the existingClaim variable
+ # existingClaim: your-claim
+ ## ReadWriteMany not supported by AWS gp2
+ storageClass:
+ accessModes:
+ - ReadWriteOnce
+ size: 100m
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
+##
+volumePermissions:
+ enabled: false
+ image:
+ registry: docker.io
+ repository: bitnami/bitnami-shell
+ tag: "10"
+ pullPolicy: Always
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ pullSecrets: []
+ ## - myRegistryKeySecretName
+ ## Init containers' resource requests and limits
+ ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ ##
+ resources:
+ ## We usually recommend not to specify default resources and to leave this as a conscious
+ ## choice for the user. This also increases chances charts run on environments with little
+ ## resources, such as Minikube. If you do want to specify resources, uncomment the following
+ ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ ##
+ limits: {}
+ ## cpu: 100m
+ ## memory: 128Mi
+ ##
+ requests: {}
+ ## cpu: 100m
+ ## memory: 128Mi
+ ##
+
+## Specifies whether RBAC resources should be created
+##
+rbac:
+ create: true
+
+## Specifies whether a ServiceAccount should be created
+##
+serviceAccount:
+ create: true
+ ## The name of the ServiceAccount to use.
+ ## If not set and create is true, a name is generated using the fullname template
+ ##
+ name:
+
+## Prometheus Metrics
+##
+metrics:
+ enabled: false
+ ## Prometheus pod annotations
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+ ##
+ podAnnotations:
+ prometheus.io/scrape: "true"
+
+ endpointPath:
+
+ ## Prometheus Service Monitor
+ ## ref: https://github.com/coreos/prometheus-operator
+ ##
+ serviceMonitor:
+ ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry
+ ##
+ enabled: true
+ ## Specify the namespace in which the serviceMonitor resource will be created
+ ##
+ # namespace: ""
+ ## Specify the interval at which metrics should be scraped
+ ##
+ interval: 10s
+ ## Specify the timeout after which the scrape is ended
+ ##
+ # scrapeTimeout: 30s
+ ## Specify Metric Relabellings to add to the scrape endpoint
+ ##
+ # relabellings:
+ ## Specify honorLabels parameter to add the scrape endpoint
+ ##
+ honorLabels: false
+ ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
+ ##
+ additionalLabels: {}
+
+ ## Custom PrometheusRule to be defined
+ ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+ ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+ ##
+ prometheusRule:
+ enabled: false
+ additionalLabels: {}
+ namespace: ''
+ ## List of rules, used as template by Helm.
+ ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html
+ # rules:
+ # - alert: RabbitmqDown
+ # expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0
+ # for: 5m
+ # labels:
+ # severity: error
+ rules: []
+
+## The uitestrig service should have only internal access; hence it is linked to the internal gateway
+istio:
+ enabled: false
+ gateways:
+ - istio-system/internal
+ prefix:
+ corsPolicy:
+ allowOrigins:
+ - prefix: https://api-internal.sandbox.xyz.net
+ allowCredentials: true
+ allowHeaders:
+ - Accept
+ - Accept-Encoding
+ - Accept-Language
+ - Connection
+ - Content-Type
+ - Cookie
+ - Host
+ - Referer
+ - Sec-Fetch-Dest
+ - Sec-Fetch-Mode
+ - Sec-Fetch-Site
+ - Sec-Fetch-User
+ - Origin
+ - Upgrade-Insecure-Requests
+ - User-Agent
+ - sec-ch-ua
+ - sec-ch-ua-mobile
+ - sec-ch-ua-platform
+ - x-xsrf-token
+ - xsrf-token
+ allowMethods:
+ - GET
+ - POST
+ - PATCH
+ - PUT
+ - DELETE
+
+modules:
+ - name: admin-ui
+ enabled: true
+ image:
+ registry: docker.io
+ repository: mosipqa/uitest-admin
+ tag: develop
+ pullPolicy: Always
+ - name: pmp-ui
+ enabled: true
+ image:
+ registry: docker.io
+ repository: mosipqa/uitest-pmp
+ tag: develop
+ pullPolicy: Always
+ - name: resident-ui
+ enabled: true
+ image:
+ registry: docker.io
+ repository: mosipqa/uitest-resident
+ tag: develop
+ pullPolicy: Always
+
+crontime: "0 3 * * *" ## run cronjob every day at 3 AM (time hr: 0-23 )
+
+uitestrig:
+ configmaps:
+ s3:
+ s3-host: 'http://minio.minio:9000'
+ s3-user-key: 'admin'
+ s3-region: ''
+ db:
+ db-port: '5432'
+ db-su-user: 'postgres'
+ db-server: 'api-internal.sandbox.xyz.net'
+ uitestrig:
+ apiInternalEndPoint: 'https://api-internal.sandbox.xyz.net'
+ apiEnvUser: 'api-internal.sandbox.xyz.net'
+ PmpPortalPath: 'https://pmp.sandbox.xyz.net'
+ adminPortalPath: 'https://admin.sandbox.xyz.net'
+ residentPortalPath: 'https://resident.sandbox.xyz.net'
+ CHROME_DRIVER_CPU_LIMIT: "2"
+ CHROME_DRIVER_MEMORY: 3g
+ loginlang: sin
+ push-reports-to-s3: 'yes'
+ s3-account: uitestrig
+ scripts:
+ fetch_docker_image_hash_ids.sh: |
+ #!/bin/bash
+ sleep 5
+ export DOCKER_HASH_ID=$( kubectl get pod "$HOSTNAME" -n "$NS" -o jsonpath='{.status.containerStatuses[*].imageID}' | sed 's/ /\n/g' | grep -v 'istio' | sed 's/docker\-pullable\:\/\///g' )
+ export DOCKER_IMAGE=$( kubectl get pod "$HOSTNAME" -n "$NS" -o jsonpath='{.status.containerStatuses[*].image}' | sed 's/ /\n/g' | grep -v 'istio' | sed 's/docker\-pullable\:\/\///g' )
+ if [[ -z $DOCKER_HASH_ID ]]; then
+ echo "DOCKER_HASH_ID IS EMPTY;EXITING";
+ exit 1;
+ fi
+ echo "DOCKER_HASH_ID ; $DOCKER_HASH_ID"
+ echo "DOCKER_IMAGE : $DOCKER_IMAGE"
+ kubectl get pods -A -o=jsonpath='{range .items[*]}{.metadata.namespace}{","}{.metadata.labels.app\.kubernetes\.io\/name}{","}{.status.containerStatuses[?(@.name!="istio-proxy")].image}{","}{.status.containerStatuses[?(@.name!="istio-proxy")].imageID}{","}{.metadata.creationTimestamp}{"\n"}' | sed 's/ /\n/g' | grep -vE 'istio*|longhorn*|cattle*|rancher|kube' | sed 's/docker\-pullable\:\/\///g' | sort -u | sed '/,,,/d' | awk -F ',' 'BEGIN {print "{ \"POD_NAME\": \"'$(echo $HOSTNAME)'\", \"DOCKER_IMAGE\": \"'$(echo $DOCKER_IMAGE)'\", \"DOCKER_HASH_ID\": \"'$(echo $DOCKER_HASH_ID)'\", \"k8s-cluster-image-list\": ["} {print "{"} {print "\"namespace\": \"" $1 "\","} {print "\"app_name\": \"" $2 "\","} {print "\"docker_image_name\": \"" $3 "\","} {print "\"docker_image_id\": \"" $4 "\","} {print "\"creation_timestamp\": \"" $5 "\"" } {print "},"} END {print "]}"}' | sed -z 's/},\n]/}\n]/g' | jq -r . | tee -a images-list.json
+ ## run entrypoint script
+ sleep 5
+ cd /home/${container_user}/
+ bash ./entrypoint.sh
+ secrets:
+ volumes:
+ configmaps:
+ scripts:
+ defaultMode: 0777
+ volumeMounts:
+ mountPath: '/home/mosip/scripts/'
+
+enable_insecure: false