diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 82abc4d29d5..e489c736560 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -245,3 +245,41 @@ jobs: - name: Test ${{ matrix.search-version }} run: yarn --silent test:${{ matrix.search-version }} --node-version ${{ matrix.node-version }} working-directory: ./e2e + + e2e-k8s-tests: + runs-on: ubuntu-latest + strategy: + # opensearch is finiky, keep testing others if it fails + fail-fast: false + matrix: + node-version: [16.19.1, 18.16.0] + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Setup Node ${{ matrix.node-version }} + uses: actions/setup-node@v3 + with: + node-version: ${{ matrix.node-version }} + cache: 'yarn' + + # we login to docker to avoid docker pull limit rates + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Install and build packages + run: yarn setup + env: + YARN_SETUP_ARGS: "--prod=false --silent" + + - name: Install Kind and Kubectl + uses: helm/kind-action@v1.8.0 + with: + install_only: "true" + + - name: Test k8s elasticsearch7 + run: yarn --silent test:k8s --node-version ${{ matrix.node-version }} + working-directory: ./e2e diff --git a/docker-compose.yml b/docker-compose.yml index fcbcecdd76b..5c57163ffd8 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -73,16 +73,32 @@ services: cap_add: - IPC_LOCK kafka: - image: terascope/kafka-zookeeper:v1.1.0 - platform: linux/amd64 # platform specified to force amd64 on arm MacOS docker + image: confluentinc/cp-kafka:7.2.0 # Has kafka 3.2.0 + ports: + - "9094:9094" + restart: unless-stopped + depends_on: + - zookeeper + networks: + - cluster + environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 + KAFKA_LISTENERS: INTERNAL://kafka:9092,OUTSIDE://0.0.0.0:9094 + KAFKA_ADVERTISED_LISTENERS: 
INTERNAL://kafka:9092,OUTSIDE://localhost:9094 + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,OUTSIDE:PLAINTEXT + KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + zookeeper: + image: confluentinc/cp-zookeeper:7.2.0 ports: - "2181:2181" - - "9092:9092" restart: unless-stopped networks: - cluster - volumes: - - kafka-data:/kafka + environment: + ZOOKEEPER_CLIENT_PORT: 2181 + ZOOKEEPER_TICK_TIME: 2000 minio: image: minio/minio:RELEASE.2023-09-30T07-02-29Z ports: diff --git a/e2e/jest.config.js b/e2e/jest.config.js index 0a4be3a992a..f6947d860d2 100644 --- a/e2e/jest.config.js +++ b/e2e/jest.config.js @@ -2,6 +2,10 @@ const config = require('../jest.config.base')(__dirname); +// TODO: update arrays to run tests specific to platform. +// First array is for tests skipped in kubernetes. +// Second array is for tests skipped in native. +config.testPathIgnorePatterns = process.env.TEST_PLATFORM === 'kubernetes' ? ['data/recovery-spec', 'cluster/worker-allocation-spec', 'cluster/state-spec'] : []; config.collectCoverage = false; delete config.transform; module.exports = config; diff --git a/e2e/k8s/elasticsearchDeployment.yaml b/e2e/k8s/elasticsearchDeployment.yaml new file mode 100644 index 00000000000..0ba2dc9969d --- /dev/null +++ b/e2e/k8s/elasticsearchDeployment.yaml @@ -0,0 +1,45 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: elasticsearch + labels: + app.kubernetes.io/name: elasticsearch + app.kubernetes.io/component: master +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: elasticsearch + app.kubernetes.io/component: master + template: + metadata: + labels: + app.kubernetes.io/name: elasticsearch + app.kubernetes.io/component: master + spec: + containers: + - name: elasticsearch + image: elasticsearch:7.9.3 + ports: + - containerPort: 9200 + env: + - name: ES_JAVA_OPTS + value: "-Xms512m -Xmx512m" + - name: discovery.type + value: single-node +--- +kind: Service +apiVersion: v1 
+metadata: + name: elasticsearch + labels: + app.kubernetes.io/name: elasticsearch +spec: + selector: + app.kubernetes.io/name: elasticsearch + app.kubernetes.io/component: master + ports: + - port: 9200 + targetPort: 9200 + nodePort: 30200 # the external port teraslice can be accessed on + type: NodePort diff --git a/e2e/k8s/kafkaDeployment.yaml b/e2e/k8s/kafkaDeployment.yaml new file mode 100644 index 00000000000..9324aa418f6 --- /dev/null +++ b/e2e/k8s/kafkaDeployment.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cpkafka + labels: + app.kubernetes.io/name: cpkafka + app.kubernetes.io/component: master +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cpkafka + app.kubernetes.io/component: master + template: + metadata: + labels: + app.kubernetes.io/name: cpkafka + app.kubernetes.io/component: master + spec: + containers: + - name: cpkafka + image: confluentinc/cp-kafka:7.1.9 + env: + - name: KAFKA_BROKER_ID + value: "1" + - name: KAFKA_ZOOKEEPER_CONNECT + value: zookeeper:2181 + - name: KAFKA_ADVERTISED_LISTENERS + value: INTERNAL://cpkafka.services-dev1:9092 + - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP + value: INTERNAL:PLAINTEXT + - name: KAFKA_INTER_BROKER_LISTENER_NAME + value: INTERNAL + - name: KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR + value: "1" + ports: + - containerPort: 9092 +--- +kind: Service +apiVersion: v1 +metadata: + name: cpkafka + labels: + app.kubernetes.io/name: cpkafka +spec: + type: NodePort + selector: + app.kubernetes.io/name: cpkafka + app.kubernetes.io/component: master + ports: + - port: 9092 + name: cpkafka + targetPort: 9092 + nodePort: 30092 diff --git a/e2e/k8s/kindConfig.yaml b/e2e/k8s/kindConfig.yaml new file mode 100644 index 00000000000..54fd983a1f4 --- /dev/null +++ b/e2e/k8s/kindConfig.yaml @@ -0,0 +1,12 @@ +kind: Cluster +name: k8se2e +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + extraPortMappings: + - containerPort: 30200 + hostPort: 49200 + - 
containerPort: 30678 + hostPort: 45678 + - containerPort: 30092 + hostPort: 49092 diff --git a/e2e/k8s/masterConfig/teraslice.yaml b/e2e/k8s/masterConfig/teraslice.yaml new file mode 100644 index 00000000000..2fe3eac4add --- /dev/null +++ b/e2e/k8s/masterConfig/teraslice.yaml @@ -0,0 +1,35 @@ +terafoundation: + environment: 'development' + log_level: debug + connectors: + elasticsearch: + default: + apiVersion: "5.6" + host: + - "elasticsearch.services-dev1:9200" + elasticsearch-next: + default: + node: + - "http://elasticsearch.services-dev1:9200" + kafka: + default: + brokers: + - "cpkafka.services-dev1:9092" +teraslice: + worker_disconnect_timeout: 60000 + node_disconnect_timeout: 60000 + slicer_timeout: 60000 + shutdown_timeout: 30000 + assets_directory: '/app/assets/' + cluster_manager_type: "kubernetes" + master: true + master_hostname: "127.0.0.1" + kubernetes_image: "teraslice-workspace:e2e" + kubernetes_image_pull_secrets: + - "docker-tera1-secret" + kubernetes_namespace: "ts-dev1" + kubernetes_overrides_enabled: true + kubernetes_priority_class_name: 'high-priority' + name: "ts-dev1" + cpu: 1 + memory: 536870912 diff --git a/e2e/k8s/masterDeployment.yaml b/e2e/k8s/masterDeployment.yaml new file mode 100644 index 00000000000..e4576906427 --- /dev/null +++ b/e2e/k8s/masterDeployment.yaml @@ -0,0 +1,53 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: teraslice-master + labels: + app.kubernetes.io/name: teraslice + app.kubernetes.io/component: master +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: teraslice + app.kubernetes.io/component: master + template: + metadata: + labels: + app.kubernetes.io/name: teraslice + app.kubernetes.io/component: master + app.kubernetes.io/instance: k8se2e + spec: + containers: + - name: teraslice-master + image: teraslice-workspace:e2e + ports: + - containerPort: 5678 + volumeMounts: + - mountPath: /app/config # defines the directory + name: config + volumes: + - name: config + configMap: + 
name: teraslice-master + items: + - key: teraslice.yaml + path: teraslice.yaml # the filename that the configMap gets written to, inside the matching mountPath + imagePullSecrets: + - name: docker-tera1-secret +--- +kind: Service +apiVersion: v1 +metadata: + name: teraslice-master + labels: + app.kubernetes.io/name: teraslice +spec: + selector: + app.kubernetes.io/name: teraslice + app.kubernetes.io/component: master + ports: + - port: 5678 + targetPort: 5678 + nodePort: 30678 # the external port teraslice can be accessed on + type: NodePort diff --git a/e2e/k8s/priorityClass.yaml b/e2e/k8s/priorityClass.yaml new file mode 100644 index 00000000000..0747825c87d --- /dev/null +++ b/e2e/k8s/priorityClass.yaml @@ -0,0 +1,7 @@ +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: high-priority +value: 1000000 +globalDefault: false +description: "This priority class is for Teraslice pods." diff --git a/e2e/k8s/role.yaml b/e2e/k8s/role.yaml new file mode 100644 index 00000000000..725cec875a5 --- /dev/null +++ b/e2e/k8s/role.yaml @@ -0,0 +1,9 @@ +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: teraslice-all-ts-dev1 + namespace: ts-dev1 +rules: + - apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] \ No newline at end of file diff --git a/e2e/k8s/roleBinding.yaml b/e2e/k8s/roleBinding.yaml new file mode 100644 index 00000000000..b46683aeb41 --- /dev/null +++ b/e2e/k8s/roleBinding.yaml @@ -0,0 +1,13 @@ +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: teraslice-all-ts-dev1 + namespace: ts-dev1 +subjects: + - kind: ServiceAccount + name: default + namespace: ts-dev1 +roleRef: + kind: Role + name: teraslice-all-ts-dev1 + apiGroup: "rbac.authorization.k8s.io" \ No newline at end of file diff --git a/e2e/k8s/services-ns.yaml b/e2e/k8s/services-ns.yaml new file mode 100644 index 00000000000..cd979b11d2b --- /dev/null +++ b/e2e/k8s/services-ns.yaml @@ -0,0 +1,4 @@ +kind: Namespace +apiVersion: v1 +metadata: 
+ name: services-dev1 diff --git a/e2e/k8s/ts-ns.yaml b/e2e/k8s/ts-ns.yaml new file mode 100644 index 00000000000..1af803c0c4f --- /dev/null +++ b/e2e/k8s/ts-ns.yaml @@ -0,0 +1,4 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: ts-dev1 \ No newline at end of file diff --git a/e2e/k8s/workerConfig/teraslice.yaml b/e2e/k8s/workerConfig/teraslice.yaml new file mode 100644 index 00000000000..26a51e52b3a --- /dev/null +++ b/e2e/k8s/workerConfig/teraslice.yaml @@ -0,0 +1,33 @@ +terafoundation: + environment: 'development' + log_level: debug + connectors: + elasticsearch: + default: + apiVersion: "5.6" + host: + - "elasticsearch.services-dev1:9200" + elasticsearch-next: + default: + node: + - "http://elasticsearch.services-dev1:9200" + kafka: + default: + brokers: + - "cpkafka.services-dev1:9092" +teraslice: + worker_disconnect_timeout: 60000 + node_disconnect_timeout: 60000 + slicer_timeout: 60000 + shutdown_timeout: 30000 + assets_directory: '/app/assets/' + cluster_manager_type: "kubernetes" + master: false + master_hostname: "teraslice-master" + kubernetes_image: "teraslice-workspace:e2e" + kubernetes_namespace: "ts-dev1" + kubernetes_overrides_enabled: true + kubernetes_priority_class_name: 'high-priority' + name: "ts-dev1" + cpu: 1 + memory: 536870912 diff --git a/e2e/k8s/zookeeperDeployment.yaml b/e2e/k8s/zookeeperDeployment.yaml new file mode 100644 index 00000000000..9199994e951 --- /dev/null +++ b/e2e/k8s/zookeeperDeployment.yaml @@ -0,0 +1,46 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: zookeeper + labels: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/component: master +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/component: master + template: + metadata: + labels: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/component: master + spec: + containers: + - name: zookeeper + image: confluentinc/cp-zookeeper:7.1.9 + env: + - name: ZOOKEEPER_CLIENT_PORT + value: "2181" 
+ - name: ZOOKEEPER_TICK_TIME + value: "2000" + ports: + - containerPort: 2181 +--- +kind: Service +apiVersion: v1 +metadata: + name: zookeeper + labels: + app.kubernetes.io/name: zookeeper +spec: + type: NodePort + selector: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/component: master + ports: + - port: 2181 + name: zookeeper + targetPort: 2181 + nodePort: 32181 diff --git a/e2e/package.json b/e2e/package.json index 2f6277098c9..91565c7e313 100644 --- a/e2e/package.json +++ b/e2e/package.json @@ -28,6 +28,8 @@ "test:debug": "TEST_ELASTICSEARCH='true' TEST_KAFKA='true' ts-scripts test --suite e2e --debug --", "test:elasticsearch6": "TEST_ELASTICSEARCH='true' TEST_KAFKA='true' ts-scripts test --suite e2e --", "test:elasticsearch7": "TEST_ELASTICSEARCH='true' ELASTICSEARCH_VERSION='7.9.3' TEST_KAFKA='true' ts-scripts test --suite e2e --", + "test:k8s": "TEST_ELASTICSEARCH='true' ELASTICSEARCH_VERSION='7.9.3' KAFKA_VERSION='3.1' TEST_KAFKA='true' TEST_PLATFORM='kubernetes' ts-scripts test --suite e2e --", + "test:k8sNoBuild": "SKIP_DOCKER_BUILD_IN_E2E='true' TEST_ELASTICSEARCH='true' ELASTICSEARCH_VERSION='7.9.3' KAFKA_VERSION='3.1' TEST_KAFKA='true' TEST_PLATFORM='kubernetes' ts-scripts test --suite e2e --", "test:opensearch1": "TEST_OPENSEARCH='true' TEST_KAFKA='true' ts-scripts test --suite e2e --", "test:opensearch2": "TEST_OPENSEARCH='true' OPENSEARCH_VERSION='2.8.0' TEST_KAFKA='true' ts-scripts test --suite e2e --", "test:watch": "TEST_ELASTICSEARCH='true' TEST_KAFKA='true' ts-scripts test --suite e2e --watch --" diff --git a/e2e/test/config.js b/e2e/test/config.js index b97ffbb415d..7e967403bff 100644 --- a/e2e/test/config.js +++ b/e2e/test/config.js @@ -34,7 +34,9 @@ const { KAFKA_BROKER = 'locahost:9092', HOST_IP = '127.0.0.1', GENERATE_ONLY, - TEST_OPENSEARCH = false + TEST_OPENSEARCH = false, + TEST_PLATFORM = 'native', + KEEP_OPEN = false } = process.env; const TEST_HOST = TEST_OPENSEARCH ? 
OPENSEARCH_HOST : ELASTICSEARCH_HOST; @@ -76,5 +78,7 @@ module.exports = { OPENSEARCH_VERSION, GENERATE_ONLY, newId, - TEST_HOST + TEST_HOST, + TEST_PLATFORM, + KEEP_OPEN }; diff --git a/e2e/test/fixtures/jobs/generate-to-es.json b/e2e/test/fixtures/jobs/generate-to-es.json index d8301baa243..6942f681560 100644 --- a/e2e/test/fixtures/jobs/generate-to-es.json +++ b/e2e/test/fixtures/jobs/generate-to-es.json @@ -5,6 +5,9 @@ "workers": 2, "analytics": false, "assets": ["elasticsearch", "standard"], + "resources_requests_cpu": 0.1, + "resources_limits_cpu": 0.5, + "cpu_execution_controller": 0.2, "max_retries": 0, "operations": [ { diff --git a/e2e/test/fixtures/jobs/generator-asset.json b/e2e/test/fixtures/jobs/generator-asset.json index e0cbf88bdfc..831eee754da 100644 --- a/e2e/test/fixtures/jobs/generator-asset.json +++ b/e2e/test/fixtures/jobs/generator-asset.json @@ -4,6 +4,9 @@ "lifecycle": "persistent", "workers": 2, "assets": ["ex1", "standard"], + "resources_requests_cpu": 0.1, + "resources_limits_cpu": 0.5, + "cpu_execution_controller": 0.2, "max_retries": 0, "analytics": false, "operations": [ diff --git a/e2e/test/fixtures/jobs/generator.json b/e2e/test/fixtures/jobs/generator.json index 28bcf98d2c1..f88e73801aa 100644 --- a/e2e/test/fixtures/jobs/generator.json +++ b/e2e/test/fixtures/jobs/generator.json @@ -5,6 +5,9 @@ "workers": 3, "analytics": false, "assets": ["standard"], + "resources_requests_cpu": 0.1, + "resources_limits_cpu": 0.5, + "cpu_execution_controller": 0.2, "max_retries": 0, "operations": [ { diff --git a/e2e/test/fixtures/jobs/id.json b/e2e/test/fixtures/jobs/id.json index 844482d35e3..218b3619d4b 100644 --- a/e2e/test/fixtures/jobs/id.json +++ b/e2e/test/fixtures/jobs/id.json @@ -5,6 +5,9 @@ "slicers": 2, "workers": 4, "assets": ["elasticsearch"], + "resources_requests_cpu": 0.1, + "resources_limits_cpu": 0.5, + "cpu_execution_controller": 0.2, "operations": [ { "_op": "id_reader", diff --git a/e2e/test/fixtures/jobs/kafka-reader.json 
b/e2e/test/fixtures/jobs/kafka-reader.json index f1eeb469ceb..23fd02344c2 100644 --- a/e2e/test/fixtures/jobs/kafka-reader.json +++ b/e2e/test/fixtures/jobs/kafka-reader.json @@ -4,6 +4,9 @@ "workers": 1, "analytics": true, "assets": ["kafka", "elasticsearch"], + "resources_requests_cpu": 0.1, + "resources_limits_cpu": 0.5, + "cpu_execution_controller": 0.2, "max_retries": 0, "operations": [ { diff --git a/e2e/test/fixtures/jobs/kafka-sender.json b/e2e/test/fixtures/jobs/kafka-sender.json index f799564a770..d2d4085bf6d 100644 --- a/e2e/test/fixtures/jobs/kafka-sender.json +++ b/e2e/test/fixtures/jobs/kafka-sender.json @@ -4,6 +4,9 @@ "workers": 1, "analytics": true, "assets": ["kafka", "elasticsearch"], + "resources_requests_cpu": 0.1, + "resources_limits_cpu": 0.5, + "cpu_execution_controller": 0.2, "max_retries": 0, "operations": [ { diff --git a/e2e/test/fixtures/jobs/multisend.json b/e2e/test/fixtures/jobs/multisend.json index 52be220596b..482e9fd8c92 100644 --- a/e2e/test/fixtures/jobs/multisend.json +++ b/e2e/test/fixtures/jobs/multisend.json @@ -4,6 +4,9 @@ "lifecycle": "once", "analytics": false, "assets": ["elasticsearch"], + "resources_requests_cpu": 0.1, + "resources_limits_cpu": 0.5, + "cpu_execution_controller": 0.2, "operations": [ { "_op": "elasticsearch_reader", diff --git a/e2e/test/fixtures/jobs/reindex.json b/e2e/test/fixtures/jobs/reindex.json index 9fda19219c7..a93030347ea 100644 --- a/e2e/test/fixtures/jobs/reindex.json +++ b/e2e/test/fixtures/jobs/reindex.json @@ -4,6 +4,9 @@ "workers": 1, "analytics": true, "assets": ["elasticsearch"], + "resources_requests_cpu": 0.1, + "resources_limits_cpu": 0.5, + "cpu_execution_controller": 0.2, "operations": [ { "_op": "elasticsearch_reader", diff --git a/e2e/test/global.setup.js b/e2e/test/global.setup.js index bcd66773d55..7c0512ff716 100644 --- a/e2e/test/global.setup.js +++ b/e2e/test/global.setup.js @@ -1,6 +1,10 @@ 'use strict'; const { pDelay } = require('@terascope/utils'); +const { + 
deployK8sTeraslice, + setAliasAndBaseAssets +} = require('@terascope/scripts'); const fse = require('fs-extra'); const TerasliceHarness = require('./teraslice-harness'); const globalTeardown = require('./global.teardown'); @@ -8,11 +12,12 @@ const { dockerUp } = require('./docker-helpers'); const signale = require('./signale'); const setupTerasliceConfig = require('./setup-config'); const downloadAssets = require('./download-assets'); -const { CONFIG_PATH, ASSETS_PATH } = require('./config'); +const { + CONFIG_PATH, ASSETS_PATH, TEST_PLATFORM, HOST_IP +} = require('./config'); module.exports = async () => { const teraslice = new TerasliceHarness(); - await teraslice.init(); await globalTeardown(teraslice.client); @@ -33,10 +38,16 @@ module.exports = async () => { fse.ensureDir(CONFIG_PATH), ]); - await Promise.all([setupTerasliceConfig(), downloadAssets()]); + if (TEST_PLATFORM === 'kubernetes') { + await deployK8sTeraslice(); // here + await teraslice.waitForTeraslice(); + await setAliasAndBaseAssets(HOST_IP); + } else { + await Promise.all([setupTerasliceConfig(), downloadAssets()]); + await dockerUp(); + await teraslice.waitForTeraslice(); + } - await dockerUp(); - await teraslice.waitForTeraslice(); await pDelay(2000); await teraslice.resetState(); diff --git a/e2e/test/global.teardown.js b/e2e/test/global.teardown.js index e59a84a9462..b4a7e2563dd 100644 --- a/e2e/test/global.teardown.js +++ b/e2e/test/global.teardown.js @@ -1,9 +1,12 @@ 'use strict'; const { ElasticsearchTestHelpers } = require('elasticsearch-store'); +const { deleteTerasliceNamespace } = require('@terascope/scripts'); const fse = require('fs-extra'); -const { KEEP_OPEN, CONFIG_PATH, ASSETS_PATH } = require('./config'); -const { tearDown, TEST_INDEX_PREFIX } = require('./docker-helpers'); +const { + KEEP_OPEN, CONFIG_PATH, ASSETS_PATH, TEST_INDEX_PREFIX, TEST_PLATFORM +} = require('./config'); +const { tearDown } = require('./docker-helpers'); const signale = require('./signale'); const { 
cleanupIndex, makeClient } = ElasticsearchTestHelpers; @@ -14,20 +17,24 @@ async function getClient(client) { } async function globalTeardown(testClient) { - const client = await getClient(testClient); - if (KEEP_OPEN) { return; } + const client = await getClient(testClient); + const errors = []; try { - await tearDown(); + if (TEST_PLATFORM === 'kubernetes') { + await deleteTerasliceNamespace(); + await cleanupIndex(client, 'ts-dev1_*'); + } else { + await tearDown(); + } } catch (err) { errors.push(err); } - await cleanupIndex(client, `${TEST_INDEX_PREFIX}*`); if (fse.existsSync(CONFIG_PATH)) { diff --git a/e2e/test/teraslice-harness.js b/e2e/test/teraslice-harness.js index 36a96061aaf..e0aecd5f680 100644 --- a/e2e/test/teraslice-harness.js +++ b/e2e/test/teraslice-harness.js @@ -5,6 +5,9 @@ const { pDelay, uniq, toString, cloneDeep, isEmpty, castArray } = require('@terascope/utils'); +const { + deployK8sTeraslice, showState +} = require('@terascope/scripts'); const { createClient, ElasticsearchTestHelpers } = require('elasticsearch-store'); const { TerasliceClient } = require('teraslice-client-js'); const path = require('path'); @@ -12,7 +15,7 @@ const fse = require('fs-extra'); const { TEST_HOST, HOST_IP, SPEC_INDEX_PREFIX, DEFAULT_NODES, newId, DEFAULT_WORKERS, GENERATE_ONLY, - EXAMPLE_INDEX_SIZES, EXAMPLE_INDEX_PREFIX + EXAMPLE_INDEX_SIZES, EXAMPLE_INDEX_PREFIX, TEST_PLATFORM } = require('./config'); const { scaleWorkers, getElapsed } = require('./docker-helpers'); const signale = require('./signale'); @@ -85,42 +88,68 @@ module.exports = class TerasliceHarness { async resetState() { const startTime = Date.now(); - const state = await this.teraslice.cluster.state(); - await Promise.all([ - pDelay(800), - cleanupIndex(this.client, `${SPEC_INDEX_PREFIX}*`), - (async () => { - const cleanupExIds = []; - Object.values(state).forEach((node) => { - const { assignment, ex_id: exId } = node; - - const isWorker = ['execution_controller', 
'worker'].includes(assignment); - if (isWorker) { - cleanupExIds.push(exId); - } - }); + if (TEST_PLATFORM === 'kubernetes') { + try { + await cleanupIndex(this.client, `${SPEC_INDEX_PREFIX}*`); + await cleanupIndex(this.client, 'ts-dev1__ex'); + await cleanupIndex(this.client, 'ts-dev1__jobs'); + await cleanupIndex(this.client, 'ts-dev1__analytics*'); + await cleanupIndex(this.client, 'ts-dev1__state*'); + await this.clearNonBaseAssets(); + } catch (err) { + signale.error('Failure to clean indices and assets', err); + throw err; + } + try { + await deployK8sTeraslice(); + await this.waitForTeraslice(); + await showState(HOST_IP); + } catch (err) { + signale.error('Failure to reset teraslice state', err); + throw err; + } + // TODO: If tests are ever implemented to scale nodes in Kind, + // a scaleWorkers implementation will need to be created that works with Kind. + // As of Oct 2023 Kind doesn't let you scale nodes w/o restarting the cluster. + } else { + const state = await this.teraslice.cluster.state(); - await Promise.all( - uniq(cleanupExIds).map(async (exId) => { - signale.warn(`resetting ex ${exId}`); - try { - await this.teraslice.executions.wrap(exId).stop({ blocking: true }); - } catch (err) { - // ignore error; + await Promise.all([ + pDelay(800), + cleanupIndex(this.client, `${SPEC_INDEX_PREFIX}*`), + (async () => { + const cleanupExIds = []; + Object.values(state).forEach((node) => { + const { assignment, ex_id: exId } = node; + + const isWorker = ['execution_controller', 'worker'].includes(assignment); + if (isWorker) { + cleanupExIds.push(exId); } - }) - ); - })(), - (async () => { - const count = Object.keys(state).length; - if (count !== DEFAULT_NODES) { - signale.warn(`resetting cluster state of ${count} nodes`); - await scaleWorkers(); - await this.forWorkers(); - } - })() - ]); + }); + + await Promise.all( + uniq(cleanupExIds).map(async (exId) => { + signale.warn(`resetting ex ${exId}`); + try { + await 
this.teraslice.executions.wrap(exId).stop({ blocking: true }); + } catch (err) { + // ignore error; + } + }) + ); + })(), + (async () => { + const count = Object.keys(state).length; + if (count !== DEFAULT_NODES) { + signale.warn(`resetting cluster state of ${count} nodes`); + await scaleWorkers(); + await this.forWorkers(); + } + })() + ]); + } const elapsed = Date.now() - startTime; if (elapsed > 1000) { @@ -350,6 +379,11 @@ module.exports = class TerasliceHarness { return _waitForClusterState(); } + if (TEST_PLATFORM === 'kubernetes') { + // A get request to 'cluster/state' will return an empty object in kubernetes. + // Therefore nodes will be 0. + if (nodes === 0) return nodes; + } if (nodes >= DEFAULT_NODES) return nodes; return _waitForClusterState(); }; @@ -371,7 +405,7 @@ module.exports = class TerasliceHarness { return count; } } catch (err) { - // it probably okay + // it probably okay } await pDelay(50); @@ -390,7 +424,12 @@ module.exports = class TerasliceHarness { signale.pending('Waiting for Teraslice...'); const nodes = await this.waitForClusterState(); - signale.success(`Teraslice is ready to go with ${nodes} nodes`, getElapsed(startTime)); + + if (TEST_PLATFORM === 'kubernetes') { + signale.success('Teraslice is ready to go', getElapsed(startTime)); + } else { + signale.success(`Teraslice is ready to go with ${nodes} nodes`, getElapsed(startTime)); + } } async postJob(jobSpec) { @@ -412,6 +451,9 @@ module.exports = class TerasliceHarness { lifecycle: 'once', workers: 1, assets: ['elasticsearch', 'standard'], + resources_requests_cpu: 0.1, + resources_limits_cpu: 0.5, + cpu_execution_controller: 0.2, operations: [ { _op: 'data_generator', @@ -471,4 +513,15 @@ module.exports = class TerasliceHarness { throw err; } } + + async clearNonBaseAssets() { + const assetList = await this.teraslice.assets.list(); + const baseAssets = ['standard', 'elasticsearch', 'kafka']; + const assetsToDelete = assetList.filter((assetObj) => 
!baseAssets.includes(assetObj.name)); + + for (const asset of assetsToDelete) { + const response = await this.teraslice.assets.remove(asset.id); + signale.debug(`Deleted asset with id ${response._id}`); + } + } }; diff --git a/package.json b/package.json index a3cb7e0a674..c80facbbf97 100644 --- a/package.json +++ b/package.json @@ -9,7 +9,8 @@ }, "workspaces": [ "packages/*", - "e2e" + "e2e", + "k8se2e" ], "scripts": { "prebuild": "./packages/xlucene-parser/scripts/generate-engine.js", @@ -69,6 +70,7 @@ "tests": { "suites": { "e2e": [], + "k8se2e": [], "elasticsearch": [], "search": [], "restrained": [], diff --git a/packages/elasticsearch-store/test/client-functions-spec.ts b/packages/elasticsearch-store/test/client-functions-spec.ts index b7c229ab799..91feab5dd35 100644 --- a/packages/elasticsearch-store/test/client-functions-spec.ts +++ b/packages/elasticsearch-store/test/client-functions-spec.ts @@ -56,11 +56,7 @@ describe('creates client that exposes elasticsearch and opensearch functions', ( const resp = await client.info(); if (clientMetadata.distribution === 'elasticsearch') { - if (clientMetadata.version.split('.')[0] === '8') { - expect(resp.cluster_name).toBe('docker-cluster'); - } else { - expect(resp.cluster_name).toBe(clientMetadata.distribution); - } + expect(resp.cluster_name).toBe('docker-cluster'); } if (clientMetadata.distribution === 'opensearch') { diff --git a/packages/scripts/package.json b/packages/scripts/package.json index cf4b187082f..8d9ee38e49e 100644 --- a/packages/scripts/package.json +++ b/packages/scripts/package.json @@ -1,7 +1,7 @@ { "name": "@terascope/scripts", "displayName": "Scripts", - "version": "0.60.0", + "version": "0.60.1", "description": "A collection of terascope monorepo scripts", "homepage": "https://github.com/terascope/teraslice/tree/master/packages/scripts#readme", "bugs": { diff --git a/packages/scripts/src/cmds/test.ts b/packages/scripts/src/cmds/test.ts index 9f3c24c7f75..b76efb9d6ae 100644 --- 
a/packages/scripts/src/cmds/test.ts +++ b/packages/scripts/src/cmds/test.ts @@ -26,7 +26,8 @@ type Options = { 'node-version': string; 'use-existing-services': boolean; packages?: PackageInfo[]; - 'ignore-mount': boolean + 'ignore-mount': boolean; + 'test-platform': string; }; const jestArgs = getExtraArgs(); @@ -130,6 +131,11 @@ const cmd: CommandModule = { type: 'boolean', default: false, }) + .option('test-platform', { + description: 'Clustering platform for e2e tests', + type: 'string', + default: config.TEST_PLATFORM, + }) .positional('packages', { description: 'Runs the tests for one or more package and/or an asset, if none specified it will run all of the tests', coerce(arg) { @@ -159,6 +165,7 @@ const cmd: CommandModule = { const nodeVersion = hoistJestArg(argv, 'node-version', 'string'); const forceSuite = hoistJestArg(argv, 'force-suite', 'string'); const ignoreMount = hoistJestArg(argv, 'ignore-mount', 'boolean'); + const testPlatform = hoistJestArg(argv, 'test-platform', 'string'); if (debug && watch) { throw new Error('--debug and --watch conflict, please set one or the other'); @@ -176,6 +183,8 @@ const cmd: CommandModule = { elasticsearchVersion, elasticsearchAPIVersion, kafkaVersion, + kafkaImageVersion: config.KAFKA_IMAGE_VERSION, + zookeeperVersion: config.ZOOKEEPER_VERSION, minioVersion, rabbitmqVersion, opensearchVersion, @@ -183,7 +192,8 @@ const cmd: CommandModule = { all: !argv.packages || !argv.packages.length, reportCoverage, jestArgs, - ignoreMount + ignoreMount, + testPlatform }); }, }; diff --git a/packages/scripts/src/helpers/config.ts b/packages/scripts/src/helpers/config.ts index 889cce8bc74..88fb18fa454 100644 --- a/packages/scripts/src/helpers/config.ts +++ b/packages/scripts/src/helpers/config.ts @@ -8,7 +8,23 @@ const forceColor = process.env.FORCE_COLOR || '1'; export const FORCE_COLOR = toBoolean(forceColor) ? 
'1' : '0'; - +const kafkaMapper = { + 3: { + 0: '7.0.11', + 1: '7.1.9', + 2: '7.2.7', + 3: '7.3.5', + 4: '7.4.2', + 5: '7.5.1' + }, + 2: { + 4: '5.4.10', + 5: '5.5.12', + 6: '6.0.15', + 7: '6.1.13', + 8: '6.2.12' + } +}; /** The timeout for how long a service has to stand up */ export const SERVICE_UP_TIMEOUT = process.env.SERVICE_UP_TIMEOUT ?? '2m'; @@ -25,7 +41,7 @@ export const ELASTICSEARCH_PORT = process.env.ELASTICSEARCH_PORT || '49200'; export const ELASTICSEARCH_HOST = `http://${ELASTICSEARCH_HOSTNAME}:${ELASTICSEARCH_PORT}`; export const ELASTICSEARCH_VERSION = process.env.ELASTICSEARCH_VERSION || '6.8.6'; export const ELASTICSEARCH_API_VERSION = process.env.ELASTICSEARCH_API_VERSION || '6.5'; -export const ELASTICSEARCH_DOCKER_IMAGE = process.env.ELASTICSEARCH_DOCKER_IMAGE || 'blacktop/elasticsearch'; +export const ELASTICSEARCH_DOCKER_IMAGE = process.env.ELASTICSEARCH_DOCKER_IMAGE || 'elasticsearch'; export const RESTRAINED_ELASTICSEARCH_PORT = process.env.RESTRAINED_ELASTICSEARCH_PORT || '49202'; export const RESTRAINED_ELASTICSEARCH_HOST = `http://${ELASTICSEARCH_HOSTNAME}:${RESTRAINED_ELASTICSEARCH_PORT}`; @@ -35,7 +51,19 @@ export const KAFKA_HOSTNAME = process.env.KAFKA_HOSTNAME || HOST_IP; export const KAFKA_PORT = process.env.KAFKA_PORT || '49092'; export const KAFKA_BROKER = `${KAFKA_HOSTNAME}:${KAFKA_PORT}`; export const KAFKA_VERSION = process.env.KAFKA_VERSION || '3.1'; -export const KAFKA_DOCKER_IMAGE = process.env.KAFKA_DOCKER_IMAGE || 'blacktop/kafka'; +export const KAFKA_IMAGE_VERSION = kafkaMapper[KAFKA_VERSION.charAt(0)][KAFKA_VERSION.charAt(2)]; +export const KAFKA_DOCKER_IMAGE = process.env.KAFKA_DOCKER_IMAGE || 'confluentinc/cp-kafka'; +export const ZOOKEEPER_VERSION = kafkaMapper[KAFKA_VERSION.charAt(0)][KAFKA_VERSION.charAt(2)]; +export const ZOOKEEPER_CLIENT_PORT = process.env.ZOOKEEPER_CLIENT_PORT || '42181'; +export const ZOOKEEPER_TICK_TIME = process.env.ZOOKEEPER_TICK_TIME || '2000'; +export const ZOOKEEPER_DOCKER_IMAGE = 
process.env.ZOOKEEPER_DOCKER_IMAGE || 'confluentinc/cp-zookeeper'; +export const KAFKA_BROKER_ID = process.env.KAFKA_BROKER_ID || '1'; +export const KAFKA_ZOOKEEPER_CONNECT = `${KAFKA_HOSTNAME}:${ZOOKEEPER_CLIENT_PORT}`; +export const KAFKA_LISTENERS = `INTERNAL://:${KAFKA_PORT}`; +export const KAFKA_ADVERTISED_LISTENERS = `INTERNAL://${KAFKA_HOSTNAME}:${KAFKA_PORT}`; +export const KAFKA_LISTENER_SECURITY_PROTOCOL_MAP = 'INTERNAL:PLAINTEXT'; +export const KAFKA_INTER_BROKER_LISTENER_NAME = process.env.KAFKA_INTER_BROKER_LISTENER_NAME || 'INTERNAL'; +export const KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR = process.env.KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR || '1'; export const MINIO_NAME = process.env.MINIO_NAME || 'minio'; export const MINIO_HOSTNAME = process.env.MINIO_HOSTNAME || HOST_IP; @@ -63,7 +91,7 @@ export const OPENSEARCH_HOSTNAME = process.env.OPENSEARCH_HOSTNAME || HOST_IP; export const OPENSEARCH_PORT = process.env.OPENSEARCH_PORT || '49210'; export const OPENSEARCH_USER = process.env.OPENSEARCH_USER || 'admin'; export const OPENSEARCH_PASSWORD = process.env.OPENSEARCH_PASSWORD || 'admin'; -export const OPENSEARCH_VERSION = process.env.OPENSEARCH_VERSION || '1.3.6'; +export const OPENSEARCH_VERSION = process.env.OPENSEARCH_VERSION || '1.3.10'; export const OPENSEARCH_HOST = `http://${OPENSEARCH_USER}:${OPENSEARCH_PASSWORD}@${OPENSEARCH_HOSTNAME}:${OPENSEARCH_PORT}`; export const OPENSEARCH_DOCKER_IMAGE = process.env.OPENSEARCH_DOCKER_IMAGE || 'opensearchproject/opensearch'; @@ -133,6 +161,8 @@ export const ENV_SERVICES = [ testOpensearch ? Service.Opensearch : undefined, testElasticsearch ? Service.Elasticsearch : undefined, toBoolean(TEST_KAFKA) ? Service.Kafka : undefined, + /// couple kafa with zookeeper + toBoolean(TEST_KAFKA) ? Service.Zookeeper : undefined, toBoolean(TEST_MINIO) ? Service.Minio : undefined, testRestrainedOpensearch ? Service.RestrainedOpensearch : undefined, testRestrainedElasticsearch ? 
Service.RestrainedElasticsearch : undefined, @@ -158,3 +188,5 @@ export const SEARCH_TEST_HOST = testHost; // https://github.com/terascope/base-docker-image // This overrides the value in the Dockerfile export const NODE_VERSION = process.env.NODE_VERSION || '18.16.0'; + +export const { TEST_PLATFORM = 'native' } = process.env; diff --git a/packages/scripts/src/helpers/interfaces.ts b/packages/scripts/src/helpers/interfaces.ts index 6c8c1022c86..605d0ff1b32 100644 --- a/packages/scripts/src/helpers/interfaces.ts +++ b/packages/scripts/src/helpers/interfaces.ts @@ -37,6 +37,7 @@ export type PackageInfo = { export enum Service { Kafka = 'kafka', + Zookeeper = 'zookeeper', Elasticsearch = 'elasticsearch', Minio = 'minio', RabbitMQ = 'rabbitmq', diff --git a/packages/scripts/src/helpers/packages.ts b/packages/scripts/src/helpers/packages.ts index 0e7197730f4..b1bca1e2621 100644 --- a/packages/scripts/src/helpers/packages.ts +++ b/packages/scripts/src/helpers/packages.ts @@ -15,6 +15,7 @@ import * as i from './interfaces'; let _packages: i.PackageInfo[] = []; let _e2eDir: string|undefined; +let _e2e_k8s_dir: string|undefined; export function getE2EDir(): string|undefined { if (_e2eDir) return _e2eDir; @@ -27,6 +28,17 @@ export function getE2EDir(): string|undefined { return undefined; } +export function getE2eK8sDir(): string|undefined { + if (_e2e_k8s_dir) return _e2e_k8s_dir; + + if (fs.existsSync(path.join(misc.getRootDir(), 'e2e/k8s'))) { + _e2e_k8s_dir = path.join(misc.getRootDir(), 'e2e/k8s'); + return _e2e_k8s_dir; + } + + return undefined; +} + function _loadPackage(packagePath: string): i.PackageInfo|undefined { const pkgJsonPath = path.join(packagePath, 'package.json'); if (fs.existsSync(pkgJsonPath)) { diff --git a/packages/scripts/src/helpers/scripts.ts b/packages/scripts/src/helpers/scripts.ts index 3118ed56c26..5b2758e52cb 100644 --- a/packages/scripts/src/helpers/scripts.ts +++ b/packages/scripts/src/helpers/scripts.ts @@ -14,6 +14,7 @@ import { 
TSCommands, PackageInfo } from './interfaces'; import { getRootDir } from './misc'; import signale from './signale'; import * as config from './config'; +import { getE2eK8sDir } from '../helpers/packages'; const logger = debugLogger('ts-scripts:cmd'); @@ -532,3 +533,223 @@ export async function yarnPublish( } }); } + +export async function createKindCluster(): Promise { + const e2eK8sDir = getE2eK8sDir(); + if (!e2eK8sDir) { + throw new Error('Missing k8s e2e test directory'); + } + + const configPath = path.join(e2eK8sDir, 'kindConfig.yaml'); + const subprocess = await execa.command(`kind create cluster --config ${configPath}`); + logger.debug(subprocess.stderr); +} + +export async function destroyKindCluster(): Promise { + const subprocess = await execa.command('kind delete cluster --name k8se2e'); + logger.debug(subprocess.stderr); +} + +export async function isKindInstalled(): Promise { + try { + const subprocess = await execa.command('command -v kind'); + return !!subprocess.stdout; + } catch (err) { + return false; + } +} + +export async function isKubectlInstalled(): Promise { + try { + const subprocess = await execa.command('command -v kubectl'); + return !!subprocess.stdout; + } catch (err) { + return false; + } +} + +// TODO: check that image is loaded before we continue +export async function loadTerasliceImage(terasliceImage: string): Promise { + const subprocess = await execa.command(`kind load docker-image ${terasliceImage} --name k8se2e`); + logger.debug(subprocess.stderr); +} + +export async function kindStopService(serviceName: string): Promise { + const e2eK8sDir = getE2eK8sDir(); + if (!e2eK8sDir) { + throw new Error('Missing k8s e2e test directory'); + } + + try { + // Any new service's yaml file must be named '<serviceName>Deployment.yaml' + const yamlFile = `${serviceName}Deployment.yaml`; + const subprocess = await execa.command(`kubectl delete -n services-dev1 -f ${path.join(e2eK8sDir, yamlFile)}`); + logger.debug(subprocess.stdout); + } catch (err) { + 
// Do nothing. This should fail because no services should be up yet. + } +} + +// TODO: Image versions are currently hard coded into yaml files +// TODO: check that image is loaded before we continue +export async function kindLoadServiceImage( + serviceName: string, serviceImage: string, version: string +): Promise { + try { + const subprocess = await execa.command(`kind load docker-image ${serviceImage}:${version} --name k8se2e`); + logger.debug(subprocess.stderr); + } catch (err) { + logger.debug(`The service ${serviceName} could not be loaded. It may not be present locally`); + } +} + +export async function kindStartService(serviceName: string): Promise { + // Any new service's yaml file must be named '<serviceName>Deployment.yaml' + const yamlFile = `${serviceName}Deployment.yaml`; + + const e2eK8sDir = getE2eK8sDir(); + if (!e2eK8sDir) { + throw new Error('Missing k8s e2e test directory'); + } + + try { + const subprocess = await execa.command(`kubectl create -n services-dev1 -f ${path.join(e2eK8sDir, yamlFile)}`); + logger.debug(subprocess.stdout); + } catch (err) { + logger.error(`The service ${serviceName} could not be started: `, err); + } + + if (serviceName === 'kafka') { + await waitForKafkaRunning(240000); + } +} + +function waitForKafkaRunning(timeoutMs = 120000): Promise { + const endAt = Date.now() + timeoutMs; + + const _waitForKafkaRunning = async (): Promise => { + if (Date.now() > endAt) { + throw new Error(`Failure to communicate with kafka after ${timeoutMs}ms`); + } + + let kafkaRunning = false; + try { + const kubectlResponse = await execa.command('kubectl -n services-dev1 get pods -l app.kubernetes.io/name=cpkafka -o=jsonpath="{.items[?(@.status.containerStatuses)].status.containerStatuses[0].ready}"'); + const kafkaReady = kubectlResponse.stdout; + if (kafkaReady === '"true"') { + kafkaRunning = true; + } + } catch (err) { + await pDelay(3000); + return _waitForKafkaRunning(); + } + + if (kafkaRunning) { + return true; + } + await pDelay(3000); + 
return _waitForKafkaRunning(); + }; + + return _waitForKafkaRunning(); +} + +export async function createNamespace(namespaceYaml: string) { + const e2eK8sDir = getE2eK8sDir(); + if (!e2eK8sDir) { + throw new Error('Missing k8s e2e test directory'); + } + const subprocess = await execa.command(`kubectl create -f ${path.join(e2eK8sDir, namespaceYaml)}`); + logger.debug(subprocess.stdout); +} + +export async function k8sSetup(): Promise { + const e2eK8sDir = getE2eK8sDir(); + if (!e2eK8sDir) { + throw new Error('Missing k8s e2e test directory'); + } + + let subprocess = await execa.command(`kubectl create -f ${path.join(e2eK8sDir, 'role.yaml')}`); + logger.debug(subprocess.stdout); + subprocess = await execa.command(`kubectl create -f ${path.join(e2eK8sDir, 'roleBinding.yaml')}`); + logger.debug(subprocess.stdout); + subprocess = await execa.command(`kubectl apply -f ${path.join(e2eK8sDir, 'priorityClass.yaml')}`); + logger.debug(subprocess.stdout); +} + +export async function deployK8sTeraslice() { + const e2eK8sDir = getE2eK8sDir(); + if (!e2eK8sDir) { + throw new Error('Missing k8s e2e test directory'); + } + + await deleteTerasliceNamespace(); + await createNamespace('ts-ns.yaml'); + await k8sSetup(); + try { + /// Creates configmap for terasclice-master + let subprocess = await execa.command(`kubectl create -n ts-dev1 configmap teraslice-master --from-file=${path.join(e2eK8sDir, 'masterConfig', 'teraslice.yaml')}`); + logger.debug(subprocess.stdout); + + /// Creates configmap for teraslice-worker + subprocess = await execa.command(`kubectl create -n ts-dev1 configmap teraslice-worker --from-file=${path.join(e2eK8sDir, 'workerConfig', 'teraslice.yaml')}`); + logger.debug(subprocess.stdout); + + /// Creates deployment for teraslice + subprocess = await execa.command(`kubectl create -n ts-dev1 -f ${path.join(e2eK8sDir, 'masterDeployment.yaml')}`); + logger.debug(subprocess.stdout); + } catch (err) { + logger.error('Error deploying Teraslice'); + logger.error(err); + 
process.exit(1); + } +} + +export async function setAliasAndBaseAssets(hostIP: string) { + await setAlias(hostIP); + await deployAssets('elasticsearch'); + await deployAssets('standard'); + await deployAssets('kafka'); +} + +async function setAlias(hostIP: string) { + let subprocess = await execa.command('earl aliases remove k8se2e 2> /dev/null || true', { shell: true }); + logger.debug(subprocess.stdout); + subprocess = await execa.command(`earl aliases add k8se2e http://${hostIP}:45678`); + logger.debug(subprocess.stdout); +} + +async function deployAssets(assetName: string) { + const subprocess = await execa.command(`earl assets deploy k8se2e --blocking terascope/${assetName}-assets`); + logger.debug(subprocess.stdout); +} + +export async function deleteTerasliceNamespace() { + try { + const subprocess = await execa.command('kubectl delete namespace ts-dev1'); + logger.debug(subprocess.stdout); + } catch (err) { + logger.debug('Teraslice namespace cannot be deleted because it does not exist'); + } +} + +export async function showState(hostIP: string) { + const subprocess = await execa.command('kubectl get deployments,po,svc --all-namespaces --show-labels -o wide'); + logger.debug(subprocess.stdout); + await showESIndices(hostIP); + await showAssets(hostIP); +} + +async function showESIndices(hostIP: string) { + const subprocess = await execa.command(`curl ${hostIP}:49200/_cat/indices?v`); + logger.debug(subprocess.stdout); +} + +async function showAssets(hostIP: string) { + try { + const subprocess = await execa.command(`curl ${hostIP}:45678/v1/assets`); + logger.debug(subprocess.stdout); + } catch (err) { + logger.debug(err); + } +} diff --git a/packages/scripts/src/helpers/test-runner/index.ts b/packages/scripts/src/helpers/test-runner/index.ts index 45d26a3628c..d3b7e8d0a78 100644 --- a/packages/scripts/src/helpers/test-runner/index.ts +++ b/packages/scripts/src/helpers/test-runner/index.ts @@ -9,21 +9,29 @@ import { import { ensureServices, pullServices } 
from './services'; import { PackageInfo } from '../interfaces'; import { TestOptions } from './interfaces'; -import { runJest, dockerTag } from '../scripts'; +import { + createKindCluster, + runJest, + dockerTag, + isKindInstalled, + isKubectlInstalled, + createNamespace, + loadTerasliceImage, + destroyKindCluster, +} from '../scripts'; import { getArgs, filterBySuite, globalTeardown, reportCoverage, logE2E, getEnv, groupBySuite } from './utils'; import signale from '../signale'; -import { getE2EDir, readPackageInfo, listPackages } from '../packages'; +import { + getE2EDir, readPackageInfo, listPackages +} from '../packages'; import { buildDevDockerImage } from '../publish/utils'; import { PublishOptions, PublishType } from '../publish/interfaces'; import { TestTracker } from './tracker'; -import { - MAX_PROJECTS_PER_BATCH, - SKIP_DOCKER_BUILD_IN_E2E -} from '../config'; +import { MAX_PROJECTS_PER_BATCH, SKIP_DOCKER_BUILD_IN_E2E } from '../config'; const logger = debugLogger('ts-scripts:cmd:test'); @@ -201,11 +209,31 @@ async function runE2ETest( throw new Error('Missing e2e test directory'); } + if (options.testPlatform === 'kubernetes') { + try { + const kindInstalled = await isKindInstalled(); + if (!kindInstalled && !isCI) { + signale.error('Please install Kind before running k8s tests. https://kind.sigs.k8s.io/docs/user/quick-start'); + process.exit(1); + } + + const kubectlInstalled = await isKubectlInstalled(); + if (!kubectlInstalled && !isCI) { + signale.error('Please install kubectl before running k8s tests. 
https://kubernetes.io/docs/tasks/tools/'); + process.exit(1); + } + + await createKindCluster(); + await createNamespace('services-ns.yaml'); + } catch (err) { + tracker.addError(err); + } + } + const rootInfo = getRootInfo(); - // const e2eImage = `${rootInfo.name}:e2e-nodev${options.nodeVersion}`; const e2eImage = `${rootInfo.name}:e2e`; - if (isCI) { + if (isCI && options.testPlatform === 'native') { // pull the services first in CI await pullServices(suite, options); } @@ -227,6 +255,14 @@ async function runE2ETest( tracker.addError(err); } + if (options.testPlatform === 'kubernetes') { + try { + await loadTerasliceImage(e2eImage); + } catch (err) { + tracker.addError(err); + } + } + try { tracker.addCleanup( 'e2e:services', @@ -285,6 +321,10 @@ async function runE2ETest( }]); }); } + + if (options.testPlatform === 'kubernetes' && !options.keepOpen) { + await destroyKindCluster(); + } } function printAndGetEnv(suite: string, options: TestOptions) { diff --git a/packages/scripts/src/helpers/test-runner/interfaces.ts b/packages/scripts/src/helpers/test-runner/interfaces.ts index 5ce208e7a3e..861dea54b33 100644 --- a/packages/scripts/src/helpers/test-runner/interfaces.ts +++ b/packages/scripts/src/helpers/test-runner/interfaces.ts @@ -14,12 +14,15 @@ export type TestOptions = { elasticsearchVersion: string; elasticsearchAPIVersion: string; kafkaVersion: string; + kafkaImageVersion: any; + zookeeperVersion: string; minioVersion: string; rabbitmqVersion: string; opensearchVersion: string; nodeVersion: string; jestArgs?: string[]; - ignoreMount: boolean + ignoreMount: boolean; + testPlatform: string; }; export type GroupedPackages = { diff --git a/packages/scripts/src/helpers/test-runner/services.ts b/packages/scripts/src/helpers/test-runner/services.ts index 5e2f67f96f2..6c3e450de92 100644 --- a/packages/scripts/src/helpers/test-runner/services.ts +++ b/packages/scripts/src/helpers/test-runner/services.ts @@ -10,7 +10,10 @@ import { DockerRunOptions, 
getContainerInfo, dockerStop, - dockerPull + dockerPull, + kindLoadServiceImage, + kindStartService, + kindStopService } from '../scripts'; import { TestOptions } from './interfaces'; import { Service } from '../interfaces'; @@ -85,7 +88,8 @@ const services: Readonly>> = { 'http.port': config.OPENSEARCH_PORT, 'discovery.type': 'single-node', DISABLE_INSTALL_DEMO_CONFIG: 'true', - DISABLE_SECURITY_PLUGIN: 'true' + DISABLE_SECURITY_PLUGIN: 'true', + DISABLE_PERFORMANCE_ANALYZER_AGENT_CLI: 'true' }, network: config.DOCKER_NETWORK_NAME }, @@ -97,12 +101,27 @@ const services: Readonly>> = { : undefined, ports: [`${config.KAFKA_PORT}:${config.KAFKA_PORT}`], env: { - KAFKA_HEAP_OPTS: config.SERVICE_HEAP_OPTS, - KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true', + KAFKA_BROKER_ID: config.KAFKA_BROKER_ID, KAFKA_ADVERTISED_HOST_NAME: config.HOST_IP, - KAFKA_ADVERTISED_PORT: config.KAFKA_PORT, - KAFKA_PORT: config.KAFKA_PORT, - KAFKA_NUM_PARTITIONS: '2', + KAFKA_ZOOKEEPER_CONNECT: config.KAFKA_ZOOKEEPER_CONNECT, + KAFKA_LISTENERS: config.KAFKA_LISTENERS, + KAFKA_ADVERTISED_LISTENERS: config.KAFKA_ADVERTISED_LISTENERS, + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: config.KAFKA_LISTENER_SECURITY_PROTOCOL_MAP, + KAFKA_INTER_BROKER_LISTENER_NAME: config.KAFKA_INTER_BROKER_LISTENER_NAME, + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: config.KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR + }, + network: config.DOCKER_NETWORK_NAME + }, + [Service.Zookeeper]: { + image: config.ZOOKEEPER_DOCKER_IMAGE, + name: `${config.TEST_NAMESPACE}_zookeeper`, + tmpfs: config.SERVICES_USE_TMPFS + ? 
['/tmp/zookeeper-logs'] + : undefined, + ports: [`${config.ZOOKEEPER_CLIENT_PORT}:${config.ZOOKEEPER_CLIENT_PORT}`], + env: { + ZOOKEEPER_CLIENT_PORT: config.ZOOKEEPER_CLIENT_PORT, + ZOOKEEPER_TICK_TIME: config.ZOOKEEPER_TICK_TIME }, network: config.DOCKER_NETWORK_NAME }, @@ -161,7 +180,12 @@ export async function pullServices(suite: string, options: TestOptions): Promise } if (launchServices.includes(Service.Kafka)) { - const image = `${config.KAFKA_DOCKER_IMAGE}:${options.kafkaVersion}`; + const image = `${config.KAFKA_DOCKER_IMAGE}:${options.kafkaImageVersion}`; + images.push(image); + } + + if (launchServices.includes(Service.Zookeeper)) { + const image = `${config.ZOOKEEPER_DOCKER_IMAGE}:${options.zookeeperVersion}`; images.push(image); } @@ -221,6 +245,10 @@ export async function ensureServices(suite: string, options: TestOptions): Promi promises.push(ensureKafka(options)); } + if (launchServices.includes(Service.Zookeeper)) { + promises.push(ensureZookeeper(options)); + } + if (launchServices.includes(Service.Minio)) { promises.push(ensureMinio(options)); } @@ -242,15 +270,23 @@ export async function ensureServices(suite: string, options: TestOptions): Promi } export async function ensureKafka(options: TestOptions): Promise<() => void> { - let fn = () => {}; + let fn = () => { }; const startTime = Date.now(); fn = await startService(options, Service.Kafka); await checkKafka(options, startTime); return fn; } +export async function ensureZookeeper(options: TestOptions): Promise<() => void> { + let fn = () => { }; + const startTime = Date.now(); + fn = await startService(options, Service.Zookeeper); + await checkZookeeper(options, startTime); + return fn; +} + export async function ensureMinio(options: TestOptions): Promise<() => void> { - let fn = () => {}; + let fn = () => { }; const startTime = Date.now(); fn = await startService(options, Service.Minio); await checkMinio(options, startTime); @@ -258,7 +294,7 @@ export async function ensureMinio(options: 
TestOptions): Promise<() => void> { } export async function ensureElasticsearch(options: TestOptions): Promise<() => void> { - let fn = () => {}; + let fn = () => { }; const startTime = Date.now(); fn = await startService(options, Service.Elasticsearch); await checkElasticsearch(options, startTime); @@ -266,7 +302,7 @@ export async function ensureElasticsearch(options: TestOptions): Promise<() => v } export async function ensureRestrainedElasticsearch(options: TestOptions): Promise<() => void> { - let fn = () => {}; + let fn = () => { }; const startTime = Date.now(); fn = await startService(options, Service.RestrainedElasticsearch); await checkRestrainedElasticsearch(options, startTime); @@ -274,7 +310,7 @@ export async function ensureRestrainedElasticsearch(options: TestOptions): Promi } export async function ensureRestrainedOpensearch(options: TestOptions): Promise<() => void> { - let fn = () => {}; + let fn = () => { }; const startTime = Date.now(); fn = await startService(options, Service.RestrainedOpensearch); await checkRestrainedOpensearch(options, startTime); @@ -282,7 +318,7 @@ export async function ensureRestrainedOpensearch(options: TestOptions): Promise< } export async function ensureOpensearch(options: TestOptions): Promise<() => void> { - let fn = () => {}; + let fn = () => { }; const startTime = Date.now(); fn = await startService(options, Service.Opensearch); await checkOpensearch(options, startTime); @@ -290,7 +326,7 @@ export async function ensureOpensearch(options: TestOptions): Promise<() => void } export async function ensureRabbitMQ(options: TestOptions): Promise<() => void> { - let fn = () => {}; + let fn = () => { }; const startTime = Date.now(); fn = await startService(options, Service.RabbitMQ); await checkRabbitMQ(options, startTime); @@ -679,6 +715,11 @@ async function checkKafka(options: TestOptions, startTime: number) { signale.success(`kafka@${options.kafkaVersion} *might* be running at ${config.KAFKA_BROKER}, took ${took}`); } +async 
function checkZookeeper(options: TestOptions, startTime: number) { + const took = ts.toHumanTime(Date.now() - startTime); + signale.success(` zookeeper*might* be running, took ${took}`); +} + async function startService(options: TestOptions, service: Service): Promise<() => void> { let serviceName = service; @@ -689,14 +730,25 @@ async function startService(options: TestOptions, service: Service): Promise<() if (serviceName === 'restrained_opensearch') { serviceName = Service.Opensearch; } - - const version = options[`${serviceName}Version`] as string; + let version:string; + if (serviceName === 'kafka') { + version = options[`${serviceName}ImageVersion`] as string; + signale.pending(`starting ${service}@${options.kafkaVersion} service...`); + } else { + version = options[`${serviceName}Version`] as string; + signale.pending(`starting ${service}@${version} service...`); + } if (options.useExistingServices) { signale.warn(`expecting ${service}@${version} to be running (this can be dangerous)...`); - return () => {}; + return () => { }; } - signale.pending(`starting ${service}@${version} service...`); + if (options.testPlatform === 'kubernetes') { + await kindStopService(service); + await kindLoadServiceImage(service, services[service].image, version); + await kindStartService(service); + return () => { }; + } await stopService(service); diff --git a/packages/scripts/test/service-spec.ts b/packages/scripts/test/service-spec.ts index a0587bfedba..5b158627eeb 100644 --- a/packages/scripts/test/service-spec.ts +++ b/packages/scripts/test/service-spec.ts @@ -15,11 +15,14 @@ describe('services', () => { elasticsearchVersion: 'bad-version', elasticsearchAPIVersion: '6.8', kafkaVersion: 'very-bad-version', + kafkaImageVersion: 'very-bad-version', + zookeeperVersion: 'very-bad-version', minioVersion: 'very-bad-version', rabbitmqVersion: 'very-bad-version', opensearchVersion: 'very-bad-version', nodeVersion: 'very-bad-version', - ignoreMount: false + ignoreMount: false, + 
testPlatform: 'native' }; describe('pullServices', () => { diff --git a/packages/scripts/test/test-runner-spec.ts b/packages/scripts/test/test-runner-spec.ts index 3c8259af576..3623d444725 100644 --- a/packages/scripts/test/test-runner-spec.ts +++ b/packages/scripts/test/test-runner-spec.ts @@ -21,11 +21,14 @@ describe('Test Runner Helpers', () => { elasticsearchAPIVersion: '', elasticsearchVersion: '', kafkaVersion: '', + kafkaImageVersion: '', + zookeeperVersion: '', minioVersion: '', rabbitmqVersion: '', opensearchVersion: '', nodeVersion: '', - ignoreMount: true + ignoreMount: true, + testPlatform: 'native' }; function makeTestOptions(input: Partial): TestOptions {