From 502544d6633a7d5592e3fc524e48c7ce8dce5dd6 Mon Sep 17 00:00:00 2001
From: zhuwenxing
Date: Mon, 16 Oct 2023 02:44:20 -0500
Subject: [PATCH] [ci]Update perf test (#222)

Signed-off-by: zhuwenxing
---
 .github/workflows/main.yaml         | 28 ----------
 .github/workflows/nightly.yaml      |  2 +-
 .github/workflows/perf.yaml         | 13 ++++-
 tests/requirements.txt              |  4 +-
 tests/testcases/test_backup_perf.py | 86 ++++++++++++++---------------
 5 files changed, 58 insertions(+), 75 deletions(-)

diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml
index 25c79079..e12cb9d3 100644
--- a/.github/workflows/main.yaml
+++ b/.github/workflows/main.yaml
@@ -2,37 +2,9 @@ name: Test
 
 on:
   push:
-    paths:
-      - 'build/**'
-      - 'cmd/**'
-      - 'configs/**'
-      - 'core/**'
-      - 'example/**'
-      - 'internal/**'
-      - 'tests/**'
-      - '!**.md'
-      - '.github/workflows/main.yaml'
-      - 'deployment/**'
-      - '**/*.go'
-      - '**/go.mod'
-      - '**/go.sum'
     branches:
      - main
   pull_request:
-    paths:
-      - 'build/**'
-      - 'cmd/**'
-      - 'configs/**'
-      - 'core/**'
-      - 'example/**'
-      - 'internal/**'
-      - 'tests/**'
-      - '!**.md'
-      - '.github/workflows/main.yaml'
-      - 'deployment/**'
-      - '**/*.go'
-      - '**/go.mod'
-      - '**/go.sum'
     branches:
      - main
   workflow_dispatch:
diff --git a/.github/workflows/nightly.yaml b/.github/workflows/nightly.yaml
index f1dcb5f5..99f15292 100644
--- a/.github/workflows/nightly.yaml
+++ b/.github/workflows/nightly.yaml
@@ -93,7 +93,7 @@ jobs:
         shell: bash
         working-directory: tests
         run: |
-          pytest -s -v --tags L0, L1, L2, L3
+          pytest -s -v --tags L0 L1 L2 L3
 
       - name: Get Milvus status
         shell: bash
diff --git a/.github/workflows/perf.yaml b/.github/workflows/perf.yaml
index 7dc524d7..fe6e8719 100644
--- a/.github/workflows/perf.yaml
+++ b/.github/workflows/perf.yaml
@@ -1,9 +1,18 @@
 name: Perf Test
 
 on:
+  push:
+    branches:
+      - main
+  pull_request:
+    branches:
+      - main
   workflow_dispatch:
   schedule:
-    - cron: '0 2 * * *'
+    - cron: '0 4 * * *'
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
 
 jobs:
   test-backup-restore-api:
@@ -91,7 +100,7 @@ jobs:
         shell: bash
         working-directory: tests
         run: |
-          pytest -s -v --tags Perf
+          pytest -s -v --tags Perf --log-cli-level=INFO --capture=no
 
       - name: Get Milvus status
         shell: bash
diff --git a/tests/requirements.txt b/tests/requirements.txt
index e64bf717..0cd96874 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -42,4 +42,6 @@ protobuf==3.20.0
 minio==7.1.5
 
 # for benchmark
-h5py==3.1.0
\ No newline at end of file
+h5py==3.1.0
+pytest-benchmark==4.0.0
+
diff --git a/tests/testcases/test_backup_perf.py b/tests/testcases/test_backup_perf.py
index 5ec2735b..4d537eb4 100644
--- a/tests/testcases/test_backup_perf.py
+++ b/tests/testcases/test_backup_perf.py
@@ -1,12 +1,12 @@
 from time import sleep
 import pytest
-
+import time
 from base.client_base import TestcaseBase
 from common import common_func as cf
 from common.common_type import CaseLabel
 from utils.util_log import test_log as log
 from api.milvus_backup import MilvusBackupClient
-from checker import Op, BackupCreateChecker, BackupRestoreChecker, start_monitor_threads
+from checker import BackupCreateChecker, BackupRestoreChecker
 
 c_name_prefix = "perf_backup"
 backup_prefix = "backup"
@@ -17,35 +17,18 @@ class TestPerf(TestcaseBase):
 
     """ Test case of performance"""
 
-    def test_milvus_create_backup_perf(self):
-        # prepare data
-        total_nb = 10000
-        cnt = 10
-        coll_num = 2
-        collections_to_backup = []
-        for i in range(coll_num):
-            collection_to_backup = cf.gen_unique_str(c_name_prefix)
-            for j in range(cnt):
-                self.prepare_data(collection_to_backup, nb=total_nb // cnt)
-            collections_to_backup.append(collection_to_backup)
-        checkers = {
-            Op.create: BackupCreateChecker(collections_to_backup)
-        }
-        start_monitor_threads(checkers)
-        log.info("*********************Perf Test Start**********************")
-        sleep(360)
-        for k, v in checkers.items():
-            v.check_result()
-        for k, v in checkers.items():
-            v.terminate()
-        sleep(10)
-        log.info("*********************Perf Test End**********************")
+    prepare_data_done = False
 
-    def test_milvus_restore_backup_perf(self):
-        # prepare data
-        total_nb = 10000
+    def setup_perf(self, nb=1000):
+        log.info(f"*****************Test Perf Setup With nb {nb}*****************")
+        if self.prepare_data_done:
+            log.info(f"*****************Test Perf Setup With nb {nb} Done, Skip*****************")
+            return
+        else:
+            log.info(f"*****************Test Perf Setup With nb {nb} Start*****************")
+        total_nb = nb
         cnt = 10
-        coll_num = 2
+        coll_num = 1
         collections_to_backup = []
         for i in range(coll_num):
             collection_to_backup = cf.gen_unique_str(c_name_prefix)
@@ -53,18 +36,35 @@ def test_milvus_restore_backup_perf(self):
                 self.prepare_data(collection_to_backup, nb=total_nb // cnt)
             collections_to_backup.append(collection_to_backup)
         backup_name = cf.gen_unique_str(backup_prefix)
-        suffix = "_bak"
-        client.create_backup({"async": False, "backup_name": backup_name, "collection_names": collections_to_backup})
-        checkers = {
-            Op.restore: BackupRestoreChecker(backup_name, suffix, collections_to_backup)
-        }
-        start_monitor_threads(checkers)
-        log.info("*********************Perf Test Start**********************")
-        sleep(360)
-        for k, v in checkers.items():
-            v.check_result()
-        for k, v in checkers.items():
-            v.terminate()
-        sleep(10)
-        log.info("*********************Perf Test End**********************")
+        self.collections_to_backup = collections_to_backup
+        self.backup_name = backup_name
+        self.prepare_data_done = True
+
+    def backup_perf(self):
+        log.info("*****************Test Backup Perf Start*****************")
+        t0 = time.perf_counter()
+        res, result = BackupCreateChecker(self.collections_to_backup).run_task()
+        t1 = time.perf_counter()
+        log.info(f"create backup time: {t1 - t0} with {res}, {result}")
+        return res, result
+
+    def restore_perf(self):
+        log.info("*****************Test Restore Perf Start*****************")
+        t0 = time.perf_counter()
+        res, result = BackupRestoreChecker(self.backup_name, "_bak", self.collections_to_backup).run_task()
+        t1 = time.perf_counter()
+        log.info(f"restore backup time: {t1 - t0} with {res}, {result}")
+        return res, result
+
+    @pytest.mark.parametrize("nb", [100000])
+    def test_milvus_create_backup_perf(self, benchmark, nb):
+        self.setup_perf(nb=nb)
+        res, result = benchmark.pedantic(self.backup_perf, iterations=1, rounds=5)
+        assert result is True
+
+    @pytest.mark.parametrize("nb", [100000])
+    def test_milvus_restore_backup_perf(self, benchmark, nb):
+        self.setup_perf(nb=nb)
+        res, result = benchmark.pedantic(self.restore_perf, setup=self.setup_perf, iterations=1, rounds=5)
+        assert result is True
 
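Note on the benchmark mechanism the reworked tests rely on (illustration only, not part of the patch): pytest-benchmark's pedantic mode runs the measured callable a fixed number of iterations per round, invokes a setup callable before every round, and returns the value of the final call, which is why the new tests can unpack (res, result) directly. A minimal, self-contained sketch of that pattern follows; do_backup and prepare_once are hypothetical stand-ins for BackupCreateChecker(...).run_task() and setup_perf(), and iterations stays at 1 because pedantic mode only accepts a setup callable together with a single iteration per round.

    import time

    _prepared = {"done": False}

    def prepare_once():
        # Stand-in for setup_perf(): the expensive data load happens once;
        # later rounds see the flag and skip straight to the measurement.
        if not _prepared["done"]:
            _prepared["done"] = True

    def do_backup():
        # Stand-in for BackupCreateChecker(...).run_task(); returns (elapsed, ok).
        t0 = time.perf_counter()
        time.sleep(0.01)  # simulate the backup call being timed
        return time.perf_counter() - t0, True

    def test_backup_perf(benchmark):
        # `benchmark` is the pytest-benchmark fixture (pytest-benchmark==4.0.0
        # is added to tests/requirements.txt by this patch).
        elapsed, ok = benchmark.pedantic(do_backup, setup=prepare_once,
                                         iterations=1, rounds=5)
        assert ok is True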