diff --git a/vectordb_bench/backend/runner/read_write_runner.py b/vectordb_bench/backend/runner/read_write_runner.py
index 4c7cb7cd..e4e7d0fc 100644
--- a/vectordb_bench/backend/runner/read_write_runner.py
+++ b/vectordb_bench/backend/runner/read_write_runner.py
@@ -83,19 +83,20 @@ def run_read_write(self):
     def run_search_by_sig(self, q):
         res = []
         total_batch = math.ceil(self.data_volume / self.insert_rate)
-        batch = 1
+        batch = 0
         recall = 'x'
 
         for idx, stage in enumerate(self.search_stage):
             target_batch = int(total_batch * stage)
             while q.get(block=True):
+                batch += 1
                 if batch >= target_batch:
                     perc = int(stage * 100)
                     log.info(f"Insert {perc}% done, total batch={total_batch}")
                     log.info(f"[{batch:5}/{total_batch}] Serial search - {perc}% start")
                     recall, ndcg, p99 =self.serial_search_runner.run()
 
                     if idx < len(self.search_stage) - 1:
                         stage_search_dur = (self.data_volume * (self.search_stage[idx + 1] - stage) // self.insert_rate) // len(self.concurrencies)
                         if stage_search_dur < 30:
                             log.warning(f"Search duration too short, please reduce concurrency count or insert rate, or increase dataset volume: dur={stage_search_dur}, concurrencies={len(self.concurrencies)}, insert_rate={self.insert_rate}")
@@ -108,6 +109,4 @@ def run_search_by_sig(self, q):
                 max_qps = self.run_by_dur(stage_search_dur)
                 res.append((perc, max_qps, recall))
                 break
-
-            batch += 1
         return res