diff --git a/requirements-hpu.txt b/requirements-hpu.txt
index 07f9c31117e49..ddf1caccf41d8 100644
--- a/requirements-hpu.txt
+++ b/requirements-hpu.txt
@@ -8,7 +8,5 @@ pandas
 tabulate
 setuptools>=61
 setuptools-scm>=8
-vllm-hpu-extension @ git+https://github.com/HabanaAI/vllm-hpu-extension.git@61334c5
+vllm-hpu-extension @ git+https://github.com/HabanaAI/vllm-hpu-extension.git@ac9740d
 neural-compressor @ git+https://github.com/intel/neural-compressor.git@b196432
-
-
diff --git a/vllm/worker/hpu_worker.py b/vllm/worker/hpu_worker.py
index 2b8f955265792..1004af0eca40a 100644
--- a/vllm/worker/hpu_worker.py
+++ b/vllm/worker/hpu_worker.py
@@ -166,7 +166,9 @@ def determine_num_available_blocks(self) -> Tuple[int, int]:
         if is_fake_hpu():
             cache_block_size = self.get_cache_block_size_bytes()
             fake_hpu_cache_alloc = 4 * 2**30  # take 4 GiB flat on fake hpu
-            return fake_hpu_cache_alloc // cache_block_size, 0
+            num_fake_hpu_blocks = fake_hpu_cache_alloc // cache_block_size
+            self.model_runner.bucketing_ctx.num_hpu_blocks = num_fake_hpu_blocks
+            return num_fake_hpu_blocks, 0
         with HabanaMemoryProfiler() as m:
             self.model_runner.profile_run()
             torch.hpu.synchronize()
@@ -203,6 +205,8 @@ def determine_num_available_blocks(self) -> Tuple[int, int]:
         num_hpu_blocks = max(num_hpu_blocks, 0)
         num_cpu_blocks = max(num_cpu_blocks, 0)
 
+        self.model_runner.bucketing_ctx.num_hpu_blocks = num_hpu_blocks
+
         if self.model_runner.lora_manager:
             self.model_runner.remove_all_loras()
 
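
The patch above makes the worker publish the KV-cache block count computed in determine_num_available_blocks() into the model runner's bucketing context on both the fake-HPU and real-HPU paths. Below is a minimal, hedged sketch of why bucket generation needs that value before it can run; FakeBucketingCtx, its generate_decode_buckets() method, and the power-of-two bucket scheme are illustrative assumptions, not the actual vllm-hpu-extension API.

# Sketch only: assumed names, not the vllm-hpu-extension implementation.
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class FakeBucketingCtx:
    # Set by the worker once determine_num_available_blocks() has run;
    # mirrors the bucketing_ctx.num_hpu_blocks assignment in the patch.
    num_hpu_blocks: Optional[int] = None
    decode_block_buckets: List[int] = field(default_factory=list)

    def generate_decode_buckets(self) -> None:
        if self.num_hpu_blocks is None:
            raise RuntimeError("num_hpu_blocks must be set before bucketing")
        # Hypothetical scheme: power-of-two buckets capped at the block count.
        bucket, buckets = 1, []
        while bucket < self.num_hpu_blocks:
            buckets.append(bucket)
            bucket *= 2
        buckets.append(self.num_hpu_blocks)
        self.decode_block_buckets = buckets


if __name__ == "__main__":
    ctx = FakeBucketingCtx()
    # Mirrors the fake-HPU path: a flat 4 GiB cache split into fixed-size
    # blocks (2 MiB per block is an assumed figure for illustration).
    ctx.num_hpu_blocks = 4 * 2**30 // (2 * 2**20)
    ctx.generate_decode_buckets()
    assert ctx.decode_block_buckets[-1] == ctx.num_hpu_blocks

Without the assignment added in the patch, a context like this would still see num_hpu_blocks unset when buckets are generated, which is the failure mode the change addresses.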