From e8ce81e8cd3f29f6c74792db3e780b414240697a Mon Sep 17 00:00:00 2001 From: Agata Dobrzyniewicz Date: Tue, 17 Dec 2024 15:17:41 +0200 Subject: [PATCH] formatting --- vllm/worker/hpu_model_runner.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/vllm/worker/hpu_model_runner.py b/vllm/worker/hpu_model_runner.py index 83d9af5896725..2eb54ceabf46f 100755 --- a/vllm/worker/hpu_model_runner.py +++ b/vllm/worker/hpu_model_runner.py @@ -43,7 +43,8 @@ from vllm.model_executor.models import supports_multimodal from vllm.model_executor.sampling_metadata import SequenceGroupToSample from vllm.multimodal import (MULTIMODAL_REGISTRY, BatchedTensorInputs, - MultiModalKwargs, MultiModalRegistry, MultiModalPlaceholderMap) + MultiModalKwargs, MultiModalPlaceholderMap, + MultiModalRegistry) from vllm.sampling_params import SamplingParams from vllm.sequence import (CompletionSequenceGroupOutput, IntermediateTensors, Logprob, SequenceData, SequenceGroupMetadata, @@ -794,9 +795,9 @@ def _prepare_prompt( prefix_block_tables: List[List[int]] = [] multi_modal_kwargs_list: List[MultiModalKwargs] = [] multi_modal_placeholder_maps: Dict[ - str, - MultiModalPlaceholderMap] = collections.defaultdict(MultiModalPlaceholderMap) - + str, MultiModalPlaceholderMap] = collections.defaultdict( + MultiModalPlaceholderMap) + if len(seq_group_metadata_list) == 0: return PreparePromptMetadata.empty() @@ -853,8 +854,8 @@ def _prepare_prompt( # is always the first token in the sequence. 
input_positions.append(list(range(context_len, seq_len))) - mm_data = seq_group_metadata.multi_modal_data - if mm_data: + mm_data = seq_group_metadata.multi_modal_data + if mm_data: mm_kwargs = self.multi_modal_input_mapper(mm_data) multi_modal_kwargs_list.append(mm_kwargs) @@ -865,10 +866,7 @@ def _prepare_prompt( mm_data, placeholder_maps = MultiModalPlaceholderMap \ .from_seq_group(seq_group_metadata, positions_range) - print(mm_data) - - if self.mm_registry.has_processor( - self.model_config): + if self.mm_registry.has_processor(self.model_config): mm_kwargs = mm_data else: mm_kwargs = self.multi_modal_input_mapper(