From 8f53deece71c6d416261173e7d0537db747224d0 Mon Sep 17 00:00:00 2001
From: Karol Damaszke
Date: Wed, 8 Jan 2025 12:55:29 +0100
Subject: [PATCH] Add mllama support to benchmark_throughput (#668)

---
 benchmarks/benchmark_throughput.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/benchmarks/benchmark_throughput.py b/benchmarks/benchmark_throughput.py
index 0c053e1cf5955..732eba9e25f51 100644
--- a/benchmarks/benchmark_throughput.py
+++ b/benchmarks/benchmark_throughput.py
@@ -59,6 +59,8 @@ def _get_prompt_for_image_model(question: str, *, model: str) -> str:
         return f"[INST]{question}\n[IMG][/INST]"
     elif "llava" in model:
         return f"USER: <image>\n{question}\nASSISTANT:"
+    elif "llama-3.2" in model:
+        return f"<|image|><|begin_of_text|>{question}"
     raise ValueError(f"Unsupported model {model}")
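
Below is a minimal, self-contained sketch (not part of the patch) of how the _get_prompt_for_image_model helper behaves once this change is applied. It reproduces only the branches visible in the hunk above; the condition guarding the first [INST]...[/INST] return lies outside the hunk and is omitted, and the example model name used in the usage block is a hypothetical lowercase id chosen so the plain substring check matches.

# Sketch of the prompt helper with the new mllama branch, limited to the
# branches visible in the diff hunk above.
def _get_prompt_for_image_model(question: str, *, model: str) -> str:
    if "llava" in model:
        return f"USER: <image>\n{question}\nASSISTANT:"
    elif "llama-3.2" in model:
        # mllama (Llama 3.2 Vision) prompts start with the image token,
        # then the begin-of-text token, then the question.
        return f"<|image|><|begin_of_text|>{question}"
    raise ValueError(f"Unsupported model {model}")


if __name__ == "__main__":
    # Hypothetical usage; the lowercase model id is an assumption for
    # illustration so that "llama-3.2" matches the substring check.
    prompt = _get_prompt_for_image_model(
        "What is shown in this image?",
        model="meta-llama/llama-3.2-11b-vision-instruct")
    print(prompt)  # <|image|><|begin_of_text|>What is shown in this image?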