From 40825e3107c1f439b9a8c98e2c5b9b226b825592 Mon Sep 17 00:00:00 2001 From: QianqianNie <44004519+QianqianNie@users.noreply.github.com> Date: Fri, 18 Oct 2024 21:38:00 -0700 Subject: [PATCH] Release fmi version 55 (#3487) * Release fmi 55 that works with vision models * release fmi 55 * upgrade to 0.2.14 --- .../foundation-model-inference/context/Dockerfile | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/assets/training/model_management/environments/foundation-model-inference/context/Dockerfile b/assets/training/model_management/environments/foundation-model-inference/context/Dockerfile index e1e2bfc47c..4c92e54b11 100644 --- a/assets/training/model_management/environments/foundation-model-inference/context/Dockerfile +++ b/assets/training/model_management/environments/foundation-model-inference/context/Dockerfile @@ -47,12 +47,7 @@ RUN pip install git+https://github.com/stanford-futuredata/megablocks.git@5897cd # RUN pip install -e ./ --no-cache-dir # When copied to assets repo, change to install from public pypi -RUN pip install llm-optimized-inference==0.2.12 --no-cache-dir - -RUN pip uninstall transformers -y -RUN pip uninstall -y vllm -RUN pip install https://automlsamplenotebookdata.blob.core.windows.net/vllm/transformers-4.45.0.dev0-py3-none-any.whl -RUN pip install https://automlsamplenotebookdata.blob.core.windows.net/vllm/vllm-0.6.1.post2+cu124-cp38-abi3-manylinux1_x86_64.whl +RUN pip install llm-optimized-inference==0.2.14 --no-cache-dir # clean conda and pip caches RUN rm -rf ~/.cache/pip