diff --git a/.github/workflows/_comps-workflow.yml b/.github/workflows/_comps-workflow.yml
index 12db04eb7..0e693e75e 100644
--- a/.github/workflows/_comps-workflow.yml
+++ b/.github/workflows/_comps-workflow.yml
@@ -65,7 +65,6 @@ jobs:
           fi
           if [[ $(grep -c "vllm-gaudi:" ${docker_compose_yml}) != 0 ]]; then
             git clone https://github.com/HabanaAI/vllm-fork.git vllm-fork
-            cd vllm-fork && git checkout 3c39626 && cd ../
           fi
       - name: Get build list
         id: get-build-list
diff --git a/comps/third_parties/vllm/src/build_docker_vllm.sh b/comps/third_parties/vllm/src/build_docker_vllm.sh
index bcbf20c4a..20d4f8df5 100644
--- a/comps/third_parties/vllm/src/build_docker_vllm.sh
+++ b/comps/third_parties/vllm/src/build_docker_vllm.sh
@@ -37,7 +37,6 @@ fi
 if [ "$hw_mode" = "hpu" ]; then
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd ./vllm-fork/
-    git checkout 3c39626
     docker build -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     cd ..
     rm -rf vllm-fork
diff --git a/tests/llms/test_llms_doc-summarization_langchain_vllm_on_intel_hpu.sh b/tests/llms/test_llms_doc-summarization_langchain_vllm_on_intel_hpu.sh
index ad9d72d0f..0e97d8e13 100644
--- a/tests/llms/test_llms_doc-summarization_langchain_vllm_on_intel_hpu.sh
+++ b/tests/llms/test_llms_doc-summarization_langchain_vllm_on_intel_hpu.sh
@@ -12,7 +12,6 @@ function build_docker_images() {
     cd $WORKPATH
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
-    git checkout 3c39626
     docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
diff --git a/tests/llms/test_llms_faq-generation_langchain_vllm_on_intel_hpu.sh b/tests/llms/test_llms_faq-generation_langchain_vllm_on_intel_hpu.sh
index 37d3be22d..57d4f4207 100644
--- a/tests/llms/test_llms_faq-generation_langchain_vllm_on_intel_hpu.sh
+++ b/tests/llms/test_llms_faq-generation_langchain_vllm_on_intel_hpu.sh
@@ -12,7 +12,6 @@ function build_docker_images() {
     cd $WORKPATH
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
-    git checkout 3c39626
     docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
diff --git a/tests/llms/test_llms_text-generation_opea_vllm_on_intel_hpu.sh b/tests/llms/test_llms_text-generation_opea_vllm_on_intel_hpu.sh
index eb5911bb6..05c644ef0 100644
--- a/tests/llms/test_llms_text-generation_opea_vllm_on_intel_hpu.sh
+++ b/tests/llms/test_llms_text-generation_opea_vllm_on_intel_hpu.sh
@@ -12,7 +12,6 @@ function build_docker_images() {
     cd $WORKPATH
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
-    git checkout 3c39626
     docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
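
This diff drops the hard pin to commit 3c39626, so every build path above now clones HabanaAI/vllm-fork and builds whatever commit is currently at the HEAD of its default branch. For reference, a minimal sketch of the resulting HPU build flow, mirroring the build_docker_vllm.sh hunk above (the proxy variables are assumed to be set in the caller's environment):

```bash
# Clone vllm-fork at the default branch HEAD (no longer pinned to 3c39626)
git clone https://github.com/HabanaAI/vllm-fork.git
cd ./vllm-fork/

# Build the Gaudi (HPU) image; proxy settings pass through from the environment
docker build -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g . \
    --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy

# Clean up the source tree after the image is built
cd .. && rm -rf vllm-fork
```

The same clone-and-build sequence now applies in the CI workflow and all four test scripts; only the image tag differs (opea/vllm-gaudi:latest vs. opea/vllm-gaudi:comps in the text-generation test).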