diff --git a/.github/workflows/_run-docker-compose.yml b/.github/workflows/_run-docker-compose.yml
index daf87add83..b62c961336 100644
--- a/.github/workflows/_run-docker-compose.yml
+++ b/.github/workflows/_run-docker-compose.yml
@@ -134,6 +134,7 @@ jobs:
      SERVING_TOKEN: ${{ secrets.SERVING_TOKEN }}
      IMAGE_REPO: ${{ inputs.registry }}
      IMAGE_TAG: ${{ inputs.tag }}
+     opea_branch: "refactor_comps"
      example: ${{ inputs.example }}
      hardware: ${{ inputs.hardware }}
      test_case: ${{ matrix.test_case }}
diff --git a/.github/workflows/pr-check-duplicated-image.yml b/.github/workflows/pr-check-duplicated-image.yml
index 0cdba415a2..834336606c 100644
--- a/.github/workflows/pr-check-duplicated-image.yml
+++ b/.github/workflows/pr-check-duplicated-image.yml
@@ -5,7 +5,7 @@ name: Check Duplicated Images
on:
  pull_request:
-    branches: [main]
+    branches: [main, genaicomps_refactor]
    types: [opened, reopened, ready_for_review, synchronize]
    paths:
      - "**/docker_image_build/*.yaml"
diff --git a/.github/workflows/pr-docker-compose-e2e.yml b/.github/workflows/pr-docker-compose-e2e.yml
index 687ae047a3..95f0e59205 100644
--- a/.github/workflows/pr-docker-compose-e2e.yml
+++ b/.github/workflows/pr-docker-compose-e2e.yml
@@ -4,8 +4,8 @@ name: E2E test with docker compose
on:
-  pull_request_target:
-    branches: ["main", "*rc"]
+  pull_request:
+    branches: ["main", "*rc", "genaicomps_refactor"]
    types: [opened, reopened, ready_for_review, synchronize] # added `ready_for_review` since draft is skipped
    paths:
      - "**/Dockerfile**"
diff --git a/.github/workflows/pr-dockerfile-path-and-build-yaml-scan.yml b/.github/workflows/pr-dockerfile-path-and-build-yaml-scan.yml
index ededdba43c..78d3df3424 100644
--- a/.github/workflows/pr-dockerfile-path-and-build-yaml-scan.yml
+++ b/.github/workflows/pr-dockerfile-path-and-build-yaml-scan.yml
@@ -5,7 +5,7 @@ name: Compose file and dockerfile path checking
on:
  pull_request:
-    branches: [main]
+    branches: [main, genaicomps_refactor]
    types: [opened, reopened, ready_for_review, synchronize]

jobs:
@@ -22,6 +22,7 @@ jobs:
        run: |
          cd ..
          git clone https://github.com/opea-project/GenAIComps.git
+         cd GenAIComps && git checkout refactor_comps

      - name: Check for Missing Dockerfile Paths in GenAIComps
        run: |
diff --git a/.github/workflows/pr-link-path-scan.yml b/.github/workflows/pr-link-path-scan.yml
index 77bf0d293f..8a01d15662 100644
--- a/.github/workflows/pr-link-path-scan.yml
+++ b/.github/workflows/pr-link-path-scan.yml
@@ -5,7 +5,7 @@ name: Check hyperlinks and relative path validity
on:
  pull_request:
-    branches: [main]
+    branches: [main, genaicomps_refactor]
    types: [opened, reopened, ready_for_review, synchronize]

jobs:
diff --git a/AudioQnA/audioqna.py b/AudioQnA/audioqna.py
index efbd5ddc5b..2343a66771 100644
--- a/AudioQnA/audioqna.py
+++ b/AudioQnA/audioqna.py
@@ -40,6 +40,15 @@ def align_inputs(self, inputs, cur_node, runtime_graph, llm_parameters_dict, **k
        return inputs


+def align_inputs(self, inputs, cur_node, runtime_graph, llm_parameters_dict, **kwargs):
+    if self.services[cur_node].service_type == ServiceType.TTS:
+        new_inputs = {}
+        new_inputs["text"] = inputs["choices"][0]["text"]
+        return new_inputs
+    else:
+        return inputs
+
+
class AudioQnAService:
    def __init__(self, host="0.0.0.0", port=8000):
        self.host = host
diff --git a/AudioQnA/docker_compose/amd/gpu/rocm/compose.yaml b/AudioQnA/docker_compose/amd/gpu/rocm/compose.yaml
index 44f320b929..9cdaa32e82 100644
--- a/AudioQnA/docker_compose/amd/gpu/rocm/compose.yaml
+++ b/AudioQnA/docker_compose/amd/gpu/rocm/compose.yaml
@@ -43,6 +43,12 @@ services:
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
      HF_HUB_DISABLE_PROGRESS_BARS: 1
      HF_HUB_ENABLE_HF_TRANSFER: 0
+      host_ip: ${host_ip}
+    healthcheck:
+      test: ["CMD-SHELL", "curl -f http://$host_ip:3006/health || exit 1"]
+      interval: 10s
+      timeout: 10s
+      retries: 100
    command: --model-id ${LLM_MODEL_ID}
    cap_add:
      - SYS_PTRACE
diff --git a/AudioQnA/docker_compose/intel/cpu/xeon/compose.yaml b/AudioQnA/docker_compose/intel/cpu/xeon/compose.yaml
index d23684172b..096dadbc78 100644
--- a/AudioQnA/docker_compose/intel/cpu/xeon/compose.yaml
+++ b/AudioQnA/docker_compose/intel/cpu/xeon/compose.yaml
@@ -37,6 +37,12 @@ services:
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      host_ip: ${host_ip}
+    healthcheck:
+      test: ["CMD-SHELL", "curl -f http://$host_ip:3006/health || exit 1"]
+      interval: 10s
+      timeout: 10s
+      retries: 100
    command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
  audioqna-xeon-backend-server:
    image: ${REGISTRY:-opea}/audioqna:${TAG:-latest}
diff --git a/AudioQnA/docker_compose/intel/hpu/gaudi/compose.yaml b/AudioQnA/docker_compose/intel/hpu/gaudi/compose.yaml
index 18840be343..080e2523bc 100644
--- a/AudioQnA/docker_compose/intel/hpu/gaudi/compose.yaml
+++ b/AudioQnA/docker_compose/intel/hpu/gaudi/compose.yaml
@@ -58,6 +58,11 @@ services:
    cap_add:
      - SYS_NICE
    ipc: host
+    healthcheck:
+      test: ["CMD-SHELL", "sleep 500 && exit 0"]
+      interval: 1s
+      timeout: 505s
+      retries: 1
    command: --model-id ${LLM_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048
  audioqna-gaudi-backend-server:
    image: ${REGISTRY:-opea}/audioqna:${TAG:-latest}
diff --git a/AudioQnA/docker_image_build/build.yaml b/AudioQnA/docker_image_build/build.yaml
index d0ec1a7c31..8bbdbcc3e3 100644
--- a/AudioQnA/docker_image_build/build.yaml
+++ b/AudioQnA/docker_image_build/build.yaml
@@ -44,7 +44,7 @@ services:
  llm-tgi:
    build:
      context: GenAIComps
-      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
    extends: audioqna
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
  speecht5-gaudi:
diff --git a/AudioQnA/tests/test_compose_on_gaudi.sh b/AudioQnA/tests/test_compose_on_gaudi.sh
index ab480d55c1..2d6f651c2c 100644
--- a/AudioQnA/tests/test_compose_on_gaudi.sh
+++ b/AudioQnA/tests/test_compose_on_gaudi.sh
@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

-set -e
+set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -45,25 +45,7 @@ function start_services() {
    # Start Docker Containers
    docker compose up -d > ${LOG_PATH}/start_services_with_compose.log

-    n=0
-    until [[ "$n" -ge 200 ]]; do
-        docker logs tgi-gaudi-server > $LOG_PATH/tgi_service_start.log
-        if grep -q Connected $LOG_PATH/tgi_service_start.log; then
-            break
-        fi
-        sleep 5s
-        n=$((n+1))
-    done
-
-    n=0
-    until [[ "$n" -ge 100 ]]; do
-        docker logs whisper-service > $LOG_PATH/whisper_service_start.log
-        if grep -q "Uvicorn server setup on port" $LOG_PATH/whisper_service_start.log; then
-            break
-        fi
-        sleep 5s
-        n=$((n+1))
-    done
+    sleep 20s
}
diff --git a/AudioQnA/tests/test_compose_on_rocm.sh b/AudioQnA/tests/test_compose_on_rocm.sh
index 756266aadd..efaa443eda 100644
--- a/AudioQnA/tests/test_compose_on_rocm.sh
+++ b/AudioQnA/tests/test_compose_on_rocm.sh
@@ -2,7 +2,7 @@
# Copyright (C) 2024 Advanced Micro Devices, Inc.
# SPDX-License-Identifier: Apache-2.0

-set -ex
+set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -47,15 +47,7 @@ function start_services() {
    # Start Docker Containers
    docker compose up -d > ${LOG_PATH}/start_services_with_compose.log

-    n=0
-    until [[ "$n" -ge 200 ]]; do
-        docker logs tgi-service > $LOG_PATH/tgi_service_start.log
-        if grep -q Connected $LOG_PATH/tgi_service_start.log; then
-            break
-        fi
-        sleep 5s
-        n=$((n+1))
-    done
+    sleep 24s
}

function validate_megaservice() {
    response=$(http_proxy="" curl http://${ip_address}:3008/v1/audioqna -XPOST -d '{"audio": "UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA", "max_tokens":64}' -H 'Content-Type: application/json')
diff --git a/AudioQnA/tests/test_compose_on_xeon.sh b/AudioQnA/tests/test_compose_on_xeon.sh
index 04ed04e06e..2869df6329 100644
--- a/AudioQnA/tests/test_compose_on_xeon.sh
+++ b/AudioQnA/tests/test_compose_on_xeon.sh
@@ -2,7 +2,7 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

-set -e
+set -xe
IMAGE_REPO=${IMAGE_REPO:-"opea"}
IMAGE_TAG=${IMAGE_TAG:-"latest"}
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -46,15 +46,7 @@ function start_services() {
    # Start Docker Containers
    docker compose up -d > ${LOG_PATH}/start_services_with_compose.log

-    n=0
-    until [[ "$n" -ge 200 ]]; do
-        docker logs tgi-service > $LOG_PATH/tgi_service_start.log
-        if grep -q Connected $LOG_PATH/tgi_service_start.log; then
-            break
-        fi
-        sleep 5s
-        n=$((n+1))
-    done
+    sleep 20s
}
diff --git a/AvatarChatbot/docker_compose/intel/cpu/xeon/compose.yaml b/AvatarChatbot/docker_compose/intel/cpu/xeon/compose.yaml
index c0af8d71da..32f731fb8d 100644
--- a/AvatarChatbot/docker_compose/intel/cpu/xeon/compose.yaml
+++ b/AvatarChatbot/docker_compose/intel/cpu/xeon/compose.yaml
@@ -38,6 +38,12 @@ services:
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      host_ip: ${host_ip}
+    healthcheck:
+      test: ["CMD-SHELL", "curl -f http://$host_ip:3006/health || exit 1"]
+      interval: 10s
+      timeout: 10s
+      retries: 100
    command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
  wav2lip-service:
    image: ${REGISTRY:-opea}/wav2lip:${TAG:-latest}
diff --git a/AvatarChatbot/docker_compose/intel/hpu/gaudi/README.md b/AvatarChatbot/docker_compose/intel/hpu/gaudi/README.md
index ac251b84ef..ec654be63d 100644
--- a/AvatarChatbot/docker_compose/intel/hpu/gaudi/README.md
+++ b/AvatarChatbot/docker_compose/intel/hpu/gaudi/README.md
@@ -19,6 +19,10 @@ docker build -t opea/whisper-gaudi:latest --build-arg https_proxy=$https_proxy -
### 3. Build LLM Image

+```bash
+docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
+```
+
Intel Xeon optimized image hosted in huggingface repo will be used for TGI service: ghcr.io/huggingface/tgi-gaudi:2.0.6 (https://github.com/huggingface/tgi-gaudi)

### 4. Build TTS Image
diff --git a/AvatarChatbot/docker_compose/intel/hpu/gaudi/compose.yaml b/AvatarChatbot/docker_compose/intel/hpu/gaudi/compose.yaml
index 799510d0ab..ab05459d9f 100644
--- a/AvatarChatbot/docker_compose/intel/hpu/gaudi/compose.yaml
+++ b/AvatarChatbot/docker_compose/intel/hpu/gaudi/compose.yaml
@@ -61,7 +61,12 @@ services:
    cap_add:
      - SYS_NICE
    ipc: host
-    command: --model-id ${LLM_MODEL_ID} --max-input-length 128 --max-total-tokens 256
+    healthcheck:
+      test: ["CMD-SHELL", "sleep 500 && exit 0"]
+      interval: 1s
+      timeout: 505s
+      retries: 1
+    command: --model-id ${LLM_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048
  wav2lip-service:
    image: ${REGISTRY:-opea}/wav2lip-gaudi:${TAG:-latest}
    container_name: wav2lip-service
diff --git a/AvatarChatbot/docker_image_build/build.yaml b/AvatarChatbot/docker_image_build/build.yaml
index 748c8877d0..25c597c4e9 100644
--- a/AvatarChatbot/docker_image_build/build.yaml
+++ b/AvatarChatbot/docker_image_build/build.yaml
@@ -32,7 +32,7 @@ services:
  llm-tgi:
    build:
      context: GenAIComps
-      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
    extends: avatarchatbot
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
  speecht5-gaudi:
diff --git a/AvatarChatbot/tests/test_compose_on_gaudi.sh b/AvatarChatbot/tests/test_compose_on_gaudi.sh
index f023319458..90d0cc289c 100755
--- a/AvatarChatbot/tests/test_compose_on_gaudi.sh
+++ b/AvatarChatbot/tests/test_compose_on_gaudi.sh
@@ -72,19 +72,9 @@ function start_services() {
    # Start Docker Containers
    docker compose up -d > ${LOG_PATH}/start_services_with_compose.log
-
-    n=0
-    until [[ "$n" -ge 200 ]]; do
-        docker logs tgi-gaudi-server > $LOG_PATH/tgi_service_start.log
-        if grep -q Connected $LOG_PATH/tgi_service_start.log; then
-            break
-        fi
-        sleep 5s
-        n=$((n+1))
-    done
-
+    sleep 60s
    echo "All services are up and running"
-    sleep 5s
+
}
diff --git a/AvatarChatbot/tests/test_compose_on_xeon.sh b/AvatarChatbot/tests/test_compose_on_xeon.sh
index fbb1d5e570..5459c9121f 100755
--- a/AvatarChatbot/tests/test_compose_on_xeon.sh
+++ b/AvatarChatbot/tests/test_compose_on_xeon.sh
@@ -72,17 +72,8 @@ function start_services() {
    # Start Docker Containers
    docker compose up -d

-    n=0
-    until [[ "$n" -ge 100 ]]; do
-        docker logs tgi-service > $LOG_PATH/tgi_service_start.log
-        if grep -q Connected $LOG_PATH/tgi_service_start.log; then
-            break
-        fi
-        sleep 5s
-        n=$((n+1))
-    done
+    sleep 20s
    echo "All services are up and running"
-    sleep 5s
}
diff --git a/ChatQnA/docker_compose/amd/gpu/rocm/README.md b/ChatQnA/docker_compose/amd/gpu/rocm/README.md
index 9ef30d2a16..61de8a3f83 100644
--- a/ChatQnA/docker_compose/amd/gpu/rocm/README.md
+++ b/ChatQnA/docker_compose/amd/gpu/rocm/README.md
@@ -138,7 +138,7 @@ cd ../../../..

```bash
cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/src/Dockerfile .
```

Then run the command `docker images`, you will have the following 5 Docker Images:
diff --git a/ChatQnA/docker_compose/intel/cpu/aipc/README.md b/ChatQnA/docker_compose/intel/cpu/aipc/README.md
index a438103e7a..bb7f09dacc 100644
--- a/ChatQnA/docker_compose/intel/cpu/aipc/README.md
+++ b/ChatQnA/docker_compose/intel/cpu/aipc/README.md
@@ -55,7 +55,7 @@ docker build --no-cache -t opea/chatqna-ui:latest --build-arg https_proxy=$https

```bash
cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/src/Dockerfile .
```

Then run the command `docker images`, you will have the following 6 Docker Images:
diff --git a/ChatQnA/docker_compose/intel/cpu/xeon/README.md b/ChatQnA/docker_compose/intel/cpu/xeon/README.md
index 6eb03c4149..63218c9694 100644
--- a/ChatQnA/docker_compose/intel/cpu/xeon/README.md
+++ b/ChatQnA/docker_compose/intel/cpu/xeon/README.md
@@ -161,7 +161,7 @@ docker build --no-cache -t opea/chatqna-conversation-ui:latest --build-arg https

```bash
cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/src/Dockerfile .
```

Then run the command `docker images`, you will have the following 5 Docker Images:
diff --git a/ChatQnA/docker_compose/intel/cpu/xeon/README_pinecone.md b/ChatQnA/docker_compose/intel/cpu/xeon/README_pinecone.md
index 1fddfff22a..5a91bae089 100644
--- a/ChatQnA/docker_compose/intel/cpu/xeon/README_pinecone.md
+++ b/ChatQnA/docker_compose/intel/cpu/xeon/README_pinecone.md
@@ -164,7 +164,7 @@ docker build --no-cache -t opea/chatqna-conversation-ui:latest --build-arg https

```bash
cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/src/Dockerfile .
```

Then run the command `docker images`, you will have the following 5 Docker Images:
diff --git a/ChatQnA/docker_compose/intel/cpu/xeon/README_qdrant.md b/ChatQnA/docker_compose/intel/cpu/xeon/README_qdrant.md
index 2f9fa1b822..3426353d90 100644
--- a/ChatQnA/docker_compose/intel/cpu/xeon/README_qdrant.md
+++ b/ChatQnA/docker_compose/intel/cpu/xeon/README_qdrant.md
@@ -122,7 +122,7 @@ cd ../../../..

```bash
cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/src/Dockerfile .
```

Then run the command `docker images`, you will have the following 5 Docker Images:
diff --git a/ChatQnA/docker_compose/intel/hpu/gaudi/README.md b/ChatQnA/docker_compose/intel/hpu/gaudi/README.md
index 162c0f0385..62e0b60329 100644
--- a/ChatQnA/docker_compose/intel/hpu/gaudi/README.md
+++ b/ChatQnA/docker_compose/intel/hpu/gaudi/README.md
@@ -151,7 +151,7 @@ docker build --no-cache -t opea/chatqna-conversation-ui:latest --build-arg https

```bash
cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/src/Dockerfile .
```

Then run the command `docker images`, you will have the following 5 Docker Images:
diff --git a/ChatQnA/docker_compose/nvidia/gpu/README.md b/ChatQnA/docker_compose/nvidia/gpu/README.md
index 92b7a26e79..2129766bfa 100644
--- a/ChatQnA/docker_compose/nvidia/gpu/README.md
+++ b/ChatQnA/docker_compose/nvidia/gpu/README.md
@@ -148,7 +148,7 @@ cd ../../..

```bash
cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/src/Dockerfile .
```

Then run the command `docker images`, you will have the following 5 Docker Images:
diff --git a/ChatQnA/docker_image_build/build.yaml b/ChatQnA/docker_image_build/build.yaml
index a8fa36bc3b..439c12fd68 100644
--- a/ChatQnA/docker_image_build/build.yaml
+++ b/ChatQnA/docker_image_build/build.yaml
@@ -44,7 +44,7 @@ services:
  embedding-tei:
    build:
      context: GenAIComps
-      dockerfile: comps/embeddings/tei/langchain/Dockerfile
+      dockerfile: comps/embeddings/src/Dockerfile
    extends: chatqna
    image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
  retriever-redis:
@@ -68,25 +68,25 @@ services:
  reranking-tei:
    build:
      context: GenAIComps
-      dockerfile: comps/reranks/tei/Dockerfile
+      dockerfile: comps/reranks/src/Dockerfile
    extends: chatqna
    image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
  llm-tgi:
    build:
      context: GenAIComps
-      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
    extends: chatqna
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
  llm-ollama:
    build:
      context: GenAIComps
-      dockerfile: comps/llms/text-generation/ollama/langchain/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
    extends: chatqna
    image: ${REGISTRY:-opea}/llm-ollama:${TAG:-latest}
  llm-vllm:
    build:
      context: GenAIComps
-      dockerfile: comps/llms/text-generation/vllm/langchain/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
    extends: chatqna
    image: ${REGISTRY:-opea}/llm-vllm:${TAG:-latest}
  dataprep-redis:
@@ -128,6 +128,6 @@ services:
  nginx:
    build:
      context: GenAIComps
-      dockerfile: comps/nginx/Dockerfile
+      dockerfile: comps/3rd_parties/nginx/src/Dockerfile
    extends: chatqna
    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
diff --git a/ChatQnA/tests/test_compose_on_rocm.sh b/ChatQnA/tests/test_compose_on_rocm.sh
index 9075134342..9744731d24 100644
--- a/ChatQnA/tests/test_compose_on_rocm.sh
+++ b/ChatQnA/tests/test_compose_on_rocm.sh
@@ -76,6 +76,8 @@ function start_services() {
        sleep 1s
        n=$((n+1))
    done
+
+    echo "all containers start!"
}

function validate_service() {
diff --git a/CodeGen/docker_compose/amd/gpu/rocm/README.md b/CodeGen/docker_compose/amd/gpu/rocm/README.md
index dd01fdd0de..46e24f16aa 100644
--- a/CodeGen/docker_compose/amd/gpu/rocm/README.md
+++ b/CodeGen/docker_compose/amd/gpu/rocm/README.md
@@ -10,7 +10,7 @@ git clone https://github.com/opea-project/GenAIComps.git
cd GenAIComps

### Build Docker image
-docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
```

### Build the MegaService Docker Image
diff --git a/CodeGen/docker_compose/amd/gpu/rocm/compose.yaml b/CodeGen/docker_compose/amd/gpu/rocm/compose.yaml
index cf7d2369c4..55abc832ba 100644
--- a/CodeGen/docker_compose/amd/gpu/rocm/compose.yaml
+++ b/CodeGen/docker_compose/amd/gpu/rocm/compose.yaml
@@ -15,6 +15,12 @@ services:
      https_proxy: ${https_proxy}
      HUGGING_FACE_HUB_TOKEN: ${CODEGEN_HUGGINGFACEHUB_API_TOKEN}
      HUGGINGFACEHUB_API_TOKEN: ${CODEGEN_HUGGINGFACEHUB_API_TOKEN}
+      host_ip: ${host_ip}
+    healthcheck:
+      test: ["CMD-SHELL", "curl -f http://$host_ip:${CODEGEN_TGI_SERVICE_PORT:-8028}/health || exit 1"]
+      interval: 10s
+      timeout: 10s
+      retries: 100
    shm_size: 1g
    devices:
      - /dev/kfd:/dev/kfd
@@ -31,7 +37,8 @@ services:
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
    container_name: codegen-llm-server
    depends_on:
-      - codegen-tgi-service
+      codegen-tgi-service:
+        condition: service_healthy
    ports:
      - "${CODEGEN_LLM_SERVICE_PORT:-9000}:9000"
    ipc: host
@@ -39,7 +46,8 @@ services:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
-      TGI_LLM_ENDPOINT: "http://codegen-tgi-service"
+      LLM_ENDPOINT: "http://codegen-tgi-service"
+      LLM_MODEL_ID: ${CODEGEN_LLM_MODEL_ID}
      HUGGINGFACEHUB_API_TOKEN: ${CODEGEN_HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
  codegen-backend-server:
diff --git a/CodeGen/docker_compose/intel/cpu/xeon/README.md b/CodeGen/docker_compose/intel/cpu/xeon/README.md
index 5332d719a3..d44adc91d7 100644
--- a/CodeGen/docker_compose/intel/cpu/xeon/README.md
+++ b/CodeGen/docker_compose/intel/cpu/xeon/README.md
@@ -19,7 +19,7 @@ Should the Docker image you seek not yet be available on Docker Hub, you can bui
```bash
git clone https://github.com/opea-project/GenAIComps.git
cd GenAIComps
-docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
```

### 2. Build the MegaService Docker Image
diff --git a/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml b/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml
index 64b74db71f..96226fe21e 100644
--- a/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml
+++ b/CodeGen/docker_compose/intel/cpu/xeon/compose.yaml
@@ -15,12 +15,19 @@ services:
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      host_ip: ${host_ip}
+    healthcheck:
+      test: ["CMD-SHELL", "curl -f http://$host_ip:8028/health || exit 1"]
+      interval: 10s
+      timeout: 10s
+      retries: 100
    command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
  llm:
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
    container_name: llm-tgi-server
    depends_on:
-      - tgi-service
+      tgi-service:
+        condition: service_healthy
    ports:
      - "9000:9000"
    ipc: host
@@ -28,7 +35,8 @@ services:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
-      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_MODEL_ID: ${LLM_MODEL_ID}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
  codegen-xeon-backend-server:
diff --git a/CodeGen/docker_compose/intel/hpu/gaudi/README.md b/CodeGen/docker_compose/intel/hpu/gaudi/README.md
index 31cfad2929..ad68359258 100644
--- a/CodeGen/docker_compose/intel/hpu/gaudi/README.md
+++ b/CodeGen/docker_compose/intel/hpu/gaudi/README.md
@@ -11,7 +11,7 @@ First of all, you need to build the Docker images locally. This step can be igno
```bash
git clone https://github.com/opea-project/GenAIComps.git
cd GenAIComps
-docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
```

### 2. Build the MegaService Docker Image
diff --git a/CodeGen/docker_compose/intel/hpu/gaudi/compose.yaml b/CodeGen/docker_compose/intel/hpu/gaudi/compose.yaml
index 92b70b099c..7f7e71295a 100644
--- a/CodeGen/docker_compose/intel/hpu/gaudi/compose.yaml
+++ b/CodeGen/docker_compose/intel/hpu/gaudi/compose.yaml
@@ -20,6 +20,11 @@ services:
      LIMIT_HPU_GRAPH: true
      USE_FLASH_ATTENTION: true
      FLASH_ATTENTION_RECOMPUTE: true
+    healthcheck:
+      test: ["CMD-SHELL", "sleep 500 && exit 0"]
+      interval: 1s
+      timeout: 505s
+      retries: 1
    runtime: habana
    cap_add:
      - SYS_NICE
@@ -29,7 +34,8 @@ services:
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
    container_name: llm-tgi-gaudi-server
    depends_on:
-      - tgi-service
+      tgi-service:
+        condition: service_healthy
    ports:
      - "9000:9000"
    ipc: host
@@ -37,7 +43,8 @@ services:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
-      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_MODEL_ID: ${LLM_MODEL_ID}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
  codegen-gaudi-backend-server:
diff --git a/CodeGen/docker_image_build/build.yaml b/CodeGen/docker_image_build/build.yaml
index 1f8a7f949c..9af65d7772 100644
--- a/CodeGen/docker_image_build/build.yaml
+++ b/CodeGen/docker_image_build/build.yaml
@@ -26,6 +26,6 @@ services:
  llm-tgi:
    build:
      context: GenAIComps
-      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
    extends: codegen
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
diff --git a/CodeGen/tests/test_compose_on_gaudi.sh b/CodeGen/tests/test_compose_on_gaudi.sh
index bd0e36688f..8e06a904d3 100644
--- a/CodeGen/tests/test_compose_on_gaudi.sh
+++ b/CodeGen/tests/test_compose_on_gaudi.sh
@@ -34,6 +34,7 @@ function start_services() {
    export MEGA_SERVICE_HOST_IP=${ip_address}
    export LLM_SERVICE_HOST_IP=${ip_address}
    export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:7778/v1/codegen"
+    export host_ip=${ip_address}

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
diff --git a/CodeGen/tests/test_compose_on_rocm.sh b/CodeGen/tests/test_compose_on_rocm.sh
index 7f37669e59..a09a368b6a 100644
--- a/CodeGen/tests/test_compose_on_rocm.sh
+++ b/CodeGen/tests/test_compose_on_rocm.sh
@@ -39,6 +39,7 @@ function start_services() {
    export CODEGEN_BACKEND_SERVICE_PORT=7778
    export CODEGEN_BACKEND_SERVICE_URL="http://${ip_address}:${CODEGEN_BACKEND_SERVICE_PORT}/v1/codegen"
    export CODEGEN_UI_SERVICE_PORT=5173
+    export host_ip=${ip_address}

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
diff --git a/CodeGen/tests/test_compose_on_xeon.sh b/CodeGen/tests/test_compose_on_xeon.sh
index 637cc00f50..e95052497e 100644
--- a/CodeGen/tests/test_compose_on_xeon.sh
+++ b/CodeGen/tests/test_compose_on_xeon.sh
@@ -35,6 +35,7 @@ function start_services() {
    export MEGA_SERVICE_HOST_IP=${ip_address}
    export LLM_SERVICE_HOST_IP=${ip_address}
    export BACKEND_SERVICE_ENDPOINT="http://${ip_address}:7778/v1/codegen"
+    export host_ip=${ip_address}

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
diff --git a/CodeGen/ui/svelte/src/routes/+page.svelte b/CodeGen/ui/svelte/src/routes/+page.svelte
index ff18c58171..0e7d43beaf 100644
--- a/CodeGen/ui/svelte/src/routes/+page.svelte
+++ b/CodeGen/ui/svelte/src/routes/+page.svelte
@@ -34,22 +34,21 @@ const eventSource = await fetchTextStream(query);
    eventSource.addEventListener("message", (e: any) => {
-      let Msg = e.data;
-      console.log("Msg", Msg);
+      let res = e.data;

-      if (Msg.startsWith("b")) {
-        const trimmedData = Msg.slice(2, -1);
-        if (trimmedData.includes("'''")) {
-          deleteFlag = true;
-        } else if (deleteFlag && trimmedData.includes("\\n")) {
-          deleteFlag = false;
-        } else if (trimmedData !== "" && !deleteFlag) {
-          code_output += trimmedData.replace(/\\n/g, "\n");
-        }
-      } else if (Msg === "[DONE]") {
+      if (res === "[DONE]") {
        deleteFlag = false;
        loading = false;
        query = '';
+      } else {
+        let Msg = JSON.parse(res).choices[0].text;
+        if (Msg.includes("'''")) {
+          deleteFlag = true;
+        } else if (deleteFlag && Msg.includes("\\n")) {
+          deleteFlag = false;
+        } else if (Msg !== "" && !deleteFlag) {
+          code_output += Msg.replace(/\\n/g, "\n");
+        }
      }
    });
    eventSource.stream();
diff --git a/CodeTrans/docker_compose/amd/gpu/rocm/README.md b/CodeTrans/docker_compose/amd/gpu/rocm/README.md
index fafe837b40..38954284eb 100644
--- a/CodeTrans/docker_compose/amd/gpu/rocm/README.md
+++ b/CodeTrans/docker_compose/amd/gpu/rocm/README.md
@@ -10,7 +10,7 @@ git clone https://github.com/opea-project/GenAIComps.git
cd GenAIComps

### Build Docker image
-docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
```

### Build the MegaService Docker Image
diff --git a/CodeTrans/docker_compose/amd/gpu/rocm/compose.yaml b/CodeTrans/docker_compose/amd/gpu/rocm/compose.yaml
index cfad48a4d6..e58041f808 100644
--- a/CodeTrans/docker_compose/amd/gpu/rocm/compose.yaml
+++ b/CodeTrans/docker_compose/amd/gpu/rocm/compose.yaml
@@ -17,6 +17,12 @@ services:
      TGI_LLM_ENDPOINT: ${CODETRANS_TGI_LLM_ENDPOINT}
      HUGGING_FACE_HUB_TOKEN: ${CODEGEN_HUGGINGFACEHUB_API_TOKEN}
      HUGGINGFACEHUB_API_TOKEN: ${CODEGEN_HUGGINGFACEHUB_API_TOKEN}
+      host_ip: ${host_ip}
+    healthcheck:
+      test: ["CMD-SHELL", "curl -f http://$host_ip:8008/health || exit 1"]
+      interval: 10s
+      timeout: 10s
+      retries: 100
    devices:
      - /dev/kfd:/dev/kfd
      - /dev/dri/:/dev/dri/
@@ -31,6 +37,9 @@ services:
  codetrans-llm-server:
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
    container_name: codetrans-llm-server
+    depends_on:
+      codetrans-tgi-service:
+        condition: service_healthy
    ports:
      - "${CODETRANS_LLM_SERVICE_PORT:-9000}:9000"
    ipc: host
@@ -38,7 +47,8 @@ services:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
-      TGI_LLM_ENDPOINT: "http://codetrans-tgi-service"
+      LLM_ENDPOINT: "http://codetrans-tgi-service"
+      LLM_MODEL_ID: ${CODETRANS_LLM_MODEL_ID}
      HUGGINGFACEHUB_API_TOKEN: ${CODETRANS_HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
  codetrans-backend-server:
diff --git a/CodeTrans/docker_compose/intel/cpu/xeon/README.md b/CodeTrans/docker_compose/intel/cpu/xeon/README.md
index 15f6414f04..c574740e3d 100755
--- a/CodeTrans/docker_compose/intel/cpu/xeon/README.md
+++ b/CodeTrans/docker_compose/intel/cpu/xeon/README.md
@@ -19,7 +19,7 @@ First of all, you need to build Docker Images locally and install the python pac
```bash
git clone https://github.com/opea-project/GenAIComps.git
cd GenAIComps
-docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
```

### 2. Build MegaService Docker Image
@@ -41,7 +41,7 @@ docker build -t opea/codetrans-ui:latest --build-arg https_proxy=$https_proxy --

```bash
cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/src/Dockerfile .
```

Then run the command `docker images`, you will have the following Docker Images:
diff --git a/CodeTrans/docker_compose/intel/cpu/xeon/compose.yaml b/CodeTrans/docker_compose/intel/cpu/xeon/compose.yaml
index 16c05cf363..896243786e 100644
--- a/CodeTrans/docker_compose/intel/cpu/xeon/compose.yaml
+++ b/CodeTrans/docker_compose/intel/cpu/xeon/compose.yaml
@@ -15,10 +15,19 @@ services:
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      host_ip: ${host_ip}
+    healthcheck:
+      test: ["CMD-SHELL", "curl -f http://$host_ip:8008/health || exit 1"]
+      interval: 10s
+      timeout: 10s
+      retries: 100
    command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
  llm:
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
    container_name: llm-tgi-server
+    depends_on:
+      tgi-service:
+        condition: service_healthy
    ports:
      - "9000:9000"
    ipc: host
@@ -26,7 +35,8 @@ services:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
-      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_MODEL_ID: ${LLM_MODEL_ID}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
  codetrans-xeon-backend-server:
diff --git a/CodeTrans/docker_compose/intel/hpu/gaudi/README.md b/CodeTrans/docker_compose/intel/hpu/gaudi/README.md
index 04858bc235..c0ca35fcf6 100755
--- a/CodeTrans/docker_compose/intel/hpu/gaudi/README.md
+++ b/CodeTrans/docker_compose/intel/hpu/gaudi/README.md
@@ -11,7 +11,7 @@ First of all, you need to build Docker Images locally and install the python pac
```bash
git clone https://github.com/opea-project/GenAIComps.git
cd GenAIComps
-docker build -t opea/llm-tgi:latest --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+docker build -t opea/llm-tgi:latest --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/src/text-generation/Dockerfile .
```

### 2. Build MegaService Docker Image
@@ -33,7 +33,7 @@ docker build -t opea/codetrans-ui:latest --build-arg https_proxy=$https_proxy --

```bash
cd GenAIComps
-docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/nginx/Dockerfile .
+docker build -t opea/nginx:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/3rd_parties/nginx/src/Dockerfile .
```

Then run the command `docker images`, you will have the following Docker Images:
diff --git a/CodeTrans/docker_compose/intel/hpu/gaudi/compose.yaml b/CodeTrans/docker_compose/intel/hpu/gaudi/compose.yaml
index 2f87d10c24..c1c6c4c537 100644
--- a/CodeTrans/docker_compose/intel/hpu/gaudi/compose.yaml
+++ b/CodeTrans/docker_compose/intel/hpu/gaudi/compose.yaml
@@ -20,6 +20,11 @@ services:
      LIMIT_HPU_GRAPH: true
      USE_FLASH_ATTENTION: true
      FLASH_ATTENTION_RECOMPUTE: true
+    healthcheck:
+      test: ["CMD-SHELL", "sleep 500 && exit 0"]
+      interval: 1s
+      timeout: 505s
+      retries: 1
    runtime: habana
    cap_add:
      - SYS_NICE
@@ -28,6 +33,9 @@ services:
  llm:
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
    container_name: llm-tgi-gaudi-server
+    depends_on:
+      tgi-service:
+        condition: service_healthy
    ports:
      - "9000:9000"
    ipc: host
@@ -35,7 +43,8 @@ services:
      no_proxy: ${no_proxy}
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
-      TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_MODEL_ID: ${LLM_MODEL_ID}
      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
    restart: unless-stopped
  codetrans-gaudi-backend-server:
diff --git a/CodeTrans/docker_image_build/build.yaml b/CodeTrans/docker_image_build/build.yaml
index b8421e0248..3f3ec36a0d 100644
--- a/CodeTrans/docker_image_build/build.yaml
+++ b/CodeTrans/docker_image_build/build.yaml
@@ -20,12 +20,12 @@ services:
  llm-tgi:
    build:
      context: GenAIComps
-      dockerfile: comps/llms/text-generation/tgi/Dockerfile
+      dockerfile: comps/llms/src/text-generation/Dockerfile
    extends: codetrans
    image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
  nginx:
    build:
      context: GenAIComps
-      dockerfile: comps/nginx/Dockerfile
+      dockerfile: comps/3rd_parties/nginx/src/Dockerfile
    extends: codetrans
    image: ${REGISTRY:-opea}/nginx:${TAG:-latest}
diff --git a/CodeTrans/tests/test_compose_on_gaudi.sh b/CodeTrans/tests/test_compose_on_gaudi.sh
index 5625e2276d..ecab66c6f2 100644
--- a/CodeTrans/tests/test_compose_on_gaudi.sh
+++ b/CodeTrans/tests/test_compose_on_gaudi.sh
@@ -43,6 +43,7 @@ function start_services() {
    export BACKEND_SERVICE_IP=${ip_address}
    export BACKEND_SERVICE_PORT=7777
    export NGINX_PORT=80
+    export host_ip=${ip_address}

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
diff --git a/CodeTrans/tests/test_compose_on_rocm.sh b/CodeTrans/tests/test_compose_on_rocm.sh
index 14ed64a7e4..322e9174c0 100644
--- a/CodeTrans/tests/test_compose_on_rocm.sh
+++ b/CodeTrans/tests/test_compose_on_rocm.sh
@@ -45,6 +45,7 @@ function start_services() {
    export CODETRANS_BACKEND_SERVICE_PORT=7777
    export CODETRANS_NGINX_PORT=8088
    export CODETRANS_BACKEND_SERVICE_URL="http://${ip_address}:${CODETRANS_BACKEND_SERVICE_PORT}/v1/codetrans"
+    export host_ip=${ip_address}

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
diff --git a/CodeTrans/tests/test_compose_on_xeon.sh b/CodeTrans/tests/test_compose_on_xeon.sh
index ea3985422c..1d883dfdbc 100644
--- a/CodeTrans/tests/test_compose_on_xeon.sh
+++ b/CodeTrans/tests/test_compose_on_xeon.sh
@@ -42,6 +42,7 @@ function start_services() {
    export BACKEND_SERVICE_IP=${ip_address}
    export BACKEND_SERVICE_PORT=7777
    export NGINX_PORT=80
+    export host_ip=${ip_address}

    sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env
diff --git a/CodeTrans/ui/svelte/src/routes/+page.svelte b/CodeTrans/ui/svelte/src/routes/+page.svelte
index 46918c9457..fd6be39310 100644
--- a/CodeTrans/ui/svelte/src/routes/+page.svelte
+++ b/CodeTrans/ui/svelte/src/routes/+page.svelte
@@ -47,16 +47,16 @@ const languagesTag = {
    // 'TypeScript': typescript,
-    'Python': python,
-    'C': c,
-    'C++': cpp,
+    Python: python,
+    C: c,
+    "C++": cpp,
    // 'C#': csharp,
-    'Go': go,
-    'Java': java,
-    'JavaScript': javascript,
+    Go: go,
+    Java: java,
+    JavaScript: javascript,
    // 'Swift': swift,
    // 'Ruby': ruby,
-    'Rust': rust,
+    Rust: rust,
    // 'PHP': php,
    // 'Kotlin': kotlin,
    // 'Objective-C': objectivec,
@@ -103,21 +103,20 @@ const eventSource = await fetchTextStream(input, langFrom, langTo);
    eventSource.addEventListener("message", (e: any) => {
-      let Msg = e.data;
-      console.log('Msg', Msg);
+      let res = e.data;

-      if (Msg.startsWith("b")) {
-        const trimmedData = Msg.slice(2, -1);
-        if (trimmedData.includes("'''")) {
+      if (res === "[DONE]") {
+        deleteFlag = false;
+        loading = false;
+      } else {
+        let Msg = JSON.parse(res).choices[0].text;
+        if (Msg.includes("'''")) {
          deleteFlag = true;
-        } else if (deleteFlag && trimmedData.includes("\\n")) {
+        } else if (deleteFlag && Msg.includes("\\n")) {
          deleteFlag = false;
-        } else if (trimmedData !== "" && !deleteFlag) {
-          output += trimmedData.replace(/\\n/g, "\n");
+        } else if (Msg !== "" && !deleteFlag) {
+          output += Msg.replace(/\\n/g, "\n");
        }
-      } else if (Msg === "[DONE]") {
-        deleteFlag = false;
-        loading = false;
      }
    });
    eventSource.stream();
@@ -202,7 +201,9 @@ data-testid="code-output"
    >
      {#if output !== ""}
-
+