adding and ensuring consistency of whisper-small and mistral-code models
Signed-off-by: greg pereira <[email protected]>
Gregory-Pereira committed Apr 27, 2024
1 parent 53a8ebe commit 4052562
Showing 5 changed files with 15 additions and 7 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/codegen.yaml
@@ -62,8 +62,8 @@ jobs:
         run: make install

       - name: Download model
-        working-directory: ./recipes/natural_language_processing/${{ env.IMAGE_NAME }}
-        run: make download-model-mistral
+        working-directory: ./models
+        run: make download-model-mistral-code

       - name: Run Functional Tests
         shell: bash
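The same step can be reproduced locally before running the functional tests; a minimal sketch, assuming the repository is checked out at its root:

# run the model download exactly as the workflow now does
cd models
make download-model-mistral-code

Pointing the workflow's working-directory at ./models keeps the CI download path identical to the manual path documented in models/README.md.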
3 changes: 3 additions & 0 deletions .github/workflows/model_image_build_push.yaml
@@ -43,6 +43,9 @@ jobs:
            label: Q4_K_M
            url: https://huggingface.co/instructlab/granite-7b-lab-GGUF/resolve/main/granite-7b-lab-Q4_K_M.gguf
            platforms: linux/amd64,linux/arm64
+          - image_name: whisper-small
+            url: https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-small.bin
+            platforms: linux/amd64,linux/arm64
    runs-on: ubuntu-latest
    permissions:
      contents: read
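This matrix entry publishes a whisper-small model image alongside the existing GGUF model images. Assuming the quay.io/ai-lab namespace used by the Containerfile later in this commit, the built image could then be consumed directly; an illustrative pull (tag not verified here):

podman pull quay.io/ai-lab/whisper-small:latest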
10 changes: 7 additions & 3 deletions models/Makefile
@@ -25,15 +25,19 @@ download-model-granite:
 download-model-merlinite:
 	$(MAKE) MODEL_URL=https://huggingface.co/instructlab/merlinite-7b-lab-GGUF/resolve/main/merlinite-7b-lab-Q4_K_M.gguf MODEL_NAME=merlinite-7b-lab-Q4_K_M.gguf download-model

-.PHONY: download-model-whisper-small # small .bin model type testing
+.PHONY: download-model-whisper-small
 download-model-whisper-small:
 	$(MAKE) MODEL_NAME=ggml-small.bin MODEL_URL=https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-small.bin download-model

 .PHONY: download-model-mistral
 download-model-mistral:
 	$(MAKE) MODEL_NAME=mistral-7b-instruct-v0.1.Q4_K_M.gguf MODEL_URL=https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/mistral-7b-instruct-v0.1.Q4_K_M.gguf download-model

+.PHONY: download-model-mistral-code
+download-model-mistral-code:
+	$(MAKE) MODEL_NAME=mistral-7b-code-16k-qlora.Q4_K_M.gguf MODEL_URL=https://huggingface.co/TheBloke/Mistral-7B-Code-16K-qlora-GGUF/resolve/main/mistral-7b-code-16k-qlora.Q4_K_M.gguf download-model
+
 .PHONY: clean
 clean:
-	rm -f *tmp
-	rm -f mistral* whisper* granite* merlinite*
+	-rm -f *tmp
+	-rm -f mistral* ggml-* granite* merlinite*
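The shared download-model target that each of these per-model targets delegates to is not part of this diff; as a rough shell equivalent of one such invocation (an assumption about the underlying recipe, which may use a different downloader or flags):

# hypothetical stand-in for `make download-model-mistral-code`
MODEL_NAME=mistral-7b-code-16k-qlora.Q4_K_M.gguf
MODEL_URL=https://huggingface.co/TheBloke/Mistral-7B-Code-16K-qlora-GGUF/resolve/main/mistral-7b-code-16k-qlora.Q4_K_M.gguf
curl -fL -o "$MODEL_NAME" "$MODEL_URL"

The leading '-' now prefixed to the rm commands tells make to keep going if either command exits non-zero, and ggml-* replaces whisper* in the clean pattern because the whisper model is stored as ggml-small.bin.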
1 change: 1 addition & 0 deletions models/README.md
@@ -8,6 +8,7 @@ Want to try one of our tested models? Try one or all of the following:
 make download-model-granite
 make download-model-merlinite
 make download-model-mistral
+make download-model-mistral-code
 make download-model-whisper-small
 ```

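After running the targets from the models directory (as the workflows above do), the artifacts land next to the Makefile; a quick check, using the file names from the Makefile:

ls -lh *.gguf ggml-*.bin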
4 changes: 2 additions & 2 deletions recipes/audio/audio_to_text/bootc/Containerfile
@@ -14,9 +14,9 @@ RUN set -eu; mkdir -p /usr/ssh && \
     echo ${SSHPUBKEY} > /usr/ssh/root.keys && chmod 0600 /usr/ssh/root.keys

 ARG RECIPE=audio-to-text
-ARG MODEL_IMAGE=quay.io/ai-lab/mistral-7b-instruct:latest
+ARG MODEL_IMAGE=quay.io/ai-lab/whisper-small:latest
 ARG APP_IMAGE=quay.io/ai-lab/${RECIPE}:latest
-ARG SERVER_IMAGE=quay.io/ai-lab/llamacpp_python:latest
+ARG SERVER_IMAGE=quay.io/ai-lab/whispercpp:latest
 ARG TARGETARCH

 # Add quadlet files to setup system to automatically run AI application on boot
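With MODEL_IMAGE and SERVER_IMAGE switched to the whisper images, a local build of this bootc Containerfile passes the SSH public key referenced in the RUN step as a build argument; a sketch assuming podman, with the key path and output tag purely illustrative:

podman build \
  --build-arg "SSHPUBKEY=$(cat ~/.ssh/id_ed25519.pub)" \
  -f recipes/audio/audio_to_text/bootc/Containerfile \
  -t audio-to-text-bootc:latest \
  recipes/audio/audio_to_text/bootc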
