[llama3] Enable a llama3 model. #15
Workflow file for this run

name: CI
on:
  workflow_dispatch:
  pull_request:
  push:
    branches:
      - main
concurrency:
  # A PR number if a pull request and otherwise the commit hash. This cancels
  # queued and in-progress runs for the same PR (presubmit) or commit
  # (postsubmit). The workflow name is prepended to avoid conflicts between
  # different workflows.
  group: ${{ github.workflow }}-${{ github.event.number || github.sha }}
  cancel-in-progress: true
jobs:
  test:
    name: "Unit Tests and Type Checking"
    strategy:
      matrix:
        version: [3.11]
        os: [ubuntu-latest]
    runs-on: ${{matrix.os}}
    env:
      PIP_CACHE_DIR: "${{ github.workspace }}/.pip-cache"
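    # Pip's cache is redirected into the workspace so the "Cache Pip Packages"
    # step below can archive and restore it between runs.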
    steps:
      - name: "Setting up Python"
        id: setup_python
        uses: actions/setup-python@v3
        with:
          python-version: ${{matrix.version}}
      - name: "Checkout Code"
        uses: actions/checkout@v3
      - name: Cache Pip Packages
        uses: actions/cache@v4
        id: cache-pip
        with:
          path: ${{ env.PIP_CACHE_DIR }}
          key: pip-${{ steps.setup_python.outputs.python-version }}-${{ hashFiles('*requirements.txt') }}
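      # The cache key ties the pip cache to the resolved Python version and a
      # hash of every *requirements.txt file, so the cache is rebuilt whenever
      # dependencies change.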
      - name: Install pip deps
        run: |
          python -m pip install --no-compile --upgrade pip
          # Note: We install in three steps in order to satisfy requirements
          # from non-default locations first. Installing the PyTorch CPU
          # wheels saves multiple minutes and a lot of bandwidth on runner setup.
          pip install --no-compile -r pytorch-cpu-requirements.txt
          pip install --no-compile -f https://iree.dev/pip-release-links.html --src deps \
            -e "git+https://github.com/nod-ai/SHARK-Turbine.git#egg=SHARK-Turbine&subdirectory=core"
          pip install --no-compile -r requirements.txt -e sharktank/ shortfin/
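      # The test steps below use `!cancelled()` conditions so they still run
      # when an earlier step fails, but are skipped if the workflow run itself
      # is cancelled.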
      - name: Run sharktank tests
        if: ${{ !cancelled() }}
        run: |
          pytest -n 4 sharktank/
      - name: Run shortfin tests
        if: ${{ !cancelled() }}
        run: |
          pytest -n 4 shortfin/
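      # `pytest -n 4` spreads each suite across four worker processes; this
      # relies on the pytest-xdist plugin, which is assumed to be pulled in via
      # requirements.txt.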
      # TODO: Enable type checking of sharktank
      - name: MyPy Type Checking Shortfin
        if: ${{ !cancelled() }}
        run: |
          (cd shortfin && mypy)
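      # Running mypy from inside shortfin/ lets it pick up that project's own
      # configuration (e.g. a mypy.ini or pyproject.toml section, if present).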