# ci.yaml
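# Lints and unit-tests every pull request against main on all supported
# Python versions, then integration-tests the OpenAI-compatible providers
# against locally hosted models (Ollama and LocalAI) so that PRs from forks
# need no API-key secrets.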
name: CI
on:
  pull_request:
    branches: [main]
jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          - "3.10"
          - "3.11"
          - "3.12"
    steps:
      - uses: actions/checkout@v4
      - name: Install UV
        run: curl -LsSf https://astral.sh/uv/install.sh | sh
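      # The uv installer places its binary under ~/.cargo/bin; sourcing
      # Cargo's env file puts that directory on PATH.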
      - name: Source Cargo Environment
        run: source $HOME/.cargo/env
      - name: Set up Python ${{ matrix.python-version }}
        run: uv python install ${{ matrix.python-version }}
      - name: Ruff
        run: |
          uvx ruff check -v
          uvx ruff format --check -v
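      # Unit tests only; tests marked 'integration' run in the jobs below
      # (assumes the marker is registered in the project's pytest config).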
      - name: Run tests
        run: uv run pytest tests -m 'not integration'
  # This job integration-tests our OpenAI API support, using Ollama to host
  # models. This lets us test PRs from forks, which can't access secrets like
  # API keys.
  ollama:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          # Only test the latest Python version.
          - "3.12"
        ollama-model:
          # For quicker CI, use a smaller tool-capable model than the default.
          - "qwen2.5:0.5b"
    steps:
      - uses: actions/checkout@v4
      - name: Install UV
        run: curl -LsSf https://astral.sh/uv/install.sh | sh
      - name: Source Cargo Environment
        run: source $HOME/.cargo/env
      - name: Set up Python
        run: uv python install ${{ matrix.python-version }}
      - name: Install Ollama
        run: curl -fsSL https://ollama.com/install.sh | sh
      - name: Start Ollama
        run: |
          # Run in the background, in a way that survives into the next step.
          nohup ollama serve > ollama.log 2>&1 &
          # Block using the ready endpoint.
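          # --retry-connrefused treats "connection refused" as retryable,
          # covering the window while the server is still starting up.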
          time curl --retry 5 --retry-connrefused --retry-delay 1 -sf http://localhost:11434 || (cat ollama.log && exit 1)
      # Tests use the OpenAI API, which has no mechanism to pull models, so run
      # a simple prompt to (pull and) test the model first.
      - name: Test Ollama model
        run: ollama run $OLLAMA_MODEL hello || (cat ollama.log && exit 1)
        env:
          OLLAMA_MODEL: ${{ matrix.ollama-model }}
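      # Assumption: the ollama-marked tests talk to Ollama's OpenAI-compatible
      # endpoint at http://localhost:11434/v1.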
      - name: Run Ollama tests
        run: uv run pytest tests -m integration -k ollama || (cat ollama.log && exit 1)
        env:
          OLLAMA_MODEL: ${{ matrix.ollama-model }}
  # This job integration-tests our OpenAI API support, using LocalAI to host
  # models. This lets us test PRs from forks, which can't access secrets like
  # API keys.
  localai:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          # Only test the latest Python version.
          - "3.12"
    steps:
      - uses: actions/checkout@v4
      - name: Install UV
        run: curl -LsSf https://astral.sh/uv/install.sh | sh
      - name: Source Cargo Environment
        run: source $HOME/.cargo/env
      - name: Set up Python
        run: uv python install ${{ matrix.python-version }}
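      # With no tag argument, gh downloads the asset from the latest release;
      # GITHUB_TOKEN suffices since mudler/LocalAI is a public repo.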
      - name: Download LocalAI
        run: gh release download -R mudler/LocalAI -p local-ai-Linux-x86_64
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Install LocalAI
        run: |
          mv local-ai-Linux-x86_64 /usr/local/bin/local-ai
          chmod +x /usr/local/bin/local-ai
      - name: Start LocalAI
        run: |
          # Run in the background, in a way that survives into the next step.
          nohup local-ai run > localai.log 2>&1 &
          # Note: we don't pass the LOCALAI_MODELS env var to `local-ai run`,
          # because it would introduce a race: the readiness check below would
          # pass before the model is downloaded.
          # Block using the ready endpoint.
          time curl --retry 5 --retry-connrefused --retry-delay 1 -sf http://localhost:8080/readyz || (cat localai.log && exit 1)
      # Tests use the OpenAI API, which has no mechanism to install models.
      # This step blocks until the model is installed, to prevent failures.
      - name: Install LocalAI model
        run: |
          # Use the default model until we find a small one that passes tests.
          LOCALAI_MODEL=$(uv run python -c "from src.exchange.providers.localai import LOCALAI_MODEL; print(LOCALAI_MODEL)")
          local-ai models install $LOCALAI_MODEL || (cat localai.log && exit 1)
      - name: Run LocalAI tests
        run: uv run pytest tests -m integration -k localai || (cat localai.log && exit 1)
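
# To reproduce an integration job locally: start `ollama serve` or
# `local-ai run`, then run `uv run pytest tests -m integration -k ollama`
# (or `-k localai`), setting OLLAMA_MODEL as above if needed.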