# ci.yaml
name: CI
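# Runs on every pull request that targets main.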
on:
  pull_request:
    branches: [main]
jobs:
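  # Lint with ruff and run unit tests across all supported Python versions.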
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          - "3.10"
          - "3.11"
          - "3.12"
    steps:
      - uses: actions/checkout@v4
      - name: Install UV
        run: curl -LsSf https://astral.sh/uv/install.sh | sh
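      # uv's installer places the binary under ~/.cargo/bin; the next step
      # sources the Cargo environment so uv is found on the PATH.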
      - name: Source Cargo Environment
        run: source $HOME/.cargo/env
      - name: Set up Python ${{ matrix.python-version }}
        run: uv python install ${{ matrix.python-version }}
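      # uvx (uv tool run) fetches and runs ruff in an ephemeral environment,
      # so linting needs no project install.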
      - name: Ruff
        run: |
          uvx ruff check -v
          uvx ruff format --check -v
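      # Unit tests only; integration-marked tests run in the ollama job below.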
      - name: Run tests
        run: uv run pytest tests -m 'not integration'

  # This job runs integration tests of the OpenAI API (via the Ollama provider).
  # It is an alternative to testing against cloud providers, which would risk
  # leaking secrets when run on pull requests.
  ollama:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install UV
        run: curl -LsSf https://astral.sh/uv/install.sh | sh
      - name: Source Cargo Environment
        run: source $HOME/.cargo/env
      - name: Set up Python 3.12
        run: uv python install 3.12
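      # Models can be large downloads; caching ~/.ollama/models avoids
      # re-pulling the model on every workflow run.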
      - name: Cache Ollama models
        id: cache-ollama
        uses: actions/cache@v4
        with: # cache key is based on where OLLAMA_MODEL is defined.
          path: ~/.ollama/models # default directory for Ollama models
          key: ollama-${{ hashFiles('./src/exchange/providers/ollama.py') }}
      - name: Install Ollama
        run: curl -fsSL https://ollama.com/install.sh | sh
      - name: Start Ollama
        run: |
          # Run in the background, in a way that survives into the next step.
          nohup ollama serve > ollama.log 2>&1 &
          # Block until the server is ready, using the endpoint mentioned in ollama/ollama#3341.
          time curl --retry 5 --retry-connrefused --retry-delay 0 -sf http://localhost:11434
      # The first pull and first execution of a model are slow, so we do both
      # before running the tests. This also reduces the chance of flakiness.
      - name: Pull and Test Ollama model
        run: | # get the OLLAMA_MODEL from ./src/exchange/providers/ollama.py
          OLLAMA_MODEL=$(uv run python -c "from src.exchange.providers.ollama import OLLAMA_MODEL; print(OLLAMA_MODEL)")
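          # On failure, dump the server log to make debugging easier.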
          ollama pull $OLLAMA_MODEL || cat ollama.log
          ollama run $OLLAMA_MODEL hello || cat ollama.log
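      # Select only tests marked 'integration' whose names match 'ollama'.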
      - name: Run Ollama tests
        run: uv run pytest tests -m integration -k ollama