Skip to content

Commit

Permalink
feat: convert openai related tests to VCR (#50)
Browse files Browse the repository at this point in the history
Signed-off-by: Adrian Cole <[email protected]>
  • Loading branch information
codefromthecrypt authored Sep 23, 2024
1 parent 1109ad6 commit c0114fb
Show file tree
Hide file tree
Showing 9 changed files with 273 additions and 75 deletions.
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.uv]
dev-dependencies = ["pytest>=8.3.2", "codecov>=2.1.13"]
dev-dependencies = ["pytest>=8.3.2", "pytest-vcr>=1.0.2", "codecov>=2.1.13"]

[project.entry-points."exchange.provider"]
openai = "exchange.providers.openai:OpenAiProvider"
Expand Down
1 change: 1 addition & 0 deletions tests/providers/openai/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
"""Tests that use the OpenAI API."""
68 changes: 68 additions & 0 deletions tests/providers/openai/cassettes/test_ollama_completion.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
interactions:
- request:
body: ''
headers:
accept:
- '*/*'
accept-encoding:
- gzip, deflate
connection:
- keep-alive
host:
- localhost:11434
user-agent:
- python-httpx/0.27.2
method: GET
uri: http://localhost:11434/
response:
body:
string: Ollama is running
headers:
Content-Length:
- '17'
Content-Type:
- text/plain; charset=utf-8
Date:
- Sun, 22 Sep 2024 23:40:13 GMT
Set-Cookie: test_set_cookie
openai-organization: test_openai_org_key
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello"}], "model": "mistral-nemo"}'
headers:
accept:
- '*/*'
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '140'
content-type:
- application/json
host:
- localhost:11434
user-agent:
- python-httpx/0.27.2
method: POST
uri: http://localhost:11434/v1/chat/completions
response:
body:
string: "{\"id\":\"chatcmpl-429\",\"object\":\"chat.completion\",\"created\":1727048416,\"model\":\"mistral-nemo\",\"system_fingerprint\":\"fp_ollama\",\"choices\":[{\"index\":0,\"message\":{\"role\":\"assistant\",\"content\":\"Hello!
I'm here to help. How can I assist you today? Let's chat. \U0001F60A\"},\"finish_reason\":\"stop\"}],\"usage\":{\"prompt_tokens\":10,\"completion_tokens\":23,\"total_tokens\":33}}\n"
headers:
Content-Length:
- '356'
Content-Type:
- application/json
Date:
- Sun, 22 Sep 2024 23:40:16 GMT
Set-Cookie: test_set_cookie
openai-organization: test_openai_org_key
status:
code: 200
message: OK
version: 1
80 changes: 80 additions & 0 deletions tests/providers/openai/cassettes/test_openai_completion.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello"}], "model": "gpt-4o-mini"}'
headers:
accept:
- '*/*'
accept-encoding:
- gzip, deflate
authorization:
- Bearer test_openai_api_key
connection:
- keep-alive
content-length:
- '139'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- python-httpx/0.27.2
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-AAQTYi3DXJnltAfd5sUH1Wnzh69t3\",\n \"object\":
\"chat.completion\",\n \"created\": 1727048416,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Hello! How can I assist you today?\",\n
\ \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 18,\n \"completion_tokens\":
9,\n \"total_tokens\": 27,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
0\n }\n },\n \"system_fingerprint\": \"fp_1bb46167f9\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c762399feb55739-SYD
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Sun, 22 Sep 2024 23:40:17 GMT
Server:
- cloudflare
Set-Cookie: test_set_cookie
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
content-length:
- '593'
openai-organization: test_openai_org_key
openai-processing-ms:
- '560'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15552000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '200000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '199973'
x-ratelimit-reset-requests:
- 8.64s
x-ratelimit-reset-tokens:
- 8ms
x-request-id:
- req_22e26c840219cde3152eaba1ce89483b
status:
code: 200
message: OK
version: 1
52 changes: 52 additions & 0 deletions tests/providers/openai/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
import os
import pytest

OPENAI_MODEL = "gpt-4o-mini"
OPENAI_API_KEY = "test_openai_api_key"
OPENAI_ORG_ID = "test_openai_org_key"
OPENAI_PROJECT_ID = "test_openai_project_id"


@pytest.fixture
def default_openai_api_key(monkeypatch):
    """
    Provide a placeholder OPENAI_API_KEY when none is set.

    OpenAiProvider.from_env() raises when OPENAI_API_KEY is missing from
    the environment. When recording a cassette for the first time (or
    after deleting one), export a real OPENAI_API_KEY so live responses
    can be fetched; replays use the recorded data, so the placeholder
    value is enough.
    """
    if os.environ.get("OPENAI_API_KEY") is None:
        monkeypatch.setenv("OPENAI_API_KEY", OPENAI_API_KEY)


@pytest.fixture(scope="module")
def vcr_config():
    """
    VCR configuration shared by the tests in this package.

    Scrubs sensitive data and gunzips bodies when in recording mode.
    Without this, cookies and auth tokens would leak into the cassettes.
    Also, depending on the request, some responses would be binary encoded
    while others plain json; decoding keeps every body human-readable.
    """
    scrubbed_request_headers = [
        ("authorization", "Bearer " + OPENAI_API_KEY),
        ("openai-organization", OPENAI_ORG_ID),
        ("openai-project", OPENAI_PROJECT_ID),
        ("cookie", None),
    ]
    return {
        "decode_compressed_response": True,
        "filter_headers": scrubbed_request_headers,
        "before_record_response": scrub_response_headers,
    }


def scrub_response_headers(response):
    """
    Replace sensitive response headers with placeholder values.

    Note: unlike request header filtering, these names are case-sensitive
    and must match exactly what the server sent.
    """
    headers = response["headers"]
    headers["openai-organization"] = OPENAI_ORG_ID
    headers["Set-Cookie"] = "test_set_cookie"
    return response
32 changes: 32 additions & 0 deletions tests/providers/openai/test_ollama.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
from typing import Tuple

import pytest

from exchange import Text
from exchange.message import Message
from exchange.providers.base import Usage
from exchange.providers.ollama import OllamaProvider, OLLAMA_MODEL


@pytest.mark.vcr()
def test_ollama_completion(default_openai_api_key):
    """Replay a recorded Ollama chat completion and verify the reply."""
    message, usage = ollama_complete()

    expected = "Hello! I'm here to help. How can I assist you today? Let's chat. 😊"
    assert message.content == [Text(text=expected)]
    assert usage.total_tokens == 33


@pytest.mark.integration
def test_ollama_completion_integration():
    """Call the live Ollama service and check we got some reply content."""
    reply = ollama_complete()

    assert reply[0].content is not None
    # Fix: this exercises Ollama, not OpenAI — the message previously said
    # "from OpenAI" (copy-paste from the OpenAI test).
    print("Completion content from Ollama:", reply[0].content)


def ollama_complete() -> Tuple[Message, Usage]:
    """Send a canned 'Hello' chat to Ollama and return (message, usage)."""
    provider = OllamaProvider.from_env()
    return provider.complete(
        model=OLLAMA_MODEL,
        system="You are a helpful assistant.",
        messages=[Message.user("Hello")],
        tools=None,
    )
39 changes: 39 additions & 0 deletions tests/providers/openai/test_openai.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
from typing import Tuple

import os
import pytest

from exchange import Text
from exchange.message import Message
from exchange.providers.base import Usage
from exchange.providers.openai import OpenAiProvider
from .conftest import OPENAI_MODEL, OPENAI_API_KEY


@pytest.mark.vcr()
def test_openai_completion(default_openai_api_key):
    """
    Replay a recorded OpenAI chat completion and verify the reply.

    Consistency fix: instead of duplicating the key-injection logic inline
    with monkeypatch, use the shared default_openai_api_key fixture from
    conftest.py — the same approach test_ollama_completion uses. When
    (re-)recording the cassette, export a real OPENAI_API_KEY; replays
    only need the fixture's placeholder.
    """
    reply_message, reply_usage = openai_complete()

    assert reply_message.content == [Text(text="Hello! How can I assist you today?")]
    assert reply_usage.total_tokens == 27


@pytest.mark.integration
def test_openai_completion_integration():
    """Call the live OpenAI API and check we got some reply content."""
    message, _usage = openai_complete()

    assert message.content is not None
    print("Completion content from OpenAI:", message.content)


def openai_complete() -> Tuple[Message, Usage]:
    """Send a canned 'Hello' chat to OpenAI and return (message, usage)."""
    provider = OpenAiProvider.from_env()
    return provider.complete(
        model=OPENAI_MODEL,
        system="You are a helpful assistant.",
        messages=[Message.user("Hello")],
        tools=None,
    )
16 changes: 0 additions & 16 deletions tests/providers/test_ollama.py

This file was deleted.

58 changes: 0 additions & 58 deletions tests/providers/test_openai.py

This file was deleted.

0 comments on commit c0114fb

Please sign in to comment.