Skip to content

Commit

Permalink
test: reduce code redundancy in openai based tests (#54)
Browse files Browse the repository at this point in the history
Signed-off-by: Adrian Cole <[email protected]>
  • Loading branch information
codefromthecrypt authored Sep 26, 2024
1 parent 9feb7ec commit d93f551
Show file tree
Hide file tree
Showing 19 changed files with 485 additions and 148 deletions.
7 changes: 5 additions & 2 deletions src/exchange/providers/ollama.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,10 @@ def from_env(cls: Type["OllamaProvider"]) -> "OllamaProvider":
base_url=url,
timeout=httpx.Timeout(60 * 10),
)
# from_env is expected to fail if provider is not available
# so we run a quick test that the endpoint is running
# from_env is expected to fail if required ENV variables are not
# available. Since this provider can run with defaults, we substitute
# a health check to verify the endpoint is running.
client.get("")
# The OpenAI API is defined after "v1/", so we need to join it here.
client.base_url = client.base_url.join("v1/")
return cls(client)
8 changes: 6 additions & 2 deletions src/exchange/providers/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ def from_env(cls: Type["OpenAiProvider"]) -> "OpenAiProvider":
"Failed to get OPENAI_API_KEY from the environment, see https://platform.openai.com/docs/api-reference/api-keys"
)
client = httpx.Client(
base_url=url,
base_url=url + "v1/",
auth=("Bearer", key),
timeout=httpx.Timeout(60 * 10),
)
Expand Down Expand Up @@ -93,5 +93,9 @@ def complete(

@retry_procedure
def _post(self, payload: dict) -> dict:
response = self.client.post("v1/chat/completions", json=payload)
# Note: While OpenAI and Ollama mount the API under "v1", this is
# conventional and not a strict requirement. For example, Azure OpenAI
# mounts the API under the deployment name, and "v1" is not in the URL.
# See https://github.com/openai/openai-openapi/blob/master/openapi.yaml
response = self.client.post("chat/completions", json=payload)
return raise_for_status(response).json()
1 change: 1 addition & 0 deletions tests/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
"""Tests for exchange."""
16 changes: 16 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,3 +18,19 @@ def _create_usage(input_tokens=100, output_tokens=200, total_tokens=300):
return Usage(input_tokens=input_tokens, output_tokens=output_tokens, total_tokens=total_tokens)

return _create_usage


def read_file(filename: str) -> str:
    # NOTE(review): this is a test stub of a "tool" function. Its name and
    # docstring are serialized into the chat-completion request (the tool
    # schema embedded in the recorded cassettes repeats this exact text), so
    # the docstring below must stay byte-identical to what was recorded —
    # do not reword it without re-recording the cassettes.
    """
    Read the contents of the file.
    Args:
        filename (str): The path to the file, which can be relative or
        absolute. If it is a plain filename, it is assumed to be in the
        current working directory.
    Returns:
        str: The contents of the file.
    """
    # The tests only ever ask the model about "test.txt"; any other argument
    # indicates a drifted prompt or cassette, so fail loudly.
    assert filename == "test.txt"
    # Canned contents the provider tests assert on.
    return "hello exchange"
1 change: 1 addition & 0 deletions tests/providers/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
"""Tests for chat completion providers."""
75 changes: 75 additions & 0 deletions tests/providers/cassettes/test_ollama_tools.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
interactions:
- request:
body: ''
headers:
accept:
- '*/*'
accept-encoding:
- gzip, deflate
connection:
- keep-alive
host:
- localhost:11434
user-agent:
- python-httpx/0.27.2
method: GET
uri: http://localhost:11434/
response:
body:
string: Ollama is running
headers:
Content-Length:
- '17'
Content-Type:
- text/plain; charset=utf-8
Date:
- Wed, 25 Sep 2024 09:23:08 GMT
Set-Cookie: test_set_cookie
openai-organization: test_openai_org_key
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are a helpful assistant.
Expect to need to read a file using read_file."}, {"role": "user", "content":
"What are the contents of this file? test.txt"}], "model": "mistral-nemo", "tools":
[{"type": "function", "function": {"name": "read_file", "description": "Read
the contents of the file.", "parameters": {"type": "object", "properties": {"filename":
{"type": "string", "description": "The path to the file, which can be relative
or\nabsolute. If it is a plain filename, it is assumed to be in the\ncurrent
working directory."}}, "required": ["filename"]}}}]}'
headers:
accept:
- '*/*'
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '609'
content-type:
- application/json
host:
- localhost:11434
user-agent:
- python-httpx/0.27.2
method: POST
uri: http://localhost:11434/v1/chat/completions
response:
body:
string: '{"id":"chatcmpl-245","object":"chat.completion","created":1727256190,"model":"mistral-nemo","system_fingerprint":"fp_ollama","choices":[{"index":0,"message":{"role":"assistant","content":"","tool_calls":[{"id":"call_z6fgu3z3","type":"function","function":{"name":"read_file","arguments":"{\"filename\":\"test.txt\"}"}}]},"finish_reason":"tool_calls"}],"usage":{"prompt_tokens":112,"completion_tokens":21,"total_tokens":133}}
'
headers:
Content-Length:
- '425'
Content-Type:
- application/json
Date:
- Wed, 25 Sep 2024 09:23:10 GMT
Set-Cookie: test_set_cookie
openai-organization: test_openai_org_key
status:
code: 200
message: OK
version: 1
90 changes: 90 additions & 0 deletions tests/providers/cassettes/test_openai_tools.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are a helpful assistant.
Expect to need to read a file using read_file."}, {"role": "user", "content":
"What are the contents of this file? test.txt"}], "model": "gpt-4o-mini", "tools":
[{"type": "function", "function": {"name": "read_file", "description": "Read
the contents of the file.", "parameters": {"type": "object", "properties": {"filename":
{"type": "string", "description": "The path to the file, which can be relative
or\nabsolute. If it is a plain filename, it is assumed to be in the\ncurrent
working directory."}}, "required": ["filename"]}}}]}'
headers:
accept:
- '*/*'
accept-encoding:
- gzip, deflate
authorization:
- Bearer test_openai_api_key
connection:
- keep-alive
content-length:
- '608'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- python-httpx/0.27.2
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-ABIV2aZWVKQ774RAQ8KHYdNwkI5N7\",\n \"object\":
\"chat.completion\",\n \"created\": 1727256084,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
\ \"id\": \"call_xXYlw4A7Ud1qtCopuK5gEJrP\",\n \"type\":
\"function\",\n \"function\": {\n \"name\": \"read_file\",\n
\ \"arguments\": \"{\\\"filename\\\":\\\"test.txt\\\"}\"\n }\n
\ }\n ],\n \"refusal\": null\n },\n \"logprobs\":
null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\":
{\n \"prompt_tokens\": 107,\n \"completion_tokens\": 15,\n \"total_tokens\":
122,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0\n
\ }\n },\n \"system_fingerprint\": \"fp_1bb46167f9\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c89f19fed997e43-SYD
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Wed, 25 Sep 2024 09:21:25 GMT
Server:
- cloudflare
Set-Cookie: test_set_cookie
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
content-length:
- '844'
openai-organization: test_openai_org_key
openai-processing-ms:
- '266'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '200000'
x-ratelimit-remaining-requests:
- '9991'
x-ratelimit-remaining-tokens:
- '199952'
x-ratelimit-reset-requests:
- 1m9.486s
x-ratelimit-reset-tokens:
- 14ms
x-request-id:
- req_ff6b5d65c24f40e1faaf049c175e718d
status:
code: 200
message: OK
version: 1
86 changes: 86 additions & 0 deletions tests/providers/cassettes/test_openai_vision.yaml

Large diffs are not rendered by default.

87 changes: 87 additions & 0 deletions tests/providers/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
import os
from typing import Type, Tuple

import pytest

from exchange import Message, ToolUse, ToolResult, Tool
from exchange.providers import Usage, Provider
from tests.conftest import read_file

# Placeholder credentials. These stand in for real secrets inside recorded
# cassettes: request/response headers carrying the real values are rewritten
# to these constants before anything is written to disk.
OPENAI_API_KEY = "test_openai_api_key"
OPENAI_ORG_ID = "test_openai_org_key"
OPENAI_PROJECT_ID = "test_openai_project_id"


@pytest.fixture
def default_openai_env(monkeypatch):
    """
    Guarantee OPENAI_API_KEY is set so OpenAIProvider.from_env() succeeds.

    When recording a cassette for the first time (or after deleting one),
    export the real environment variables so live requests work. Replays are
    served from the recording and only need this placeholder value.
    """
    if os.environ.get("OPENAI_API_KEY") is None:
        monkeypatch.setenv("OPENAI_API_KEY", OPENAI_API_KEY)


@pytest.fixture(scope="module")
def vcr_config():
    """
    VCR settings that keep cassettes secret-free and human-readable.

    Credential-bearing request headers are replaced with placeholders and
    cookies are dropped, so recordings never leak auth tokens. Compressed
    response bodies are stored decoded, so every cassette body is plain,
    reviewable text instead of binary.
    """
    scrubbed_request_headers = [
        ("authorization", "Bearer " + OPENAI_API_KEY),
        ("openai-organization", OPENAI_ORG_ID),
        ("openai-project", OPENAI_PROJECT_ID),
        ("cookie", None),
    ]
    return {
        "decode_compressed_response": True,
        "filter_headers": scrubbed_request_headers,
        "before_record_response": scrub_response_headers,
    }


def scrub_response_headers(response):
    """
    Replace sensitive response headers with placeholders before recording.

    Note: header names here are case-sensitive — they must match the exact
    casing the server sent, or the real value slips into the cassette.
    """
    headers = response["headers"]
    headers["openai-organization"] = OPENAI_ORG_ID
    headers["Set-Cookie"] = "test_set_cookie"
    return response


def complete(provider_cls: Type[Provider], model: str) -> Tuple[Message, Usage]:
    """Run a plain, tool-free chat completion against the given provider.

    Args:
        provider_cls: Provider class to instantiate via its from_env().
        model: Model identifier to request.
    Returns:
        The (message, usage) pair produced by the provider.
    """
    conversation = [Message.user("Hello")]
    return provider_cls.from_env().complete(
        model=model,
        system="You are a helpful assistant.",
        messages=conversation,
        tools=None,
    )


def tools(provider_cls: Type[Provider], model: str) -> Tuple[Message, Usage]:
    """Run a completion whose prompt should trigger a read_file tool call.

    Args:
        provider_cls: Provider class to instantiate via its from_env().
        model: Model identifier to request.
    Returns:
        The (message, usage) pair produced by the provider.
    """
    provider = provider_cls.from_env()
    conversation = [Message.user("What are the contents of this file? test.txt")]
    return provider.complete(
        model=model,
        system="You are a helpful assistant. Expect to need to read a file using read_file.",
        messages=conversation,
        tools=(Tool.from_function(read_file),),
    )


def vision(provider_cls: Type[Provider], model: str) -> Tuple[Message, Usage]:
    """Run a completion whose history contains an image tool result.

    The conversation replays a prior screenshot tool round-trip so the
    provider must interpret the referenced image to answer.

    Args:
        provider_cls: Provider class to instantiate via its from_env().
        model: Model identifier to request.
    Returns:
        The (message, usage) pair produced by the provider.
    """
    provider = provider_cls.from_env()
    screenshot_call = Message(
        role="assistant",
        content=[ToolUse(id="xyz", name="screenshot", parameters={})],
    )
    screenshot_result = Message(
        role="user",
        content=[ToolResult(tool_use_id="xyz", output='"image:tests/test_image.png"')],
    )
    conversation = [
        Message.user("What does the first entry in the menu say?"),
        screenshot_call,
        screenshot_result,
    ]
    return provider.complete(
        model=model,
        system="You are a helpful assistant.",
        messages=conversation,
        tools=None,
    )
1 change: 0 additions & 1 deletion tests/providers/openai/__init__.py

This file was deleted.

52 changes: 0 additions & 52 deletions tests/providers/openai/conftest.py

This file was deleted.

Loading

0 comments on commit d93f551

Please sign in to comment.