diff --git a/pyproject.toml b/pyproject.toml index e9faaa8..7ef2295 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,7 +23,7 @@ requires = ["hatchling"] build-backend = "hatchling.build" [tool.uv] -dev-dependencies = ["pytest>=8.3.2", "codecov>=2.1.13"] +dev-dependencies = ["pytest>=8.3.2", "pytest-vcr>=1.0.2", "codecov>=2.1.13"] [project.entry-points."exchange.provider"] openai = "exchange.providers.openai:OpenAiProvider" diff --git a/tests/providers/openai/__init__.py b/tests/providers/openai/__init__.py new file mode 100644 index 0000000..8c5df76 --- /dev/null +++ b/tests/providers/openai/__init__.py @@ -0,0 +1 @@ +"""Tests that use the OpenAI API.""" diff --git a/tests/providers/openai/cassettes/test_ollama_completion.yaml b/tests/providers/openai/cassettes/test_ollama_completion.yaml new file mode 100644 index 0000000..88bc206 --- /dev/null +++ b/tests/providers/openai/cassettes/test_ollama_completion.yaml @@ -0,0 +1,68 @@ +interactions: +- request: + body: '' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + connection: + - keep-alive + host: + - localhost:11434 + user-agent: + - python-httpx/0.27.2 + method: GET + uri: http://localhost:11434/ + response: + body: + string: Ollama is running + headers: + Content-Length: + - '17' + Content-Type: + - text/plain; charset=utf-8 + Date: + - Sun, 22 Sep 2024 23:40:13 GMT + Set-Cookie: test_set_cookie + openai-organization: test_openai_org_key + status: + code: 200 + message: OK +- request: + body: '{"messages": [{"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello"}], "model": "mistral-nemo"}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '140' + content-type: + - application/json + host: + - localhost:11434 + user-agent: + - python-httpx/0.27.2 + method: POST + uri: http://localhost:11434/v1/chat/completions + response: + body: + string: 
"{\"id\":\"chatcmpl-429\",\"object\":\"chat.completion\",\"created\":1727048416,\"model\":\"mistral-nemo\",\"system_fingerprint\":\"fp_ollama\",\"choices\":[{\"index\":0,\"message\":{\"role\":\"assistant\",\"content\":\"Hello! + I'm here to help. How can I assist you today? Let's chat. \U0001F60A\"},\"finish_reason\":\"stop\"}],\"usage\":{\"prompt_tokens\":10,\"completion_tokens\":23,\"total_tokens\":33}}\n" + headers: + Content-Length: + - '356' + Content-Type: + - application/json + Date: + - Sun, 22 Sep 2024 23:40:16 GMT + Set-Cookie: test_set_cookie + openai-organization: test_openai_org_key + status: + code: 200 + message: OK +version: 1 diff --git a/tests/providers/openai/cassettes/test_openai_completion.yaml b/tests/providers/openai/cassettes/test_openai_completion.yaml new file mode 100644 index 0000000..1a92eb3 --- /dev/null +++ b/tests/providers/openai/cassettes/test_openai_completion.yaml @@ -0,0 +1,80 @@ +interactions: +- request: + body: '{"messages": [{"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello"}], "model": "gpt-4o-mini"}' + headers: + accept: + - '*/*' + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '139' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - python-httpx/0.27.2 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-AAQTYi3DXJnltAfd5sUH1Wnzh69t3\",\n \"object\": + \"chat.completion\",\n \"created\": 1727048416,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Hello! 
How can I assist you today?\",\n + \ \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 18,\n \"completion_tokens\": + 9,\n \"total_tokens\": 27,\n \"completion_tokens_details\": {\n \"reasoning_tokens\": + 0\n }\n },\n \"system_fingerprint\": \"fp_1bb46167f9\"\n}\n" + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8c762399feb55739-SYD + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Sun, 22 Sep 2024 23:40:17 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + content-length: + - '593' + openai-organization: test_openai_org_key + openai-processing-ms: + - '560' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=15552000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '200000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '199973' + x-ratelimit-reset-requests: + - 8.64s + x-ratelimit-reset-tokens: + - 8ms + x-request-id: + - req_22e26c840219cde3152eaba1ce89483b + status: + code: 200 + message: OK +version: 1 diff --git a/tests/providers/openai/conftest.py b/tests/providers/openai/conftest.py new file mode 100644 index 0000000..e282c87 --- /dev/null +++ b/tests/providers/openai/conftest.py @@ -0,0 +1,52 @@ +import os +import pytest + +OPENAI_MODEL = "gpt-4o-mini" +OPENAI_API_KEY = "test_openai_api_key" +OPENAI_ORG_ID = "test_openai_org_key" +OPENAI_PROJECT_ID = "test_openai_project_id" + + +@pytest.fixture +def default_openai_api_key(monkeypatch): + """ + This fixture avoids the error OpenAiProvider.from_env() raises when the + OPENAI_API_KEY is not set in the environment. 
+ + When running VCR tests for the first time or after deleting a cassette + recording, a real OPENAI_API_KEY must be passed as an environment variable, + so real responses can be fetched. Subsequent runs use the recorded data, so + don't need a real key. + """ + if "OPENAI_API_KEY" not in os.environ: + monkeypatch.setenv("OPENAI_API_KEY", OPENAI_API_KEY) + + +@pytest.fixture(scope="module") +def vcr_config(): + """ + This scrubs sensitive data and gunzips bodies when in recording mode. + + Without this, you would leak cookies and auth tokens in the cassettes. + Also, depending on the request, some responses would be binary encoded + while others plain json. This ensures all bodies are human-readable. + """ + return { + "decode_compressed_response": True, + "filter_headers": [ + ("authorization", "Bearer " + OPENAI_API_KEY), + ("openai-organization", OPENAI_ORG_ID), + ("openai-project", OPENAI_PROJECT_ID), + ("cookie", None), + ], + "before_record_response": scrub_response_headers, + } + + +def scrub_response_headers(response): + """ + This scrubs sensitive response headers. Note they are case-sensitive! + """ + response["headers"]["openai-organization"] = OPENAI_ORG_ID + response["headers"]["Set-Cookie"] = "test_set_cookie" + return response diff --git a/tests/providers/openai/test_ollama.py b/tests/providers/openai/test_ollama.py new file mode 100644 index 0000000..a432d0c --- /dev/null +++ b/tests/providers/openai/test_ollama.py @@ -0,0 +1,32 @@ +from typing import Tuple + +import pytest + +from exchange import Text +from exchange.message import Message +from exchange.providers.base import Usage +from exchange.providers.ollama import OllamaProvider, OLLAMA_MODEL + + +@pytest.mark.vcr() +def test_ollama_completion(default_openai_api_key): + reply_message, reply_usage = ollama_complete() + + assert reply_message.content == [Text(text="Hello! I'm here to help. How can I assist you today? Let's chat. 
😊")]
+    assert reply_usage.total_tokens == 33
+
+
+@pytest.mark.integration
+def test_ollama_completion_integration():
+    reply = ollama_complete()
+
+    assert reply[0].content is not None
+    print("Completion content from Ollama:", reply[0].content)
+
+
+def ollama_complete() -> Tuple[Message, Usage]:
+    provider = OllamaProvider.from_env()
+    model = OLLAMA_MODEL
+    system = "You are a helpful assistant."
+    messages = [Message.user("Hello")]
+    return provider.complete(model=model, system=system, messages=messages, tools=None)
diff --git a/tests/providers/openai/test_openai.py b/tests/providers/openai/test_openai.py
new file mode 100644
index 0000000..354d1c5
--- /dev/null
+++ b/tests/providers/openai/test_openai.py
@@ -0,0 +1,39 @@
+from typing import Tuple
+
+import os
+import pytest
+
+from exchange import Text
+from exchange.message import Message
+from exchange.providers.base import Usage
+from exchange.providers.openai import OpenAiProvider
+from .conftest import OPENAI_MODEL, OPENAI_API_KEY
+
+
+@pytest.mark.vcr()
+def test_openai_completion(monkeypatch):
+    # When running VCR tests the first time, it needs OPENAI_API_KEY to call
+    # the real service. Afterward, it is not needed as VCR mocks the service.
+    if "OPENAI_API_KEY" not in os.environ:
+        monkeypatch.setenv("OPENAI_API_KEY", OPENAI_API_KEY)
+
+    reply_message, reply_usage = openai_complete()
+
+    assert reply_message.content == [Text(text="Hello! How can I assist you today?")]
+    assert reply_usage.total_tokens == 27
+
+
+@pytest.mark.integration
+def test_openai_completion_integration():
+    reply = openai_complete()
+
+    assert reply[0].content is not None
+    print("Completion content from OpenAI:", reply[0].content)
+
+
+def openai_complete() -> Tuple[Message, Usage]:
+    provider = OpenAiProvider.from_env()
+    model = OPENAI_MODEL
+    system = "You are a helpful assistant."
+ messages = [Message.user("Hello")] + return provider.complete(model=model, system=system, messages=messages, tools=None) diff --git a/tests/providers/test_ollama.py b/tests/providers/test_ollama.py deleted file mode 100644 index 7812fe6..0000000 --- a/tests/providers/test_ollama.py +++ /dev/null @@ -1,16 +0,0 @@ -import pytest -from exchange import Message -from exchange.providers.ollama import OllamaProvider, OLLAMA_MODEL - - -@pytest.mark.integration -def test_ollama_integration(): - provider = OllamaProvider.from_env() - model = OLLAMA_MODEL - system = "You are a helpful assistant." - messages = [Message.user("Hello")] - - reply = provider.complete(model=model, system=system, messages=messages, tools=None) - - assert reply[0].content is not None - print("Completion content from Ollama:", reply[0].content) diff --git a/tests/providers/test_openai.py b/tests/providers/test_openai.py deleted file mode 100644 index 0e0e000..0000000 --- a/tests/providers/test_openai.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -from unittest.mock import patch - -import pytest -from exchange import Message, Text -from exchange.providers.openai import OpenAiProvider - - -@pytest.fixture -@patch.dict(os.environ, {"OPENAI_API_KEY": "test_api_key"}) -def openai_provider(): - return OpenAiProvider.from_env() - - -@patch("httpx.Client.post") -@patch("time.sleep", return_value=None) -@patch("logging.warning") -@patch("logging.error") -def test_openai_completion(mock_error, mock_warning, mock_sleep, mock_post, openai_provider): - mock_response = { - "choices": [{"message": {"role": "assistant", "content": "Hello!"}}], - "usage": {"prompt_tokens": 10, "completion_tokens": 25, "total_tokens": 35}, - } - - mock_post.return_value.json.return_value = mock_response - - model = "gpt-4" - system = "You are a helpful assistant." 
- messages = [Message.user("Hello")] - tools = () - - reply_message, reply_usage = openai_provider.complete(model=model, system=system, messages=messages, tools=tools) - - assert reply_message.content == [Text(text="Hello!")] - assert reply_usage.total_tokens == 35 - mock_post.assert_called_once_with( - "v1/chat/completions", - json={ - "messages": [ - {"role": "system", "content": system}, - {"role": "user", "content": "Hello"}, - ], - "model": model, - }, - ) - - -@pytest.mark.integration -def test_openai_integration(): - provider = OpenAiProvider.from_env() - model = "gpt-4" # specify a valid model - system = "You are a helpful assistant." - messages = [Message.user("Hello")] - - reply = provider.complete(model=model, system=system, messages=messages, tools=None) - - assert reply[0].content is not None - print("Completion content from OpenAI:", reply[0].content)