From ca489fcb5a2992b8a45e3efaae241df9b2768bf5 Mon Sep 17 00:00:00 2001
From: Adrian Cole
Date: Wed, 25 Sep 2024 09:18:35 +1000
Subject: [PATCH] test: reduce code redundancy in openai based tests

Signed-off-by: Adrian Cole
---
 ...pletion.yaml => test_ollama_complete.yaml} |  0
 ...pletion.yaml => test_openai_complete.yaml} |  0
 tests/providers/openai/conftest.py            | 17 ++++++++++-
 tests/providers/openai/test_ollama.py         | 24 ++++-----------
 tests/providers/openai/test_openai.py         | 30 ++++---------------
 5 files changed, 28 insertions(+), 43 deletions(-)
 rename tests/providers/openai/cassettes/{test_ollama_completion.yaml => test_ollama_complete.yaml} (100%)
 rename tests/providers/openai/cassettes/{test_openai_completion.yaml => test_openai_complete.yaml} (100%)

diff --git a/tests/providers/openai/cassettes/test_ollama_completion.yaml b/tests/providers/openai/cassettes/test_ollama_complete.yaml
similarity index 100%
rename from tests/providers/openai/cassettes/test_ollama_completion.yaml
rename to tests/providers/openai/cassettes/test_ollama_complete.yaml
diff --git a/tests/providers/openai/cassettes/test_openai_completion.yaml b/tests/providers/openai/cassettes/test_openai_complete.yaml
similarity index 100%
rename from tests/providers/openai/cassettes/test_openai_completion.yaml
rename to tests/providers/openai/cassettes/test_openai_complete.yaml
diff --git a/tests/providers/openai/conftest.py b/tests/providers/openai/conftest.py
index e282c87..c752e73 100644
--- a/tests/providers/openai/conftest.py
+++ b/tests/providers/openai/conftest.py
@@ -1,11 +1,19 @@
 import os
+from typing import Type, Tuple
+
 import pytest
 
-OPENAI_MODEL = "gpt-4o-mini"
+from exchange import Message
+from exchange.providers import Usage, Provider
+from exchange.providers.ollama import OLLAMA_MODEL
+
 OPENAI_API_KEY = "test_openai_api_key"
 OPENAI_ORG_ID = "test_openai_org_key"
 OPENAI_PROJECT_ID = "test_openai_project_id"
+OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", OLLAMA_MODEL)
+OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
+
 
 
 @pytest.fixture
 def default_openai_api_key(monkeypatch):
@@ -50,3 +58,10 @@ def scrub_response_headers(response):
     response["headers"]["openai-organization"] = OPENAI_ORG_ID
     response["headers"]["Set-Cookie"] = "test_set_cookie"
     return response
+
+
+def complete(provider_cls: Type[Provider], model: str) -> Tuple[Message, Usage]:
+    provider = provider_cls.from_env()
+    system = "You are a helpful assistant."
+    messages = [Message.user("Hello")]
+    return provider.complete(model=model, system=system, messages=messages, tools=None)
diff --git a/tests/providers/openai/test_ollama.py b/tests/providers/openai/test_ollama.py
index 0aa4646..d8a02ad 100644
--- a/tests/providers/openai/test_ollama.py
+++ b/tests/providers/openai/test_ollama.py
@@ -1,33 +1,21 @@
-from typing import Tuple
-
-import os
 import pytest
 from exchange import Text
-from exchange.message import Message
-from exchange.providers.base import Usage
-from exchange.providers.ollama import OllamaProvider, OLLAMA_MODEL
+from exchange.providers.ollama import OllamaProvider
+from .conftest import complete, OLLAMA_MODEL
 
 
 @pytest.mark.vcr()
-def test_ollama_completion(default_openai_api_key):
-    reply_message, reply_usage = ollama_complete()
+def test_ollama_complete(default_openai_api_key):
+    reply_message, reply_usage = complete(OllamaProvider, OLLAMA_MODEL)
     assert reply_message.content == [Text(text="Hello! I'm here to help. How can I assist you today? Let's chat. 😊")]
     assert reply_usage.total_tokens == 33
 
 
 @pytest.mark.integration
-def test_ollama_completion_integration():
-    reply = ollama_complete()
+def test_ollama_complete_integration():
+    reply = complete(OllamaProvider, OLLAMA_MODEL)
     assert reply[0].content is not None
     print("Completion content from OpenAI:", reply[0].content)
-
-
-def ollama_complete() -> Tuple[Message, Usage]:
-    provider = OllamaProvider.from_env()
-    model = os.getenv("OLLAMA_MODEL", OLLAMA_MODEL)
-    system = "You are a helpful assistant."
-    messages = [Message.user("Hello")]
-    return provider.complete(model=model, system=system, messages=messages, tools=None)
diff --git a/tests/providers/openai/test_openai.py b/tests/providers/openai/test_openai.py
index 354d1c5..5f72981 100644
--- a/tests/providers/openai/test_openai.py
+++ b/tests/providers/openai/test_openai.py
@@ -1,39 +1,21 @@
-from typing import Tuple
-
-import os
 import pytest
 from exchange import Text
-from exchange.message import Message
-from exchange.providers.base import Usage
 from exchange.providers.openai import OpenAiProvider
-from .conftest import OPENAI_MODEL, OPENAI_API_KEY
+
+from .conftest import complete, OPENAI_MODEL
 
 
 @pytest.mark.vcr()
-def test_openai_completion(monkeypatch):
-    # When running VCR tests the first time, it needs OPENAI_API_KEY to call
-    # the real service. Afterward, it is not needed as VCR mocks the service.
-    if "OPENAI_API_KEY" not in os.environ:
-        monkeypatch.setenv("OPENAI_API_KEY", OPENAI_API_KEY)
-
-    reply_message, reply_usage = openai_complete()
+def test_openai_complete(default_openai_api_key):
+    reply_message, reply_usage = complete(OpenAiProvider, OPENAI_MODEL)
     assert reply_message.content == [Text(text="Hello! How can I assist you today?")]
     assert reply_usage.total_tokens == 27
 
 
 @pytest.mark.integration
-def test_openai_completion_integration():
-    reply = openai_complete()
+def test_openai_complete_integration():
+    reply = complete(OpenAiProvider, OPENAI_MODEL)
     assert reply[0].content is not None
-    print("Completion content from OpenAI:", reply[0].content)
-
-
-def openai_complete() -> Tuple[Message, Usage]:
-    provider = OpenAiProvider.from_env()
-    model = OPENAI_MODEL
-    system = "You are a helpful assistant."
-    messages = [Message.user("Hello")]
-    return provider.complete(model=model, system=system, messages=messages, tools=None)
+    print("Complete content from OpenAI:", reply[0].content)