Skip to content

Commit

Permalink
test: reduce code redundancy in openai based tests
Browse files Browse the repository at this point in the history
Signed-off-by: Adrian Cole <[email protected]>
  • Loading branch information
codefromthecrypt committed Sep 25, 2024
1 parent 5b34bc5 commit ca489fc
Show file tree
Hide file tree
Showing 5 changed files with 28 additions and 43 deletions.
17 changes: 16 additions & 1 deletion tests/providers/openai/conftest.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,19 @@
import os
from typing import Type, Tuple

import pytest

OPENAI_MODEL = "gpt-4o-mini"
from exchange import Message
from exchange.providers import Usage, Provider
from exchange.providers.ollama import OLLAMA_MODEL

OPENAI_API_KEY = "test_openai_api_key"
OPENAI_ORG_ID = "test_openai_org_key"
OPENAI_PROJECT_ID = "test_openai_project_id"

OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", OLLAMA_MODEL)
OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")


@pytest.fixture
def default_openai_api_key(monkeypatch):
Expand Down Expand Up @@ -50,3 +58,10 @@ def scrub_response_headers(response):
response["headers"]["openai-organization"] = OPENAI_ORG_ID
response["headers"]["Set-Cookie"] = "test_set_cookie"
return response


def complete(provider_cls: Type[Provider], model: str) -> Tuple[Message, Usage]:
    """Send a canned "Hello" chat turn to *provider_cls* and return the reply.

    Shared by the provider test modules so each one avoids duplicating the
    provider construction and message setup.
    """
    instance = provider_cls.from_env()
    prompt = [Message.user("Hello")]
    return instance.complete(
        model=model,
        system="You are a helpful assistant.",
        messages=prompt,
        tools=None,
    )
24 changes: 6 additions & 18 deletions tests/providers/openai/test_ollama.py
Original file line number Diff line number Diff line change
@@ -1,33 +1,21 @@
from typing import Tuple

import os
import pytest

from exchange import Text
from exchange.message import Message
from exchange.providers.base import Usage
from exchange.providers.ollama import OllamaProvider, OLLAMA_MODEL
from exchange.providers.ollama import OllamaProvider
from .conftest import complete, OLLAMA_MODEL


@pytest.mark.vcr()
def test_ollama_complete(default_openai_api_key):
    """Recorded (VCR) completion via Ollama's OpenAI-compatible endpoint.

    The cassette pins the exact reply text and token count, so the
    assertions below check literal values.
    """
    reply_message, reply_usage = complete(OllamaProvider, OLLAMA_MODEL)

    assert reply_message.content == [Text(text="Hello! I'm here to help. How can I assist you today? Let's chat. 😊")]
    assert reply_usage.total_tokens == 33


@pytest.mark.integration
def test_ollama_complete_integration():
    """Live integration check against a running Ollama server.

    Only asserts that *some* content came back — the exact text is
    model/version dependent, so no literal comparison is possible.
    """
    reply = complete(OllamaProvider, OLLAMA_MODEL)

    assert reply[0].content is not None
    # Fix: this test exercises Ollama, not OpenAI — the old label was misleading.
    print("Completion content from Ollama:", reply[0].content)
30 changes: 6 additions & 24 deletions tests/providers/openai/test_openai.py
Original file line number Diff line number Diff line change
@@ -1,39 +1,21 @@
from typing import Tuple

import os
import pytest

from exchange import Text
from exchange.message import Message
from exchange.providers.base import Usage
from exchange.providers.openai import OpenAiProvider
from .conftest import OPENAI_MODEL, OPENAI_API_KEY
from .conftest import complete, OPENAI_MODEL


@pytest.mark.vcr()
def test_openai_complete(default_openai_api_key):
    """Recorded (VCR) completion against OpenAI.

    The `default_openai_api_key` fixture supplies a dummy key when a real
    one is absent (VCR replays the cassette, so no live call is made),
    replacing the old in-test monkeypatch boilerplate.
    """
    reply_message, reply_usage = complete(OpenAiProvider, OPENAI_MODEL)

    assert reply_message.content == [Text(text="Hello! How can I assist you today?")]
    assert reply_usage.total_tokens == 27


@pytest.mark.integration
def test_openai_complete_integration():
    """Live integration check against the real OpenAI API.

    Only asserts that *some* content came back — the exact reply is
    nondeterministic, so no literal comparison is possible.
    """
    reply = complete(OpenAiProvider, OPENAI_MODEL)

    assert reply[0].content is not None
    print("Complete content from OpenAI:", reply[0].content)

0 comments on commit ca489fc

Please sign in to comment.