Commit
Merge pull request #263 from andreped/openai-fix
Adds support for setting max_tokens and temperature in OpenAI Chat
zainhoda authored Feb 26, 2024
2 parents fb384d4 + f6e52d2 commit bb0b4cc
Showing 2 changed files with 25 additions and 21 deletions.
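For context, a minimal usage sketch of the new options follows. The "temperature", "max_tokens", and "model" config keys come from the diff below; the OpenAI_Chat class name, the "api_key" key, and the example prompt format are assumptions based on how the class is typically used, not something this commit shows.

from vanna.openai.openai_chat import OpenAI_Chat

# Both new keys are optional; when omitted, __init__ falls back to
# temperature=0.7 and max_tokens=500. Values below are illustrative.
llm = OpenAI_Chat(config={
    "api_key": "sk-...",        # placeholder; assumed key, or rely on OPENAI_API_KEY
    "model": "gpt-3.5-turbo",
    "temperature": 0.2,
    "max_tokens": 1000,
})

# submit_prompt takes a message log (list of dicts with a "content" field, per the diff);
# the "user" role here is an assumption.
answer = llm.submit_prompt([{"role": "user", "content": "Generate a SQL query for total sales by month."}])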
44 changes: 25 additions & 19 deletions src/vanna/openai/openai_chat.py
@@ -1,8 +1,5 @@
 import os
-import re
-from abc import abstractmethod
 
-import pandas as pd
 from openai import OpenAI
 
 from ..base import VannaBase
@@ -20,6 +17,16 @@ def __init__(self, client=None, config=None):
             self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
             return
 
+        # default parameters - can be overrided using config
+        self.temperature = 0.7
+        self.max_tokens = 500
+
+        if "temperature" in config:
+            self.temperature = config["temperature"]
+
+        if "max_tokens" in config:
+            self.max_tokens = config["max_tokens"]
+
         if "api_type" in config:
             raise Exception(
                 "Passing api_type is now deprecated. Please pass an OpenAI client instead."
@@ -55,11 +62,10 @@ def submit_prompt(self, prompt, **kwargs) -> str:
             raise Exception("Prompt is empty")
 
         # Count the number of tokens in the message log
+        # Use 4 as an approximation for the number of characters per token
         num_tokens = 0
         for message in prompt:
-            num_tokens += (
-                len(message["content"]) / 4
-            )  # Use 4 as an approximation for the number of characters per token
+            num_tokens += len(message["content"]) / 4
 
         if self.config is not None and "engine" in self.config:
             print(
@@ -68,9 +74,9 @@ def submit_prompt(self, prompt, **kwargs) -> str:
             response = self.client.chat.completions.create(
                 engine=self.config["engine"],
                 messages=prompt,
-                max_tokens=500,
+                max_tokens=self.max_tokens,
                 stop=None,
-                temperature=0.7,
+                temperature=self.temperature,
             )
         elif self.config is not None and "model" in self.config:
             print(
@@ -79,9 +85,9 @@ def submit_prompt(self, prompt, **kwargs) -> str:
             response = self.client.chat.completions.create(
                 model=self.config["model"],
                 messages=prompt,
-                max_tokens=500,
+                max_tokens=self.max_tokens,
                 stop=None,
-                temperature=0.7,
+                temperature=self.temperature,
             )
         else:
             if num_tokens > 3500:
@@ -91,17 +97,17 @@ def submit_prompt(self, prompt, **kwargs) -> str:

             print(f"Using model {model} for {num_tokens} tokens (approx)")
             response = self.client.chat.completions.create(
-                model=model, messages=prompt, max_tokens=500, stop=None, temperature=0.7
+                model=model,
+                messages=prompt,
+                max_tokens=self.max_tokens,
+                stop=None,
+                temperature=self.temperature,
             )

-        for (
-            choice
-        ) in (
-            response.choices
-        ):  # Find the first response from the chatbot that has text in it (some responses may not have text)
+        # Find the first response from the chatbot that has text in it (some responses may not have text)
+        for choice in response.choices:
             if "text" in choice:
                 return choice.text

-        return response.choices[
-            0
-        ].message.content  # If no response with text is found, return the first response's content (which may be empty)
+        # If no response with text is found, return the first response's content (which may be empty)
+        return response.choices[0].message.content
2 changes: 0 additions & 2 deletions src/vanna/openai/openai_embeddings.py
@@ -1,5 +1,3 @@
-from abc import abstractmethod
-
 from openai import OpenAI
 
 from ..base import VannaBase