Skip to content

Commit

Permalink
Format Python code with psf/black (push)
Browse files Browse the repository at this point in the history
  • Loading branch information
github-actions authored and github-actions committed Oct 29, 2023
1 parent 69247ab commit 9e5aa46
Show file tree
Hide file tree
Showing 3 changed files with 23 additions and 6 deletions.
7 changes: 6 additions & 1 deletion cogs/code_interpreter_service_cog.py
Original file line number Diff line number Diff line change
Expand Up @@ -486,7 +486,12 @@ async def code_interpreter_chat_command(

llm = ChatOpenAI(model=model, temperature=0, openai_api_key=OPENAI_API_KEY)

memory = ConversationSummaryBufferMemory(memory_key="memory", return_messages=True, llm=llm, max_token_limit=29000 if "gpt-4" in model else 7500 )
memory = ConversationSummaryBufferMemory(
memory_key="memory",
return_messages=True,
llm=llm,
max_token_limit=29000 if "gpt-4" in model else 7500,
)

agent_kwargs = {
"extra_prompt_messages": [MessagesPlaceholder(variable_name="memory")],
Expand Down
14 changes: 11 additions & 3 deletions cogs/search_service_cog.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,11 @@
AgentExecutor,
)
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory, CombinedMemory, ConversationSummaryBufferMemory
from langchain.memory import (
ConversationBufferMemory,
CombinedMemory,
ConversationSummaryBufferMemory,
)
from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
Expand Down Expand Up @@ -501,8 +505,12 @@ async def search_chat_command(

llm = ChatOpenAI(model=model, temperature=0, openai_api_key=OPENAI_API_KEY)

memory = ConversationSummaryBufferMemory(memory_key="memory", return_messages=True, llm=llm,
max_token_limit=29000 if "gpt-4" in model else 7500)
memory = ConversationSummaryBufferMemory(
memory_key="memory",
return_messages=True,
llm=llm,
max_token_limit=29000 if "gpt-4" in model else 7500,
)

agent_kwargs = {
"extra_prompt_messages": [MessagesPlaceholder(variable_name="memory")],
Expand Down
8 changes: 6 additions & 2 deletions models/index_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -333,8 +333,12 @@ async def start_index_chat(self, ctx, search, user, model):
)
llm = ChatOpenAI(model=model, temperature=0)

memory = ConversationSummaryBufferMemory(memory_key="memory", return_messages=True, llm=llm,
max_token_limit=29000 if "gpt-4" in model else 7500)
memory = ConversationSummaryBufferMemory(
memory_key="memory",
return_messages=True,
llm=llm,
max_token_limit=29000 if "gpt-4" in model else 7500,
)

agent_chain = create_llama_chat_agent(toolkit, llm, memory=memory, verbose=True)

Expand Down

0 comments on commit 9e5aa46

Please sign in to comment.