From 58b4aff7b54646658e9d495e53c1ae879e10601e Mon Sep 17 00:00:00 2001 From: J S <49557684+svilupp@users.noreply.github.com> Date: Sun, 24 Dec 2023 11:00:35 +0100 Subject: [PATCH 1/3] enable conversations --- CHANGELOG.md | 1 + README.md | 9 +++- src/PromptingTools.jl | 2 +- src/macros.jl | 100 ++++++++++++++++++++++++++++++++++++++-- src/precompilation.jl | 5 ++ src/user_preferences.jl | 30 +++++++++++- src/utils.jl | 74 ++++++++++++++++++++++++++++- test/macros.jl | 89 +++++++++++++++++++++++++++++++++++ test/runtests.jl | 1 + test/utils.jl | 30 ++++++++++++ 10 files changed, 332 insertions(+), 9 deletions(-) create mode 100644 test/macros.jl diff --git a/CHANGELOG.md b/CHANGELOG.md index 2e2bcef98..930cf6784 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] ### Added +- `@ai_str` macros now support multi-turn conversations. The `ai"something"` call will automatically remember the last conversation, so you can simply reply with `ai!"my-reply"`. If you send another message with `ai""`, you'll start a new conversation. Same for the asynchronous versions `aai""` and `aai!""`. ### Fixed - Removed template `RAG/CreateQAFromContext` because it's a duplicate of `RAG/RAGCreateQAFromContext` diff --git a/README.md b/README.md index 532e260ce..cbb3a33d1 100644 --- a/README.md +++ b/README.md @@ -35,6 +35,12 @@ ai"What is the capital of France?" The returned object is a light wrapper with a generated message in the field `:content` (eg, `ans.content`) for additional downstream processing. +> [!TIP] +> If you want to reply to the previous message, or simply continue the conversation, use `@ai!_str` (notice the bang `!`): +> ```julia +> ai!"And what is the population of it?" +> ``` + You can easily inject any variables with string interpolation: ```julia country = "Spain" @@ -46,6 +52,7 @@ ai"What is the capital of \$(country)?" > [!TIP] > Use after-string-flags to select the model to be called, eg, `ai"What is the capital of France?"gpt4` (use `gpt4t` for the new GPT-4 Turbo model). Great for those extra hard questions! + For more complex prompt templates, you can use handlebars-style templating and provide variables as keyword arguments: ```julia @@ -58,7 +65,7 @@ msg = aigenerate("What is the capital of {{country}}? Is the population larger t > Use `asyncmap` to run multiple AI-powered tasks concurrently. > [!TIP] -> If you use slow models (like GPT-4), you can use async version of `@ai_str` -> `@aai_str` to avoid blocking the REPL, eg, `aai"Say hi but slowly!"gpt4` +> If you use slow models (like GPT-4), you can use async version of `@ai_str` -> `@aai_str` to avoid blocking the REPL, eg, `aai"Say hi but slowly!"gpt4` (similarly `@ai!_str` -> `@aai!_str` for multi-turn conversations). For more practical examples, see the `examples/` folder and the [Advanced Examples](#advanced-examples) section below. diff --git a/src/PromptingTools.jl b/src/PromptingTools.jl index a137bd866..80c792e63 100644 --- a/src/PromptingTools.jl +++ b/src/PromptingTools.jl @@ -62,7 +62,7 @@ include("llm_openai.jl") include("llm_ollama_managed.jl") ## Convenience utils -export @ai_str, @aai_str +export @ai_str, @aai_str, @ai!_str, @aai!_str include("macros.jl") ## Experimental modules diff --git a/src/macros.jl b/src/macros.jl index c3b6e1ea5..a2fe4cefa 100644 --- a/src/macros.jl +++ b/src/macros.jl @@ -3,6 +3,8 @@ The `ai""` string macro generates an AI response to a given prompt by using `aigenerate` under the hood. 
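+
+For example, a minimal two-turn sketch (prompts are illustrative; the actual reply depends on the model):
+
+```julia
+ai"Write a haiku about Julia."    # starts a new conversation
+ai!"Make it shorter."             # replies within the same conversation
+```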
+See also `ai!""` if you want to reply to the provided message / continue the conversation. + ## Arguments - `user_prompt` (String): The input prompt for the AI model. - `model_alias` (optional, any): Provide model alias of the AI model (see `MODEL_ALIASES`). @@ -25,15 +27,72 @@ result = ai"What is `\$a+\$a`?" If you want to use a different model, eg, GPT-4, you can provide its alias as a flag: ```julia -result = ai"What is `1.23 * 100 + 1`?"gpt4 +result = ai"What is `1.23 * 100 + 1`?"gpt4t # AIMessage("The answer is 124.") ``` """ macro ai_str(user_prompt, flags...) + global CONV_HISTORY, MAX_HISTORY_LENGTH + model = isempty(flags) ? MODEL_CHAT : only(flags) + prompt = Meta.parse("\"$(escape_string(user_prompt))\"") + quote + conv = aigenerate($(esc(prompt)); model = $(esc(model)), return_all = true) + push_conversation!($(esc(CONV_HISTORY)), conv, $(esc(MAX_HISTORY_LENGTH))) + last(conv) + end +end + +""" + ai!"user_prompt"[model_alias] -> AIMessage + +The `ai!""` string macro is used to continue a previous conversation with the AI model. + +It appends the new user prompt to the last conversation in the tracked history (in `PromptingTools.CONV_HISTORY`) and generates a response based on the entire conversation context. +If you want to see the previous conversation, you can access it via `PromptingTools.CONV_HISTORY`, which keeps at most last `PromptingTools.MAX_HISTORY_LENGTH` conversations. + +## Arguments +- `user_prompt` (String): The new input prompt to be added to the existing conversation. +- `model_alias` (optional, any): Specify the model alias of the AI model to be used (see `MODEL_ALIASES`). If not provided, the default model is used. + +## Returns +`AIMessage` corresponding to the new user prompt, considering the entire conversation history. + +## Example +To continue a conversation: +```julia +# start conversation as normal +ai"Say hi." + +# ... wait for reply and then react to it: + +# continue the conversation (notice that you can change the model, eg, to more powerful one for better answer) +ai!"What do you think about that?"gpt4t +# AIMessage("Considering our previous discussion, I think that...") +``` + +## Usage Notes +- This macro should be used when you want to maintain the context of an ongoing conversation (ie, the last `ai""` message). +- It automatically accesses and updates the global conversation history. +- If no conversation history is found, it raises an assertion error, suggesting to initiate a new conversation using `ai""` instead. + +## Important +Ensure that the conversation history is not too long to maintain relevancy and coherence in the AI's responses. The history length is managed by `MAX_HISTORY_LENGTH`. +""" +macro ai!_str(user_prompt, flags...) + global CONV_HISTORY model = isempty(flags) ? MODEL_CHAT : only(flags) prompt = Meta.parse("\"$(escape_string(user_prompt))\"") quote - aigenerate($(esc(prompt)); model = $(esc(model))) + @assert !isempty($(esc(CONV_HISTORY))) "No conversation history found. Please use `ai\"\"` instead." + # grab the last conversation + old_conv = $(esc(CONV_HISTORY))[end] + conv = aigenerate(vcat(old_conv, [UserMessage($(esc(prompt)))]); + model = $(esc(model)), + return_all = true) + # replace the last conversation with the new one + $(esc(CONV_HISTORY))[end] = conv + # + last(conv) end end @@ -42,6 +101,8 @@ end Asynchronous version of `@ai_str` macro, which will log the result once it's ready. +See also `aai!""` if you want an asynchronous reply to the provided message / continue the conversation. 
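+
+The macro spawns the request via `Threads.@spawn` and returns the `Task`. A sketch of waiting for and reading the result (mirroring the package's test suite):
+
+```julia
+t = aai"Say Hi!"
+wait(t)            # block until the response has arrived
+fetch(t).content   # content of the returned AIMessage
+```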
+ # Example Send asynchronous request to GPT-4, so we don't have to wait for the response: @@ -54,13 +115,42 @@ m = aai"Say Hi!"gpt4; # [ Info: AIMessage> Hello! How can I assist you today? """ macro aai_str(user_prompt, flags...) + global CONV_HISTORY, MAX_HISTORY_LENGTH, CONV_HISTORY_LOCK model = isempty(flags) ? MODEL_CHAT : only(flags) prompt = Meta.parse("\"$(escape_string(user_prompt))\"") quote Threads.@spawn begin - m = aigenerate($(esc(prompt)); model = $(esc(model))) - @info "AIMessage> $(m.content)" # display the result once it's ready - m + conv = aigenerate($(esc(prompt)); model = $(esc(model)), return_all = true) + lock($(esc(CONV_HISTORY_LOCK))) do + push_conversation!($(esc(CONV_HISTORY)), conv, $(esc(MAX_HISTORY_LENGTH))) + end + @info "AIMessage> $(last(conv).content)" # display the result once it's ready + last(conv) + end + end +end + +macro aai!_str(user_prompt, flags...) + global CONV_HISTORY, CONV_HISTORY_LOCK + model = isempty(flags) ? MODEL_CHAT : only(flags) + prompt = Meta.parse("\"$(escape_string(user_prompt))\"") + quote + @assert !isempty($(esc(CONV_HISTORY))) "No conversation history found. Please use `aai\"\"` instead." + Threads.@spawn begin + # grab the last conversation + old_conv = $(esc(CONV_HISTORY))[end] + + # send to AI + conv = aigenerate(vcat(old_conv, [UserMessage($(esc(prompt)))]); + model = $(esc(model)), + return_all = true) + + # replace the last conversation with the new one + lock($(esc(CONV_HISTORY_LOCK))) do + $(esc(CONV_HISTORY))[end] = conv + end + @info "AIMessage> $(last(conv).content)" # display the result once it's ready + last(conv) end end end diff --git a/src/precompilation.jl b/src/precompilation.jl index 0a540344a..a1d823cc6 100644 --- a/src/precompilation.jl +++ b/src/precompilation.jl @@ -24,6 +24,11 @@ msg = aiextract(schema, "I want to ask {{it}}"; it = "Is this correct?", return_ image_url = "some_mock_url" msg = aiscan(schema, "Describe the image"; image_url) +# macro calls +ai"Hello"echo +ai!"Hello again"echo +empty!(CONV_HISTORY) + # Use of Templates template_name = :JudgeIsItTrue msg = aigenerate(schema, template_name; it = "Is this correct?") diff --git a/src/user_preferences.jl b/src/user_preferences.jl index 2c33bf563..71456c4a6 100644 --- a/src/user_preferences.jl +++ b/src/user_preferences.jl @@ -19,6 +19,8 @@ Check your preferences by calling `get_preferences(key::String)`. See `PROMPT_SCHEMA` for more information. - `MODEL_ALIASES`: A dictionary of model aliases (`alias => full_model_name`). Aliases are used to refer to models by their aliases instead of their full names to make it more convenient to use them. See `MODEL_ALIASES` for more information. +- `MAX_HISTORY_LENGTH`: The maximum length of the conversation history. Defaults to 5. Set to `nothing` to disable history. + See `CONV_HISTORY` for more information. At the moment it is not possible to persist changes to `MODEL_REGISTRY` across sessions. Define your `register_model!()` calls in your `startup.jl` file to make them available across sessions or put them at the top of your script. @@ -55,6 +57,7 @@ function set_preferences!(pairs::Pair{String, <:Any}...) "MODEL_EMBEDDING", "MODEL_ALIASES", "PROMPT_SCHEMA", + "MAX_HISTORY_LENGTH", ] for (key, value) in pairs @assert key in allowed_preferences "Unknown preference '$key'! 
(Allowed preferences: $(join(allowed_preferences,", "))" @@ -110,6 +113,22 @@ isempty(OPENAI_API_KEY) && const MISTRALAI_API_KEY::String = @load_preference("MISTRALAI_API_KEY", default=get(ENV, "MISTRALAI_API_KEY", "")); +## CONVERSATION HISTORY +""" + CONV_HISTORY + +Tracks the most recent conversations through the `ai_str macros`. + +Preference available: MAX_HISTORY_LENGTH, which sets how many last messages should be remembered. + +See also: `push_conversation!`, `resize_conversation!` + +""" +const CONV_HISTORY = Vector{Vector{<:Any}}() +const CONV_HISTORY_LOCK = ReentrantLock() +const MAX_HISTORY_LENGTH = @load_preference("MAX_HISTORY_LENGTH", + default=5)::Union{Int, Nothing} + ## Model registry # A dictionary of model names and their specs (ie, name, costs per token, etc.) # Model specs are saved in ModelSpec struct (see below) @@ -288,7 +307,16 @@ registry = Dict{String, ModelSpec}("gpt-3.5-turbo" => ModelSpec("gpt-3.5-turbo", MistralOpenAISchema(), 1.08e-7, 0.0, - "Mistral AI's hosted model for embeddings.")) + "Mistral AI's hosted model for embeddings."), + "echo" => ModelSpec("echo", + TestEchoOpenAISchema(; + response = Dict(:choices => [Dict(:message => Dict(:content => "Hello!"))], + :usage => Dict(:total_tokens => 3, + :prompt_tokens => 2, + :completion_tokens => 1)), status = 200), + 0.0, + 0.0, + "Echo is only for testing. It always responds with 'Hello!'")) ### Model Registry Structure @kwdef mutable struct ModelRegistry diff --git a/src/utils.jl b/src/utils.jl index a0f6b996b..4592bd6d6 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -251,4 +251,76 @@ _encode_local_image(::Nothing) = String[] # Used for image_url in aiscan to provided consistent output type _string_to_vector(s::AbstractString) = [s] -_string_to_vector(v::Vector{<:AbstractString}) = v \ No newline at end of file +_string_to_vector(v::Vector{<:AbstractString}) = v + +### Conversation Management + +""" + push_conversation!(conv_history, conversation::AbstractVector, max_history::Union{Int, Nothing}) + +Add a new conversation to the conversation history and resize the history if necessary. + +This function appends a conversation to the `conv_history`, which is a vector of conversations. Each conversation is represented as a vector of `AbstractMessage` objects. After adding the new conversation, the history is resized according to the `max_history` parameter to ensure that the size of the history does not exceed the specified limit. + +## Arguments +- `conv_history`: A vector that stores the history of conversations. Typically, this is `PT.CONV_HISTORY`. +- `conversation`: The new conversation to be added. It should be a vector of `AbstractMessage` objects. +- `max_history`: The maximum number of conversations to retain in the history. If `Nothing`, the history is not resized. + +## Returns +The updated conversation history. + +## Example +```julia +new_conversation = aigenerate("Hello World"; return_all = true) +push_conversation!(PT.CONV_HISTORY, new_conversation, 10) +``` + +This is done automatically by the ai"" macros. +""" +function push_conversation!(conv_history::Vector{<:Vector{<:Any}}, + conversation::AbstractVector, + max_history::Union{Int, Nothing}) + if isnothing(max_history) + return + end + push!(conv_history, conversation) + resize_conversation!(conv_history, max_history) + return conv_history +end + +""" + resize_conversation!(conv_history, max_history::Union{Int, Nothing}) + +Resize the conversation history to a specified maximum length. 
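+
+For example, a sketch of the trimming described below (counts are illustrative):
+
+```julia
+length(PT.CONV_HISTORY)                    # say, 7 conversations
+resize_conversation!(PT.CONV_HISTORY, 5)   # removes the 2 oldest conversations
+length(PT.CONV_HISTORY)                    # 5 conversations remain
+```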
+ +This function trims the `conv_history` to ensure that its size does not exceed `max_history`. It removes the oldest conversations first if the length of `conv_history` is greater than `max_history`. + +## Arguments +- `conv_history`: A vector that stores the history of conversations. Typically, this is `PT.CONV_HISTORY`. +- `max_history`: The maximum number of conversations to retain in the history. If `Nothing`, the history is not resized. + +## Returns +The resized conversation history. + +## Example +```julia +resize_conversation!(PT.CONV_HISTORY, PT.MAX_HISTORY_LENGTH) +``` + +After the function call, `conv_history` will contain only the 10 most recent conversations. + +This is done automatically by the ai"" macros. + +""" +function resize_conversation!(conv_history, + max_history::Union{Int, Nothing}) + if isnothing(max_history) + return + end + + while length(conv_history) > max_history + popfirst!(conv_history) + end + return conv_history +end \ No newline at end of file diff --git a/test/macros.jl b/test/macros.jl new file mode 100644 index 000000000..aa3addaed --- /dev/null +++ b/test/macros.jl @@ -0,0 +1,89 @@ +using PromptingTools: @ai_str, @aai_str, @ai!_str, @aai!_str +using PromptingTools: TestEchoOpenAISchema, push_conversation!, CONV_HISTORY, UserMessage + +# Develop the test for all ai"" macros... +# eg, ai"Hello echo"echo0 will send it to our echo model + +# Global variables for conversation history and max length for testing purposes + +@testset "ai_str,ai!_str" begin + ## Setup echo + # corresponds to OpenAI API v1 + response = Dict(:choices => [Dict(:message => Dict(:content => "Hello!"))], + :usage => Dict(:total_tokens => 3, :prompt_tokens => 2, :completion_tokens => 1)) + PT.register_model!(; + name = "echo0", + schema = TestEchoOpenAISchema(; response, status = 200)) + + # Test generation of AI response using the basic macro with no model alias (default model) + response = ai"Hello, how are you?"echo0 # simple call using the default model + @test response.content == "Hello!" + schema_ref = PT.MODEL_REGISTRY["echo0"].schema + @test schema_ref.inputs == + [Dict("role" => "system", "content" => "Act as a helpful AI assistant") + Dict("role" => "user", "content" => "Hello, how are you?")] + + # Test the macro with string interpolation + a = 1 + response = ai"What is `$a+$a`?"echo0 # Test with interpolated variable + schema_ref = PT.MODEL_REGISTRY["echo0"].schema + @test schema_ref.inputs == + [Dict("role" => "system", "content" => "Act as a helpful AI assistant") + Dict("role" => "user", "content" => "What is `1+1`?")] + + # ai!_str_macro" begin + # Prepopulate conversation history + push_conversation!(CONV_HISTORY, [AIMessage("Say hi.")], 999) + + # Test if it continues the conversation as expected + response = ai!"Hi again!"echo0 # continue the conversation + schema_ref = PT.MODEL_REGISTRY["echo0"].schema + @test schema_ref.inputs == + [Dict("role" => "system", "content" => "Act as a helpful AI assistant"), + Dict("role" => "assistant", "content" => "Say hi."), + Dict("role" => "user", "content" => "Hi again!")] + + @test CONV_HISTORY[end][end].content == "Hello!" + + # Test an assertion that there is conversation history + empty!(CONV_HISTORY) + @test_throws AssertionError ai!"Where are you located?" 
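+    # (thrown by the `@assert !isempty(CONV_HISTORY)` guard in `@ai!_str`)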
+ + # clean up + empty!(CONV_HISTORY) +end + +@testset "aai_str,aai!_str" begin + ## Setup echo + # corresponds to OpenAI API v1 + response = Dict(:choices => [Dict(:message => Dict(:content => "Hello!"))], + :usage => Dict(:total_tokens => 3, :prompt_tokens => 2, :completion_tokens => 1)) + PT.register_model!(; + name = "echo0", + schema = TestEchoOpenAISchema(; response, status = 200)) + + # default test + response = aai"Hello, how are you?"echo0 # simple call using the default model + wait(response) # Wait for the task to complete + @test fetch(response).content == "Hello!" + schema_ref = PT.MODEL_REGISTRY["echo0"].schema + @test schema_ref.inputs == + [Dict("role" => "system", "content" => "Act as a helpful AI assistant") + Dict("role" => "user", "content" => "Hello, how are you?")] + @test CONV_HISTORY[end][end].content == "Hello!" + + # continue conversation + push_conversation!(CONV_HISTORY, [AIMessage("Say hi.")], 999) + response = aai!"Hi again!"echo0 # continue the conversation + wait(response) # Wait for the task to complete + schema_ref = PT.MODEL_REGISTRY["echo0"].schema + @test schema_ref.inputs == + [Dict("role" => "system", "content" => "Act as a helpful AI assistant"), + Dict("role" => "assistant", "content" => "Say hi."), + Dict("role" => "user", "content" => "Hi again!")] + + @test CONV_HISTORY[end][end].content == "Hello!" + + # clean up + empty!(CONV_HISTORY) +end \ No newline at end of file diff --git a/test/runtests.jl b/test/runtests.jl index d8bc24dad..1d413fa3c 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -18,6 +18,7 @@ end include("llm_shared.jl") include("llm_openai.jl") include("llm_ollama_managed.jl") + include("macros.jl") include("templates.jl") include("serialization.jl") include("code_generation.jl") diff --git a/test/utils.jl b/test/utils.jl index ceabdd5e7..1af4c3166 100644 --- a/test/utils.jl +++ b/test/utils.jl @@ -2,6 +2,7 @@ using PromptingTools: split_by_length, replace_words using PromptingTools: _extract_handlebar_variables, call_cost, _report_stats using PromptingTools: _string_to_vector, _encode_local_image using PromptingTools: DataMessage, AIMessage +using PromptingTools: push_conversation!, resize_conversation! @testset "replace_words" begin words = ["Disney", "Snow White", "Mickey Mouse"] @@ -142,3 +143,32 @@ end @test output2[1] == output2[2] == output @test_throws AssertionError _encode_local_image("not an path") end + +### Conversation Management +@testset "push_conversation!,resize_conversation!" 
begin + # Test 1: Adding to Conversation History + conv_history = Vector{Vector{<:Any}}() + conversation = [AIMessage("Test message")] + push_conversation!(conv_history, conversation, 5) + @test length(conv_history) == 1 + @test conv_history[end] === conversation + + # Test 2: History Resize on Addition + max_history = 5 + conv_history = [[AIMessage("Test message")] for i in 1:max_history] + new_conversation = [AIMessage("Test message")] + push_conversation!(conv_history, new_conversation, max_history) + @test length(conv_history) == max_history + @test conv_history[end] === new_conversation + + # Test 3: Manual Resize + max_history = 5 + conv_history = [[AIMessage("Test message")] for i in 1:(max_history + 2)] + resize_conversation!(conv_history, max_history) + @test length(conv_history) == max_history + + # Test 4: No Resize with Nothing + conv_history = [[AIMessage("Test message")] for i in 1:7] + resize_conversation!(conv_history, nothing) + @test length(conv_history) == 7 +end \ No newline at end of file From c001ef19b98c4ed35b1b6da1943f42ac25bb1034 Mon Sep 17 00:00:00 2001 From: J S <49557684+svilupp@users.noreply.github.com> Date: Sun, 24 Dec 2023 11:02:42 +0100 Subject: [PATCH 2/3] update docs --- docs/src/getting_started.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/src/getting_started.md b/docs/src/getting_started.md index 27245ac8f..bb25e667c 100644 --- a/docs/src/getting_started.md +++ b/docs/src/getting_started.md @@ -58,6 +58,11 @@ AIMessage("The capital of France is Paris.") Returned object is a light wrapper with generated message in field `:content` (eg, `ans.content`) for additional downstream processing. +If you want to reply to the previous message, or simply continue the conversation, use `@ai!_str` (notice the bang `!`): +```julia +ai!"And what is the population of it?" +``` + You can easily inject any variables with string interpolation: ```julia country = "Spain" @@ -86,6 +91,6 @@ AIMessage("The capital of Spain is Madrid. And yes, the population of Madrid is Pro tip: Use `asyncmap` to run multiple AI-powered tasks concurrently. -Pro tip: If you use slow models (like GPT-4), you can use async version of `@ai_str` -> `@aai_str` to avoid blocking the REPL, eg, `aai"Say hi but slowly!"gpt4` +Pro tip: If you use slow models (like GPT-4), you can use the asynchronous version of `@ai_str` -> `@aai_str` to avoid blocking the REPL, eg, `aai"Say hi but slowly!"gpt4` (similarly `@ai!_str` -> `@aai!_str` for multi-turn conversations). For more practical examples, see the [Various Examples](@ref) section. \ No newline at end of file From cc4b268e6769013e3bcca1cab9039b74d351c277 Mon Sep 17 00:00:00 2001 From: J S <49557684+svilupp@users.noreply.github.com> Date: Sun, 24 Dec 2023 11:11:35 +0100 Subject: [PATCH 3/3] update tests --- src/utils.jl | 3 --- test/utils.jl | 5 +++++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/utils.jl b/src/utils.jl index 4592bd6d6..70741ac44 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -281,9 +281,6 @@ This is done automatically by the ai"" macros. 
function push_conversation!(conv_history::Vector{<:Vector{<:Any}}, conversation::AbstractVector, max_history::Union{Int, Nothing}) - if isnothing(max_history) - return - end push!(conv_history, conversation) resize_conversation!(conv_history, max_history) return conv_history diff --git a/test/utils.jl b/test/utils.jl index 1af4c3166..eb705ca11 100644 --- a/test/utils.jl +++ b/test/utils.jl @@ -160,6 +160,11 @@ end push_conversation!(conv_history, new_conversation, max_history) @test length(conv_history) == max_history @test conv_history[end] === new_conversation + push_conversation!(conv_history, new_conversation, nothing) + push_conversation!(conv_history, new_conversation, nothing) + push_conversation!(conv_history, new_conversation, nothing) + @test length(conv_history) > max_history + @test conv_history[end] === new_conversation # Test 3: Manual Resize max_history = 5