diff --git a/CHANGELOG.md b/CHANGELOG.md index b303f1007..9823075a1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed +## [0.66.0] + +### Added +- Added a new `AnnotationMessage` type for keeping human-only information in the message changes. See `?annotate!` on how to use it. +- Added a new `ConversationMemory` type to enable long multi-turn conversations with a truncated memory of the conversation history. Truncation works in "batches" to not prevent caching. See `?ConversationMemory` and `get_last` for more information. + + ## [0.65.0] ### Breaking diff --git a/Project.toml b/Project.toml index d2c24e8ef..d9b150e41 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "PromptingTools" uuid = "670122d1-24a8-4d70-bfce-740807c42192" authors = ["J S @svilupp and contributors"] -version = "0.65.0" +version = "0.66.0" [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" diff --git a/src/PromptingTools.jl b/src/PromptingTools.jl index d2e9dea37..4f911ab35 100644 --- a/src/PromptingTools.jl +++ b/src/PromptingTools.jl @@ -75,6 +75,12 @@ export SystemMessage, UserMessage, AIMessage, AnnotationMessage, issystemmessage export ConversationMemory, get_last, last_message, last_output # export UserMessage, UserMessageWithImages, SystemMessage, DataMessage # for debugging only +# export ConversationMemory +include("memory.jl") +# export annotate! +include("annotation.jl") + + export aitemplates, AITemplate include("templates.jl") diff --git a/src/annotation.jl b/src/annotation.jl new file mode 100644 index 000000000..f1597381e --- /dev/null +++ b/src/annotation.jl @@ -0,0 +1,40 @@ +""" + annotate!(messages::AbstractVector{<:AbstractMessage}, content; kwargs...) + annotate!(message::AbstractMessage, content; kwargs...) + +Add an annotation message to a vector of messages or wrap a single message in a vector with an annotation. 
+The annotation is always inserted after any existing annotation messages. + +# Arguments +- `messages`: Vector of messages or single message to annotate +- `content`: Content of the annotation +- `kwargs...`: Additional fields for the AnnotationMessage (extras, tags, comment) + +# Returns +Vector{AbstractMessage} with the annotation message inserted + +# Example +```julia +messages = [SystemMessage("Assistant"), UserMessage("Hello")] +annotate!(messages, "This is important"; tags=[:important], comment="For review") +``` +""" +function annotate!(messages::AbstractVector{T}, content::AbstractString; + kwargs...) where {T <: AbstractMessage} + # Convert to Vector{AbstractMessage} if needed + messages_abstract = T == AbstractMessage ? messages : + convert(Vector{AbstractMessage}, messages) + + # Find last annotation message index + last_anno_idx = findlast(isabstractannotationmessage, messages_abstract) + insert_idx = isnothing(last_anno_idx) ? 1 : last_anno_idx + 1 + + # Create and insert annotation message + anno = AnnotationMessage(; content = content, kwargs...) + insert!(messages_abstract, insert_idx, anno) + return messages_abstract +end + +function annotate!(message::AbstractMessage, content::AbstractString; kwargs...) + return annotate!(AbstractMessage[message], content; kwargs...) 
+end \ No newline at end of file diff --git a/src/llm_anthropic.jl b/src/llm_anthropic.jl index 87688f0b6..513950d29 100644 --- a/src/llm_anthropic.jl +++ b/src/llm_anthropic.jl @@ -47,6 +47,8 @@ function render(schema::AbstractAnthropicSchema, for msg in messages_replaced if issystemmessage(msg) system = msg.content + elseif isabstractannotationmessage(msg) + continue elseif isusermessage(msg) || isaimessage(msg) content = msg.content push!(conversation, diff --git a/src/llm_google.jl b/src/llm_google.jl index d0824618d..c27fd9213 100644 --- a/src/llm_google.jl +++ b/src/llm_google.jl @@ -37,6 +37,9 @@ function render(schema::AbstractGoogleSchema, # replace any handlebar variables in the messages for msg in messages_replaced + if isabstractannotationmessage(msg) + continue + end push!(conversation, Dict( :role => role4render(schema, msg), :parts => [Dict("text" => msg.content)])) diff --git a/src/llm_ollama.jl b/src/llm_ollama.jl index 4cfa4ef12..c95a3b6f5 100644 --- a/src/llm_ollama.jl +++ b/src/llm_ollama.jl @@ -39,6 +39,9 @@ function render(schema::AbstractOllamaSchema, # replace any handlebar variables in the messages for msg in messages_replaced + if isabstractannotationmessage(msg) + continue + end new_message = Dict{String, Any}( "role" => role4render(schema, msg), "content" => msg.content) ## Special case for images diff --git a/src/llm_ollama_managed.jl b/src/llm_ollama_managed.jl index 669a58296..d26e9eec1 100644 --- a/src/llm_ollama_managed.jl +++ b/src/llm_ollama_managed.jl @@ -40,6 +40,8 @@ function render(schema::AbstractOllamaManagedSchema, system = msg.content elseif msg isa UserMessage prompt = msg.content + elseif isabstractannotationmessage(msg) + continue elseif msg isa UserMessageWithImages error("Managed schema does not support UserMessageWithImages. 
Please use OpenAISchema instead.") elseif msg isa AIMessage diff --git a/src/llm_openai.jl b/src/llm_openai.jl index 0a0332c1d..3e565c2aa 100644 --- a/src/llm_openai.jl +++ b/src/llm_openai.jl @@ -75,6 +75,8 @@ function render(schema::AbstractOpenAISchema, content = msg.content isa AbstractString ? msg.content : string(msg.content) Dict("role" => role4render(schema, msg), "content" => content, "tool_call_id" => msg.tool_call_id) + elseif isabstractannotationmessage(msg) + continue else ## Vanilla assistant message Dict("role" => role4render(schema, msg), diff --git a/src/llm_shared.jl b/src/llm_shared.jl index 889c5ce47..831281214 100644 --- a/src/llm_shared.jl +++ b/src/llm_shared.jl @@ -8,6 +8,7 @@ role4render(schema::AbstractPromptSchema, msg::UserMessageWithImages) = "user" role4render(schema::AbstractPromptSchema, msg::AIMessage) = "assistant" role4render(schema::AbstractPromptSchema, msg::AIToolRequest) = "assistant" role4render(schema::AbstractPromptSchema, msg::ToolMessage) = "tool" +role4render(schema::AbstractPromptSchema, msg::AbstractAnnotationMessage) = "annotation" """ render(schema::NoSchema, messages::Vector{<:AbstractMessage}; @@ -76,6 +77,9 @@ function render(schema::NoSchema, count_system_msg += 1 # move to the front pushfirst!(conversation, msg) + elseif isabstractannotationmessage(msg) + # Ignore annotation messages + continue else # Note: Ignores any DataMessage or other types for the prompt/conversation history @warn "Unexpected message type: $(typeof(msg)). Skipping." 
diff --git a/src/llm_sharegpt.jl b/src/llm_sharegpt.jl index 24ed65733..01ad9ad22 100644 --- a/src/llm_sharegpt.jl +++ b/src/llm_sharegpt.jl @@ -9,7 +9,7 @@ end function render(schema::AbstractShareGPTSchema, conv::AbstractVector{<:AbstractMessage}) Dict("conversations" => [Dict("from" => role4render(schema, msg), "value" => msg.content) - for msg in conv]) + for msg in conv if !isabstractannotationmessage(msg)]) end ### AI Functions diff --git a/src/llm_tracer.jl b/src/llm_tracer.jl index 352dd7de0..33b920d8e 100644 --- a/src/llm_tracer.jl +++ b/src/llm_tracer.jl @@ -16,6 +16,9 @@ end function role4render(schema::AbstractTracerSchema, msg::AIMessage) role4render(schema.schema, msg) end +function role4render(schema::AbstractTracerSchema, msg::AbstractAnnotationMessage) + role4render(schema.schema, msg) +end """ render(tracer_schema::AbstractTracerSchema, conv::AbstractVector{<:AbstractMessage}; kwargs...) diff --git a/src/memory.jl b/src/memory.jl index 4d08ab025..121c267d8 100644 --- a/src/memory.jl +++ b/src/memory.jl @@ -1,59 +1,58 @@ -# Conversation Memory Implementation -import PromptingTools: AbstractMessage, SystemMessage, AIMessage, UserMessage -import PromptingTools: issystemmessage, isusermessage, isaimessage -import PromptingTools: aigenerate, last_message, last_output - """ ConversationMemory -A structured container for managing conversation history with intelligent truncation -and caching capabilities. +A structured container for managing conversation history. It has only one field `:conversation` +which is a vector of `AbstractMessage`s. It's built to support intelligent truncation and caching +behavior (`get_last`). -The memory supports batched retrieval with deterministic truncation points for optimal -caching behavior. 
+You can also use it as a functor to have extended conversations (easier than constantly passing `conversation` kwarg) """ Base.@kwdef mutable struct ConversationMemory conversation::Vector{AbstractMessage} = AbstractMessage[] end -# Basic interface extensions -import Base: show, push!, append!, length - """ show(io::IO, mem::ConversationMemory) -Display the number of non-system messages in the conversation memory. +Display the number of non-system/non-annotation messages in the conversation memory. """ function Base.show(io::IO, mem::ConversationMemory) - n_msgs = count(!issystemmessage, mem.conversation) + n_msgs = count( + x -> !issystemmessage(x) && !isabstractannotationmessage(x), mem.conversation) print(io, "ConversationMemory($(n_msgs) messages)") end """ length(mem::ConversationMemory) -Return the number of messages, excluding system messages. +Return the number of messages. All of them. """ function Base.length(mem::ConversationMemory) - count(!issystemmessage, mem.conversation) + length(mem.conversation) end """ last_message(mem::ConversationMemory) -Get the last message in the conversation, delegating to PromptingTools.last_message. +Get the last message in the conversation. """ function last_message(mem::ConversationMemory) - PromptingTools.last_message(mem.conversation) + last_message(mem.conversation) end """ last_output(mem::ConversationMemory) -Get the last AI message in the conversation, delegating to PromptingTools.last_output. +Get the last AI message in the conversation. """ function last_output(mem::ConversationMemory) - PromptingTools.last_output(mem.conversation) + last_output(mem.conversation) +end + +function pprint( + io::IO, mem::ConversationMemory; + text_width::Int = displaysize(io)[2]) + pprint(io, mem.conversation; text_width) end """ @@ -62,7 +61,7 @@ end verbose::Bool=false, explain::Bool=false) -Get the last n messages with intelligent batching and caching support. 
+Get the last n messages (but including system message) with intelligent batching to preserve caching. Arguments: - n::Integer: Maximum number of messages to return (default: 20) @@ -75,11 +74,34 @@ Vector{AbstractMessage} with the selected messages, always including: 1. The system message (if present) 2. First user message 3. Messages up to n, respecting batch_size boundaries + +Once you get your full conversation back, you can use `append!(mem, conversation)` to merge the new messages into the memory. + +# Examples: +```julia +# Basic usage - get last 3 messages +mem = ConversationMemory() +push!(mem, SystemMessage("You are helpful")) +push!(mem, UserMessage("Hello")) +push!(mem, AIMessage("Hi!")) +push!(mem, UserMessage("How are you?")) +push!(mem, AIMessage("I'm good!")) +messages = get_last(mem, 3) + +# Using batch_size for caching efficiency +messages = get_last(mem, 10; batch_size=5) # Aligns to 5-message batches for caching + +# Add explanation about truncation +messages = get_last(mem, 3; explain=true) # Adds truncation note to first AI message so the model knows it's truncated + +# Get verbose output about truncation +messages = get_last(mem, 3; verbose=true) # Prints info about truncation +``` """ -function get_last(mem::ConversationMemory, n::Integer=20; - batch_size::Union{Nothing,Integer}=nothing, - verbose::Bool=false, - explain::Bool=false) +function get_last(mem::ConversationMemory, n::Integer = 20; + batch_size::Union{Nothing, Integer} = nothing, + verbose::Bool = false, + explain::Bool = false) messages = mem.conversation isempty(messages) && return AbstractMessage[] @@ -96,59 +118,68 @@ function get_last(mem::ConversationMemory, n::Integer=20; push!(result, messages[first_user_idx]) end - # Get remaining messages excluding system and first user - exclude_indices = filter(!isnothing, [system_idx, first_user_idx]) - remaining_msgs = messages[setdiff(1:length(messages), exclude_indices)] - - # Calculate how many messages to include based on batch 
size - if !isnothing(batch_size) - # When batch_size=10, should return between 11-20 messages - total_msgs = length(remaining_msgs) - num_batches = ceil(Int, total_msgs / batch_size) - - # Calculate target size (between batch_size+1 and 2*batch_size) - target_size = if num_batches * batch_size > n - batch_size + 1 # Reset to minimum (11 for batch_size=10) - else - min(num_batches * batch_size, n - length(result)) - end - - # Get messages to append - if !isempty(remaining_msgs) - start_idx = max(1, length(remaining_msgs) - target_size + 1) - append!(result, remaining_msgs[start_idx:end]) - end - else - # Without batch size, just get the last n-length(result) messages - if !isempty(remaining_msgs) - start_idx = max(1, length(remaining_msgs) - (n - length(result)) + 1) - append!(result, remaining_msgs[start_idx:end]) - end + # Calculate remaining message budget + remaining_budget = n - length(result) + visible_messages = findall( + x -> !issystemmessage(x) && !isabstractannotationmessage(x), messages) + + if remaining_budget > 0 + default_start_idx = max(1, length(visible_messages) - remaining_budget + 1) + start_idx = !isnothing(batch_size) ? 
+ batch_start_index( + length(visible_messages), remaining_budget, batch_size) : default_start_idx + ## find first AIMessage after that (it must be aligned to follow after UserMessage) + valid_idxs = @view(visible_messages[start_idx:end]) + ai_msg_idx = findfirst(isaimessage, @view(messages[valid_idxs])) + !isnothing(ai_msg_idx) && + append!(result, messages[@view(valid_idxs[ai_msg_idx:end])]) end - if verbose - println("Total messages: ", length(messages)) - println("Keeping: ", length(result)) - println("Required messages: ", count(m -> issystemmessage(m) || m === messages[first_user_idx], result)) - if !isnothing(batch_size) - println("Using batch size: ", batch_size) - end - end + verbose && + @info "ConversationMemory truncated to $(length(result))/$(length(messages)) messages" # Add explanation if requested and we truncated messages - if explain && length(messages) > length(result) + if explain && (length(visible_messages) + 1) > length(result) # Find first AI message in result after required messages - ai_msg_idx = findfirst(m -> isaimessage(m) && !(m in result[1:length(exclude_indices)]), result) + ai_msg_idx = findfirst(x -> isaimessage(x) || isaitoolrequest(x), result) + trunc_count = length(visible_messages) + 1 - length(result) if !isnothing(ai_msg_idx) - orig_content = result[ai_msg_idx].content - explanation = "For efficiency reasons, we have truncated the preceding $(length(messages) - length(result)) messages.\n\n$orig_content" - result[ai_msg_idx] = AIMessage(explanation) + ai_msg = result[ai_msg_idx] + orig_content = ai_msg.content + explanation = "[This is an automatically added explanation to inform you that for efficiency reasons, the user has truncated the preceding $(trunc_count) messages.]\n\n$orig_content" + ai_msg_type = typeof(ai_msg) + result[ai_msg_idx] = ai_msg_type(; + [f => getfield(ai_msg, f) + for f in fieldnames(ai_msg_type) if f != :content]..., + content = explanation) end end return result end +""" + 
batch_start_index(array_length::Integer, n::Integer, batch_size::Integer) -> Integer + +Compute the starting index for retrieving the most recent data, adjusting in blocks of `batch_size`. +The function accumulates messages until hitting a batch boundary, then jumps to the next batch. + +For example, with n=20 and batch_size=10: +- At length 90-99: returns 80 (allowing accumulation of 11-20 messages) +- At length 100-109: returns 90 (allowing accumulation of 11-20 messages) +- At length 110: returns 100 (resetting to 11 messages) +""" +function batch_start_index(array_length::Integer, n::Integer, batch_size::Integer)::Integer + @assert n>=batch_size "n must be >= batch_size" + # Calculate which batch we're in + batch_number = (array_length - (n - batch_size)) ÷ batch_size + # Calculate the start of the current batch + batch_start = batch_number * batch_size + + # Ensure we don't go before the first element + return max(1, batch_start) +end + """ append!(mem::ConversationMemory, msgs::Vector{<:AbstractMessage}) @@ -157,27 +188,33 @@ Only appends messages that are newer than the latest matching message in memory. 
""" function Base.append!(mem::ConversationMemory, msgs::Vector{<:AbstractMessage}) isempty(msgs) && return mem + isempty(mem.conversation) && return append!(mem.conversation, msgs) - if isempty(mem.conversation) - append!(mem.conversation, msgs) - return mem - end + # get all messages in mem.conversation with run_id + run_id_indices = findall(x -> hasproperty(x, :run_id), mem.conversation) + + # Search backwards through messages to find matching point + for idx in reverse(eachindex(msgs)) + msg = msgs[idx] - # Find latest run_id in memory - latest_run_id = 0 - for msg in mem.conversation - if isdefined(msg, :run_id) - latest_run_id = max(latest_run_id, msg.run_id) + # Find matching message in memory based on run_id if present + match_idx = if hasproperty(msg, :run_id) + findlast( + m -> hasproperty(m, :run_id) && m.run_id == msg.run_id, @view(mem.conversation[run_id_indices])) + else + findlast(m -> m == msg, mem.conversation) end - end - # Keep messages that either don't have a run_id or have a higher run_id - new_msgs = filter(msgs) do msg - !isdefined(msg, :run_id) || msg.run_id > latest_run_id + if !isnothing(match_idx) + # Found match - append everything after this message + (idx + 1 <= length(msgs)) && append!(mem.conversation, msgs[(idx + 1):end]) + @info idx + return mem + end end - append!(mem.conversation, new_msgs) - return mem + @warn "No matching messages found in memory, appending all" + return append!(mem.conversation, msgs) end """ @@ -191,34 +228,33 @@ function Base.push!(mem::ConversationMemory, msg::AbstractMessage) end """ - (mem::ConversationMemory)(prompt::String; last::Union{Nothing,Integer}=nothing, kwargs...) + (mem::ConversationMemory)(prompt::AbstractString; last::Union{Nothing,Integer}=nothing, kwargs...) Functor interface for direct generation using the conversation memory. +Optionally, specify the number of last messages to include in the context (uses `get_last`). 
""" -function (mem::ConversationMemory)(prompt::String; last::Union{Nothing,Integer}=nothing, kwargs...) +function (mem::ConversationMemory)( + prompt::AbstractString; last::Union{Nothing, Integer} = nothing, kwargs...) # Get conversation context - context = if isnothing(last) - mem.conversation - else - get_last(mem, last) - end + context = isnothing(last) ? mem.conversation : get_last(mem, last) # Add user message to memory first user_msg = UserMessage(prompt) - push!(mem.conversation, user_msg) + push!(mem, user_msg) # Generate response with context - response = aigenerate(context, prompt; kwargs...) - push!(mem.conversation, response) - return response + response = aigenerate(context; return_all = true, kwargs...) + append!(mem, response) + return last_message(response) end """ - aigenerate(mem::ConversationMemory, prompt::String; kwargs...) + aigenerate(schema::AbstractPromptSchema, + mem::ConversationMemory; kwargs...) Generate a response using the conversation memory context. """ -function PromptingTools.aigenerate(messages::Vector{<:AbstractMessage}, prompt::String; kwargs...) - schema = get(kwargs, :schema, OpenAISchema()) - aigenerate(schema, [messages..., UserMessage(prompt)]; kwargs...) +function aigenerate(schema::AbstractPromptSchema, + mem::ConversationMemory; kwargs...) + aigenerate(schema, mem.conversation; kwargs...) end diff --git a/src/messages.jl b/src/messages.jl index 7531b2b89..a3c446e66 100644 --- a/src/messages.jl +++ b/src/messages.jl @@ -4,21 +4,21 @@ abstract type AbstractMessage end abstract type AbstractChatMessage <: AbstractMessage end # with text-based content abstract type AbstractDataMessage <: AbstractMessage end # with data-based content, eg, embeddings -abstract type AbstractAnnotationMessage <: AbstractChatMessage end # messages that provide extra information without being sent to LLMs +""" + AbstractAnnotationMessage + +Messages that provide extra information without being sent to LLMs. 
+ +Required fields: `content`, `tags`, `comment`, `run_id`. + +Note: `comment` is intended for human readers only and should never be used. +`run_id` should be a unique identifier for the annotation, typically a random number. +""" +abstract type AbstractAnnotationMessage <: AbstractMessage end # messages that provide extra information without being sent to LLMs abstract type AbstractTracerMessage{T <: AbstractMessage} <: AbstractMessage end # message with annotation that exposes the underlying message # Complementary type for tracing, follows the same API as TracerMessage abstract type AbstractTracer{T <: Any} end -# Helper functions for message type checking -isabstractannotationmessage(msg::AbstractMessage) = msg isa AbstractAnnotationMessage - -## Allowed inputs for ai* functions, AITemplate is resolved one level higher -const ALLOWED_PROMPT_TYPE = Union{ - AbstractString, - AbstractMessage, - Vector{<:AbstractMessage} -} - ## Allowed inputs for ai* functions, AITemplate is resolved one level higher const ALLOWED_PROMPT_TYPE = Union{ AbstractString, @@ -39,17 +39,15 @@ end Base.@kwdef struct SystemMessage{T <: AbstractString} <: AbstractChatMessage content::T variables::Vector{Symbol} = _extract_handlebar_variables(content) - run_id::Union{Nothing, Int} = Int(rand(Int16)) _type::Symbol = :systemmessage + SystemMessage{T}(c, v, t) where {T <: AbstractString} = new(c, v, t) end - -# Add positional constructor -function SystemMessage(content::T; run_id::Union{Nothing,Int}=Int(rand(Int16)), - _type::Symbol=:systemmessage) where {T <: AbstractString} - variables = _extract_handlebar_variables(content) +function SystemMessage(content::T, + variables::Vector{Symbol}, + type::Symbol) where {T <: AbstractString} not_allowed_kwargs = intersect(variables, RESERVED_KWARGS) @assert length(not_allowed_kwargs)==0 "Error: Some placeholders are invalid, as they are reserved for `ai*` functions. 
Change: $(join(not_allowed_kwargs,","))" - SystemMessage{T}(content, variables, run_id, _type) + return SystemMessage{T}(content, variables, type) end """ @@ -67,20 +65,16 @@ Base.@kwdef struct UserMessage{T <: AbstractString} <: AbstractChatMessage content::T variables::Vector{Symbol} = _extract_handlebar_variables(content) name::Union{Nothing, String} = nothing - run_id::Union{Nothing, Int} = Int(rand(Int16)) - cost::Union{Nothing, Float64} = nothing _type::Symbol = :usermessage - UserMessage{T}(c, v, n, r, co, t) where {T <: AbstractString} = new(c, v, n, r, co, t) + UserMessage{T}(c, v, n, t) where {T <: AbstractString} = new(c, v, n, t) end function UserMessage(content::T, variables::Vector{Symbol}, name::Union{Nothing, String}, - run_id::Union{Nothing, Int}, - cost::Union{Nothing, Float64}, type::Symbol) where {T <: AbstractString} not_allowed_kwargs = intersect(variables, RESERVED_KWARGS) @assert length(not_allowed_kwargs)==0 "Error: Some placeholders are invalid, as they are reserved for `ai*` functions. Change: $(join(not_allowed_kwargs,","))" - return UserMessage{T}(content, variables, name, run_id, cost, type) + return UserMessage{T}(content, variables, name, type) end """ @@ -178,92 +172,6 @@ Base.@kwdef struct DataMessage{T <: Any} <: AbstractDataMessage _type::Symbol = :datamessage end -""" - AnnotationMessage - -A message type for providing extra information in the conversation history without being sent to LLMs. -These messages are filtered out during rendering to ensure they don't affect the LLM's context. - -Used to bundle key information and documentation for colleagues and future reference together with the data. - -# Fields -- `content::T`: The content of the annotation (can be used for inputs to airag etc.) 
-- `extras::Dict{Symbol,Any}`: Additional metadata with symbol keys and any values -- `tags::Vector{Symbol}`: Vector of tags for categorization (default: empty) -- `comment::String`: Human-readable comment, never used for automatic operations (default: empty) -- `run_id::Union{Nothing,Int}`: The unique ID of the run - -Note: The comment field is intended for human readers only and should never be used -for automatic operations. -""" -Base.@kwdef struct AnnotationMessage{T} <: AbstractAnnotationMessage - content::T - extras::Dict{Symbol,Any} = Dict{Symbol,Any}() - tags::Vector{Symbol} = Symbol[] - comment::String = "" - run_id::Union{Nothing,Int} = Int(rand(Int16)) - _type::Symbol = :annotationmessage -end - -# Add positional constructor for string content -function AnnotationMessage(content::AbstractString; - extras::Union{Dict{Symbol,Any}, Dict{Symbol,String}}=Dict{Symbol,Any}(), - tags::Vector{Symbol}=Symbol[], - comment::String="", - run_id::Union{Nothing,Int}=Int(rand(Int16)), - _type::Symbol=:annotationmessage) - # Convert Dict{Symbol,String} to Dict{Symbol,Any} if needed - extras_any = extras isa Dict{Symbol,String} ? Dict{Symbol,Any}(k => v for (k,v) in extras) : extras - AnnotationMessage{typeof(content)}(content, extras_any, tags, comment, run_id, _type) -end - -""" - annotate!(messages::Vector{<:AbstractMessage}, content; kwargs...) - annotate!(message::AbstractMessage, content; kwargs...) - -Add an annotation message to a vector of messages or wrap a single message in a vector with an annotation. -The annotation is always inserted after any existing annotation messages. 
- -# Arguments -- `messages`: Vector of messages or single message to annotate -- `content`: Content of the annotation -- `kwargs...`: Additional fields for the AnnotationMessage (extras, tags, comment) - -# Returns -Vector{AbstractMessage} with the annotation message inserted - -# Example -```julia -messages = [SystemMessage("Assistant"), UserMessage("Hello")] -annotate!(messages, "This is important"; tags=[:important], comment="For review") -``` -""" -function annotate!(messages::Vector{T}, content; kwargs...) where {T<:AbstractMessage} - # Create new annotation message - annotation = AnnotationMessage(content; kwargs...) - - # Convert messages to Vector{AbstractMessage} to allow mixed types - abstract_messages = Vector{AbstractMessage}(messages) - - # Find the last annotation message index - last_annotation_idx = findlast(isabstractannotationmessage, abstract_messages) - - if isnothing(last_annotation_idx) - # If no annotation exists, insert at beginning - insert!(abstract_messages, 1, annotation) - else - # Insert after the last annotation - insert!(abstract_messages, last_annotation_idx + 1, annotation) - end - - return abstract_messages -end - -# Single message version - wrap in vector and use the other method -function annotate!(message::AbstractMessage, content; kwargs...) - annotate!([message], content; kwargs...) -end - """ ToolMessage @@ -328,23 +236,52 @@ Base.@kwdef struct AIToolRequest{T <: Union{AbstractString, Nothing}} <: Abstrac sample_id::Union{Nothing, Int} = nothing _type::Symbol = :aitoolrequest end - "Get the vector of tool call requests from an AIToolRequest/message." tool_calls(msg::AIToolRequest) = msg.tool_calls tool_calls(msg::AbstractMessage) = ToolMessage[] tool_calls(msg::ToolMessage) = [msg] tool_calls(msg::AbstractTracerMessage) = tool_calls(msg.object) +""" + AnnotationMessage + +A message type for providing extra information in the conversation history without being sent to LLMs. 
+These messages are filtered out during rendering to ensure they don't affect the LLM's context. + +Used to bundle key information and documentation for colleagues and future reference together with the data. + +# Fields +- `content::T`: The content of the annotation (can be used for inputs to airag etc.) +- `extras::Dict{Symbol,Any}`: Additional metadata with symbol keys and any values +- `tags::Vector{Symbol}`: Vector of tags for categorization (default: empty) +- `comment::String`: Human-readable comment, never used for automatic operations (default: empty) +- `run_id::Union{Nothing,Int}`: The unique ID of the annotation + +Note: The comment field is intended for human readers only and should never be used +for automatic operations. +""" +Base.@kwdef struct AnnotationMessage{T <: AbstractString} <: AbstractAnnotationMessage + content::T + extras::Dict{Symbol, Any} = Dict{Symbol, Any}() + tags::Vector{Symbol} = Symbol[] + comment::String = "" + run_id::Union{Nothing, Int} = Int(rand(Int32)) + _type::Symbol = :annotationmessage +end + ### Other Message methods # content-only constructor -function (MSG::Type{<:AbstractChatMessage})(prompt::AbstractString; run_id::Union{Nothing, Int}=Int(rand(Int16))) - MSG(; content = prompt, run_id = run_id) +function (MSG::Type{<:AbstractChatMessage})(prompt::AbstractString; kwargs...) + MSG(; content = prompt, kwargs...) +end +function (MSG::Type{<:AbstractAnnotationMessage})(content::AbstractString; kwargs...) + MSG(; content, kwargs...) 
end -function (MSG::Type{<:AbstractChatMessage})(msg::AbstractChatMessage; run_id::Union{Nothing, Int}=msg.run_id) - MSG(; content = msg.content, run_id = run_id) +function (MSG::Type{<:AbstractChatMessage})(msg::AbstractChatMessage) + MSG(; msg.content) end -function (MSG::Type{<:AbstractChatMessage})(msg::AbstractTracerMessage{<:AbstractChatMessage}; run_id::Union{Nothing, Int}=msg.object.run_id) - MSG(; content = msg.content, run_id = run_id) +function (MSG::Type{<:AbstractChatMessage})(msg::AbstractTracerMessage{<:AbstractChatMessage}) + MSG(; msg.content) end ## It checks types so it should be defined for all inputs @@ -355,9 +292,8 @@ isdatamessage(m::Any) = m isa DataMessage isaimessage(m::Any) = m isa AIMessage istoolmessage(m::Any) = m isa ToolMessage isaitoolrequest(m::Any) = m isa AIToolRequest +isabstractannotationmessage(msg::Any) = msg isa AbstractAnnotationMessage istracermessage(m::Any) = m isa AbstractTracerMessage -isabstractannotationmessage(m::Any) = m isa AbstractAnnotationMessage - isusermessage(m::AbstractTracerMessage) = isusermessage(m.object) isusermessagewithimages(m::AbstractTracerMessage) = isusermessagewithimages(m.object) issystemmessage(m::AbstractTracerMessage) = issystemmessage(m.object) @@ -365,7 +301,9 @@ isdatamessage(m::AbstractTracerMessage) = isdatamessage(m.object) isaimessage(m::AbstractTracerMessage) = isaimessage(m.object) istoolmessage(m::AbstractTracerMessage) = istoolmessage(m.object) isaitoolrequest(m::AbstractTracerMessage) = isaitoolrequest(m.object) -isabstractannotationmessage(m::AbstractTracerMessage) = isabstractannotationmessage(m.object) +function isabstractannotationmessage(m::AbstractTracerMessage) + isabstractannotationmessage(m.object) +end # equality check for testing, only equal if all fields are equal and type is the same Base.var"=="(m1::AbstractMessage, m2::AbstractMessage) = false @@ -605,8 +543,6 @@ function Base.show(io::IO, ::MIME"text/plain", m::AbstractChatMessage) printstyled(io, type_; color = 
:light_red) elseif m isa MetadataMessage printstyled(io, type_; color = :light_blue) - elseif m isa AnnotationMessage - printstyled(io, type_; color = :yellow) else print(io, type_) end @@ -631,6 +567,11 @@ function Base.show(io::IO, ::MIME"text/plain", m::AbstractDataMessage) print(io, "(", typeof(m.content), ")") end end +function Base.show(io::IO, ::MIME"text/plain", m::AbstractAnnotationMessage) + type_ = string(typeof(m)) |> x -> split(x, "{")[begin] + printstyled(io, type_; color = :light_blue) + print(io, "(\"", m.content, "\")") +end function Base.show(io::IO, ::MIME"text/plain", t::AbstractTracerMessage) dump(IOContext(io, :limit => true), t, maxdepth = 1) end @@ -640,7 +581,7 @@ end ## Dispatch for render # function render(schema::AbstractPromptSchema, -# messages::Vector{<:AbstractMessage>; +# messages::Vector{<:AbstractMessage}; # kwargs...) # render(schema, messages; kwargs...) # end @@ -648,7 +589,6 @@ function role4render(schema::AbstractPromptSchema, msg::AbstractTracerMessage) role4render(schema, msg.object) end function render(schema::AbstractPromptSchema, msg::AbstractMessage; kwargs...) - isabstractannotationmessage(msg) && return nothing # Skip annotation messages render(schema, [msg]; kwargs...) 
end function render(schema::AbstractPromptSchema, msg::AbstractString; @@ -689,29 +629,6 @@ function StructTypes.subtypes(::Type{AbstractAnnotationMessage}) (annotationmessage = AnnotationMessage,) end -# Serialization methods for AnnotationMessage -function Base.Dict(msg::AnnotationMessage) - Dict{String,Any}( - "content" => msg.content, - "extras" => msg.extras, - "tags" => msg.tags, - "comment" => msg.comment, - "run_id" => msg.run_id, - "_type" => msg._type - ) -end - -function Base.convert(::Type{AnnotationMessage}, d::Dict{String,Any}) - AnnotationMessage(; - content = d["content"], - extras = convert(Dict{Symbol,Any}, d["extras"]), - tags = Symbol.(d["tags"]), - comment = d["comment"], - run_id = d["run_id"], - _type = Symbol(d["_type"]) - ) -end - StructTypes.StructType(::Type{AbstractTracerMessage}) = StructTypes.AbstractType() StructTypes.subtypekey(::Type{AbstractTracerMessage}) = :_type function StructTypes.subtypes(::Type{AbstractTracerMessage}) @@ -732,8 +649,8 @@ StructTypes.StructType(::Type{ToolMessage}) = StructTypes.Struct() StructTypes.StructType(::Type{AIToolRequest}) = StructTypes.Struct() StructTypes.StructType(::Type{AIMessage}) = StructTypes.Struct() StructTypes.StructType(::Type{DataMessage}) = StructTypes.Struct() -StructTypes.StructType(::Type{TracerMessage}) = StructTypes.Struct() # Ignore mutability once we serialize StructTypes.StructType(::Type{AnnotationMessage}) = StructTypes.Struct() +StructTypes.StructType(::Type{TracerMessage}) = StructTypes.Struct() # Ignore mutability once we serialize StructTypes.StructType(::Type{TracerMessageLike}) = StructTypes.Struct() # Ignore mutability once we serialize ### Message Access Utilities @@ -802,9 +719,9 @@ function pprint(io::IO, msg::AbstractMessage; text_width::Int = displaysize(io)[ elseif istoolmessage(msg) isnothing(msg.content) ? string("Name: ", msg.name, ", Args: ", msg.raw) : string(msg.content) - elseif msg isa AnnotationMessage - tags_str = isempty(msg.tags) ? 
"" : " [$(join(msg.tags, ", "))]" - comment_str = isempty(msg.comment) ? "" : " ($(msg.comment))" + elseif isabstractannotationmessage(msg) + tags_str = isempty(msg.tags) ? "" : "\n [$(join(msg.tags, ", "))]" + comment_str = isempty(msg.comment) ? "" : "\n ($(msg.comment))" "$(msg.content)$tags_str$comment_str" else wrap_string(msg.content, text_width) diff --git a/src/user_preferences.jl b/src/user_preferences.jl index 166754ee5..95aa2c2a4 100644 --- a/src/user_preferences.jl +++ b/src/user_preferences.jl @@ -194,7 +194,7 @@ function load_api_keys!() default=get(ENV, "MISTRAL_API_KEY", get(ENV, "MISTRALAI_API_KEY", ""))) if !isempty(get(ENV, "MISTRALAI_API_KEY", "")) - @warn "The MISTRALAI_API_KEY environment variable is deprecated. Use MISTRAL_API_KEY instead." + @debug "The MISTRALAI_API_KEY environment variable is deprecated. Use MISTRAL_API_KEY instead." end global COHERE_API_KEY COHERE_API_KEY = @load_preference("COHERE_API_KEY", diff --git a/test/LocalPreferences.toml b/test/LocalPreferences.toml deleted file mode 100644 index b6e43f541..000000000 --- a/test/LocalPreferences.toml +++ /dev/null @@ -1,4 +0,0 @@ -[PromptingTools] -MODEL_CHAT = "gpt-4o-mini" -MODEL_EMBEDDING = "text-embedding-3-small" -OPENAI_API_KEY = "sk-proj-DODnZqEwrRUSeny4tvtFT3BlbkFJfy3ftqOpDbdky6kEu60q " diff --git a/test/Manifest.toml b/test/Manifest.toml deleted file mode 100644 index cb89c21a8..000000000 --- a/test/Manifest.toml +++ /dev/null @@ -1,449 +0,0 @@ -# This file is machine-generated - editing it directly is not advised - -julia_version = "1.10.6" -manifest_format = "2.0" -project_hash = "7f329ba0ad13fa85b7e1c60360c4a28941fe20c4" - -[[deps.AbstractTrees]] -git-tree-sha1 = "2d9c9a55f9c93e8887ad391fbae72f8ef55e1177" -uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" -version = "0.4.5" - -[[deps.Aqua]] -deps = ["Compat", "Pkg", "Test"] -git-tree-sha1 = "49b1d7a9870c87ba13dc63f8ccfcf578cb266f95" -uuid = "4c88cf16-eb10-579e-8560-4a9242c79595" -version = "0.8.9" - -[[deps.ArgCheck]] 
-git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4" -uuid = "dce04be8-c92d-5529-be00-80e4d2c0e197" -version = "2.3.0" - -[[deps.ArgTools]] -uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" -version = "1.1.1" - -[[deps.Artifacts]] -uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" - -[[deps.Base64]] -uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" - -[[deps.BitFlags]] -git-tree-sha1 = "0691e34b3bb8be9307330f88d1a3c3f25466c24d" -uuid = "d1d4a3ce-64b1-5f1a-9ba4-7e7e69966f35" -version = "0.1.9" - -[[deps.CEnum]] -git-tree-sha1 = "389ad5c84de1ae7cf0e28e381131c98ea87d54fc" -uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82" -version = "0.5.0" - -[[deps.CodecZlib]] -deps = ["TranscodingStreams", "Zlib_jll"] -git-tree-sha1 = "bce6804e5e6044c6daab27bb533d1295e4a2e759" -uuid = "944b1d66-785c-5afd-91f1-9de20f533193" -version = "0.7.6" - -[[deps.Compat]] -deps = ["TOML", "UUIDs"] -git-tree-sha1 = "8ae8d32e09f0dcf42a36b90d4e17f5dd2e4c4215" -uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" -version = "4.16.0" - - [deps.Compat.extensions] - CompatLinearAlgebraExt = "LinearAlgebra" - - [deps.Compat.weakdeps] - Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" - LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" - -[[deps.ConcurrentUtilities]] -deps = ["Serialization", "Sockets"] -git-tree-sha1 = "ea32b83ca4fefa1768dc84e504cc0a94fb1ab8d1" -uuid = "f0e56b4a-5159-44fe-b623-3e5288b988bb" -version = "2.4.2" - -[[deps.DataDeps]] -deps = ["HTTP", "Libdl", "Reexport", "SHA", "Scratch", "p7zip_jll"] -git-tree-sha1 = "8ae085b71c462c2cb1cfedcb10c3c877ec6cf03f" -uuid = "124859b0-ceae-595e-8997-d05f6a7a8dfe" -version = "0.7.13" - -[[deps.DataStructures]] -deps = ["Compat", "InteractiveUtils", "OrderedCollections"] -git-tree-sha1 = "1d0a14036acb104d9e89698bd408f63ab58cdc82" -uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" -version = "0.18.20" - -[[deps.Dates]] -deps = ["Printf"] -uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" - -[[deps.DocStringExtensions]] -deps = ["LibGit2"] -git-tree-sha1 = 
"2fb1e02f2b635d0845df5d7c167fec4dd739b00d" -uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" -version = "0.9.3" - -[[deps.DoubleArrayTries]] -deps = ["OffsetArrays", "Preferences", "StringViews"] -git-tree-sha1 = "78dcacc06dbe5eef9c97a8ddbb9a3e9a8d9df7b7" -uuid = "abbaa0e5-f788-499c-92af-c35ff4258c82" -version = "0.1.1" - -[[deps.Downloads]] -deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] -uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" -version = "1.6.0" - -[[deps.ExceptionUnwrapping]] -deps = ["Test"] -git-tree-sha1 = "d36f682e590a83d63d1c7dbd287573764682d12a" -uuid = "460bff9d-24e4-43bc-9d9f-a8973cb893f4" -version = "0.1.11" - -[[deps.FileWatching]] -uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" - -[[deps.FlashRank]] -deps = ["DataDeps", "DoubleArrayTries", "JSON3", "ONNXRunTime", "StringViews", "Unicode", "WordTokenizers"] -git-tree-sha1 = "51eeaf22caadb6b5f919f5df59e1ef108d1e9984" -uuid = "22cc3f58-1757-4700-bb45-2032706e5a8d" -version = "0.4.1" - -[[deps.HTML_Entities]] -deps = ["StrTables"] -git-tree-sha1 = "c4144ed3bc5f67f595622ad03c0e39fa6c70ccc7" -uuid = "7693890a-d069-55fe-a829-b4a6d304f0ee" -version = "1.0.1" - -[[deps.HTTP]] -deps = ["Base64", "CodecZlib", "ConcurrentUtilities", "Dates", "ExceptionUnwrapping", "Logging", "LoggingExtras", "MbedTLS", "NetworkOptions", "OpenSSL", "PrecompileTools", "Random", "SimpleBufferStream", "Sockets", "URIs", "UUIDs"] -git-tree-sha1 = "ae350b8225575cc3ea385d4131c81594f86dfe4f" -uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3" -version = "1.10.12" - -[[deps.InteractiveUtils]] -deps = ["Markdown"] -uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" - -[[deps.JLLWrappers]] -deps = ["Artifacts", "Preferences"] -git-tree-sha1 = "be3dc50a92e5a386872a493a10050136d4703f9b" -uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" -version = "1.6.1" - -[[deps.JSON]] -deps = ["Dates", "Mmap", "Parsers", "Unicode"] -git-tree-sha1 = "31e996f0a15c7b280ba9f76636b3ff9e2ae58c9a" -uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" -version = 
"0.21.4" - -[[deps.JSON3]] -deps = ["Dates", "Mmap", "Parsers", "PrecompileTools", "StructTypes", "UUIDs"] -git-tree-sha1 = "1d322381ef7b087548321d3f878cb4c9bd8f8f9b" -uuid = "0f8b85d8-7281-11e9-16c2-39a750bddbf1" -version = "1.14.1" - - [deps.JSON3.extensions] - JSON3ArrowExt = ["ArrowTypes"] - - [deps.JSON3.weakdeps] - ArrowTypes = "31f734f8-188a-4ce0-8406-c8a06bd891cd" - -[[deps.Languages]] -deps = ["InteractiveUtils", "JSON", "RelocatableFolders"] -git-tree-sha1 = "0cf92ba8402f94c9f4db0ec156888ee8d299fcb8" -uuid = "8ef0a80b-9436-5d2c-a485-80b904378c43" -version = "0.4.6" - -[[deps.LazyArtifacts]] -deps = ["Artifacts", "Pkg"] -uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3" - -[[deps.LibCURL]] -deps = ["LibCURL_jll", "MozillaCACerts_jll"] -uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" -version = "0.6.4" - -[[deps.LibCURL_jll]] -deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] -uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" -version = "8.4.0+0" - -[[deps.LibGit2]] -deps = ["Base64", "LibGit2_jll", "NetworkOptions", "Printf", "SHA"] -uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" - -[[deps.LibGit2_jll]] -deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll"] -uuid = "e37daf67-58a4-590a-8e99-b0245dd2ffc5" -version = "1.6.4+0" - -[[deps.LibSSH2_jll]] -deps = ["Artifacts", "Libdl", "MbedTLS_jll"] -uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" -version = "1.11.0+1" - -[[deps.Libdl]] -uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" - -[[deps.Logging]] -uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" - -[[deps.LoggingExtras]] -deps = ["Dates", "Logging"] -git-tree-sha1 = "f02b56007b064fbfddb4c9cd60161b6dd0f40df3" -uuid = "e6f89c97-d47a-5376-807f-9c37f3926c36" -version = "1.1.0" - -[[deps.Markdown]] -deps = ["Base64"] -uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" - -[[deps.MbedTLS]] -deps = ["Dates", "MbedTLS_jll", "MozillaCACerts_jll", "NetworkOptions", "Random", "Sockets"] -git-tree-sha1 = "c067a280ddc25f196b5e7df3877c6b226d390aaf" 
-uuid = "739be429-bea8-5141-9913-cc70e7f3736d" -version = "1.1.9" - -[[deps.MbedTLS_jll]] -deps = ["Artifacts", "Libdl"] -uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" -version = "2.28.2+1" - -[[deps.Mmap]] -uuid = "a63ad114-7e13-5084-954f-fe012c677804" - -[[deps.MozillaCACerts_jll]] -uuid = "14a3606d-f60d-562e-9121-12d972cd8159" -version = "2023.1.10" - -[[deps.NetworkOptions]] -uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" -version = "1.2.0" - -[[deps.ONNXRunTime]] -deps = ["ArgCheck", "CEnum", "DataStructures", "DocStringExtensions", "LazyArtifacts", "Libdl", "Pkg"] -git-tree-sha1 = "25b0c81d59c40cfe21204d3b08d48147be73fbe1" -uuid = "e034b28e-924e-41b2-b98f-d2bbeb830c6a" -version = "1.2.0" - - [deps.ONNXRunTime.extensions] - CUDAExt = ["CUDA", "cuDNN"] - - [deps.ONNXRunTime.weakdeps] - CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" - cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd" - -[[deps.OffsetArrays]] -git-tree-sha1 = "1a27764e945a152f7ca7efa04de513d473e9542e" -uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" -version = "1.14.1" - - [deps.OffsetArrays.extensions] - OffsetArraysAdaptExt = "Adapt" - - [deps.OffsetArrays.weakdeps] - Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" - -[[deps.OpenAI]] -deps = ["Dates", "HTTP", "JSON3"] -git-tree-sha1 = "fb6a407f3707daf513c4b88f25536dd3dbf94220" -uuid = "e9f21f70-7185-4079-aca2-91159181367c" -version = "0.9.1" - -[[deps.OpenSSL]] -deps = ["BitFlags", "Dates", "MozillaCACerts_jll", "OpenSSL_jll", "Sockets"] -git-tree-sha1 = "38cb508d080d21dc1128f7fb04f20387ed4c0af4" -uuid = "4d8831e6-92b7-49fb-bdf8-b643e874388c" -version = "1.4.3" - -[[deps.OpenSSL_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl"] -git-tree-sha1 = "7493f61f55a6cce7325f197443aa80d32554ba10" -uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95" -version = "3.0.15+1" - -[[deps.OrderedCollections]] -git-tree-sha1 = "dfdf5519f235516220579f949664f1bf44e741c5" -uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" -version = "1.6.3" - -[[deps.Parsers]] -deps = ["Dates", 
"PrecompileTools", "UUIDs"] -git-tree-sha1 = "8489905bcdbcfac64d1daa51ca07c0d8f0283821" -uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "2.8.1" - -[[deps.Pkg]] -deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] -uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" -version = "1.10.0" - -[[deps.PrecompileTools]] -deps = ["Preferences"] -git-tree-sha1 = "5aa36f7049a63a1528fe8f7c3f2113413ffd4e1f" -uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a" -version = "1.2.1" - -[[deps.Preferences]] -deps = ["TOML"] -git-tree-sha1 = "9306f6085165d270f7e3db02af26a400d580f5c6" -uuid = "21216c6a-2e73-6563-6e65-726566657250" -version = "1.4.3" - -[[deps.Printf]] -deps = ["Unicode"] -uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" - -[[deps.PromptingTools]] -deps = ["AbstractTrees", "Base64", "Dates", "HTTP", "JSON3", "Logging", "OpenAI", "Pkg", "PrecompileTools", "Preferences", "REPL", "Random", "StreamCallbacks", "StructTypes", "Test"] -path = ".." 
-uuid = "670122d1-24a8-4d70-bfce-740807c42192" -version = "0.65.0" - - [deps.PromptingTools.extensions] - FlashRankPromptingToolsExt = ["FlashRank"] - GoogleGenAIPromptingToolsExt = ["GoogleGenAI"] - MarkdownPromptingToolsExt = ["Markdown"] - RAGToolsExperimentalExt = ["SparseArrays", "LinearAlgebra", "Unicode"] - SnowballPromptingToolsExt = ["Snowball"] - - [deps.PromptingTools.weakdeps] - FlashRank = "22cc3f58-1757-4700-bb45-2032706e5a8d" - GoogleGenAI = "903d41d1-eaca-47dd-943b-fee3930375ab" - LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" - Markdown = "d6f4376e-aef5-505a-96c1-9c027394607a" - Snowball = "fb8f903a-0164-4e73-9ffe-431110250c3b" - SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" - Unicode = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" - -[[deps.REPL]] -deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] -uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" - -[[deps.Random]] -deps = ["SHA"] -uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" - -[[deps.Reexport]] -git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b" -uuid = "189a3867-3050-52da-a836-e630ba90ab69" -version = "1.2.2" - -[[deps.RelocatableFolders]] -deps = ["SHA", "Scratch"] -git-tree-sha1 = "ffdaf70d81cf6ff22c2b6e733c900c3321cab864" -uuid = "05181044-ff0b-4ac5-8273-598c1e38db00" -version = "1.0.1" - -[[deps.SHA]] -uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" -version = "0.7.0" - -[[deps.Scratch]] -deps = ["Dates"] -git-tree-sha1 = "3bac05bc7e74a75fd9cba4295cde4045d9fe2386" -uuid = "6c6a2e73-6563-6170-7368-637461726353" -version = "1.2.1" - -[[deps.Serialization]] -uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" - -[[deps.SimpleBufferStream]] -git-tree-sha1 = "f305871d2f381d21527c770d4788c06c097c9bc1" -uuid = "777ac1f9-54b0-4bf8-805c-2214025038e7" -version = "1.2.0" - -[[deps.Snowball]] -deps = ["Languages", "Snowball_jll", "WordTokenizers"] -git-tree-sha1 = "8b466b16804ab8687f8d3a1b5312a0aa1b7d8b64" -uuid = "fb8f903a-0164-4e73-9ffe-431110250c3b" -version = "0.1.1" - 
-[[deps.Snowball_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "6ff3a185a583dca7265cbfcaae1da16aa3b6a962" -uuid = "88f46535-a3c0-54f4-998e-4320a1339f51" -version = "2.2.0+0" - -[[deps.Sockets]] -uuid = "6462fe0b-24de-5631-8697-dd941f90decc" - -[[deps.StrTables]] -deps = ["Dates"] -git-tree-sha1 = "5998faae8c6308acc25c25896562a1e66a3bb038" -uuid = "9700d1a9-a7c8-5760-9816-a99fda30bb8f" -version = "1.0.1" - -[[deps.StreamCallbacks]] -deps = ["HTTP", "JSON3", "PrecompileTools"] -git-tree-sha1 = "827180547dd10f4c018ccdbede9375c76dbdcafe" -uuid = "c1b9e933-98a0-46fc-8ea7-3b58b195fb0a" -version = "0.5.0" - -[[deps.StringViews]] -git-tree-sha1 = "ec4bf39f7d25db401bcab2f11d2929798c0578e5" -uuid = "354b36f9-a18e-4713-926e-db85100087ba" -version = "1.3.4" - -[[deps.StructTypes]] -deps = ["Dates", "UUIDs"] -git-tree-sha1 = "159331b30e94d7b11379037feeb9b690950cace8" -uuid = "856f2bd8-1eba-4b0a-8007-ebc267875bd4" -version = "1.11.0" - -[[deps.TOML]] -deps = ["Dates"] -uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" -version = "1.0.3" - -[[deps.Tar]] -deps = ["ArgTools", "SHA"] -uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" -version = "1.10.0" - -[[deps.Test]] -deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] -uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" - -[[deps.TranscodingStreams]] -git-tree-sha1 = "0c45878dcfdcfa8480052b6ab162cdd138781742" -uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa" -version = "0.11.3" - -[[deps.URIs]] -git-tree-sha1 = "67db6cc7b3821e19ebe75791a9dd19c9b1188f2b" -uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4" -version = "1.5.1" - -[[deps.UUIDs]] -deps = ["Random", "SHA"] -uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" - -[[deps.Unicode]] -uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" - -[[deps.WordTokenizers]] -deps = ["DataDeps", "HTML_Entities", "StrTables", "Unicode"] -git-tree-sha1 = "01dd4068c638da2431269f49a5964bf42ff6c9d2" -uuid = "796a5d58-b03d-544a-977e-18100b691f6e" -version = "0.5.6" - 
-[[deps.Zlib_jll]] -deps = ["Libdl"] -uuid = "83775a58-1f1d-513f-b197-d71354ab007a" -version = "1.2.13+1" - -[[deps.nghttp2_jll]] -deps = ["Artifacts", "Libdl"] -uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" -version = "1.52.0+1" - -[[deps.p7zip_jll]] -deps = ["Artifacts", "Libdl"] -uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" -version = "17.4.0+2" diff --git a/test/Project.toml b/test/Project.toml deleted file mode 100644 index 8db7b268c..000000000 --- a/test/Project.toml +++ /dev/null @@ -1,8 +0,0 @@ -[deps] -PromptingTools = "670122d1-24a8-4d70-bfce-740807c42192" -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -HTTP = "cd3eb016-35fb-5094-929b-558a96fad6f3" -JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1" -Snowball = "fb8f903a-0164-4e73-9ffe-431110250c3b" -FlashRank = "22cc3f58-1757-4700-bb45-2032706e5a8d" -Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" diff --git a/test/annotation_messages_render.jl b/test/annotation.jl similarity index 55% rename from test/annotation_messages_render.jl rename to test/annotation.jl index bc96f85cb..5b4e10a4f 100644 --- a/test/annotation_messages_render.jl +++ b/test/annotation.jl @@ -1,6 +1,6 @@ -using Test -using PromptingTools -using PromptingTools: OpenAISchema, AnthropicSchema, OllamaSchema, GoogleSchema, TestEchoOpenAISchema, render +using PromptingTools: isabstractannotationmessage, annotate!, pprint +using PromptingTools: OpenAISchema, AnthropicSchema, OllamaSchema, GoogleSchema, TestEchoOpenAISchema, render, NoSchema +using PromptingTools: AnnotationMessage, SystemMessage, TracerMessage,UserMessage, AIMessage @testset "Annotation Message Rendering" begin # Create a mix of messages including annotation messages @@ -12,21 +12,9 @@ using PromptingTools: OpenAISchema, AnthropicSchema, OllamaSchema, GoogleSchema, AIMessage("Hi there!") # No status needed for basic message ] - # Additional edge cases - messages_complex = [ - AnnotationMessage("Metadata 1", extras=Dict{Symbol,Any}(:key => "value")), - AnnotationMessage("Metadata 
2", extras=Dict{Symbol,Any}(:key2 => "value2")), - SystemMessage("Be helpful"), - AnnotationMessage("Metadata 3", tags=[:important]), - UserMessage("Hello"), - AnnotationMessage("Metadata 4", comment="For debugging"), - AIMessage("Hi there!"), - AnnotationMessage("Metadata 5", extras=Dict{Symbol,Any}(:key3 => "value3")) - ] - @testset "Basic Message Filtering" begin # Test OpenAI Schema with TestEcho - schema = TestEchoOpenAISchema( + schema = TestEchoOpenAISchema(; response=Dict( "choices" => [Dict("message" => Dict("content" => "Test response", "role" => "assistant"), "index" => 0, "finish_reason" => "stop")], "usage" => Dict("prompt_tokens" => 10, "completion_tokens" => 20, "total_tokens" => 30), @@ -62,33 +50,59 @@ using PromptingTools: OpenAISchema, AnthropicSchema, OllamaSchema, GoogleSchema, @test all(msg[:role] in ["user", "model"] for msg in rendered) # Google uses "model" instead of "assistant" @test !any(msg -> any(part -> contains(part["text"], "metadata"), msg[:parts]), rendered) end +end - @testset "Complex Edge Cases" begin - # Test with multiple consecutive annotation messages - for schema in [TestEchoOpenAISchema(), AnthropicSchema(), OllamaSchema(), GoogleSchema()] - rendered = render(schema, messages_complex) - if schema isa AnthropicSchema - @test length(rendered.conversation) == 2 # user and AI only - @test !isnothing(rendered.system) # system preserved - else - @test length(rendered) == (schema isa GoogleSchema ? 
2 : 3) # Google schema combines system with user message - end +@testset "AnnotationMessage" begin + # Test creation and basic properties + annotation = AnnotationMessage( + content="Test annotation", + extras=Dict{Symbol,Any}(:key => "value"), + tags=[:debug, :test], + comment="Test comment" + ) + @test annotation.content == "Test annotation" + @test annotation.extras[:key] == "value" + @test :debug in annotation.tags + @test annotation.comment == "Test comment" + @test isabstractannotationmessage(annotation) + @test !isabstractannotationmessage(UserMessage("test")) - # Test no metadata leaks through - for i in 1:5 - if schema isa GoogleSchema - # Google schema uses a different structure - @test !any(msg -> any(part -> contains(part["text"], "Metadata $i"), msg[:parts]), rendered) - elseif schema isa AnthropicSchema - # Check each message's content array for metadata - @test !any(msg -> any(content -> contains(content["text"], "Metadata $i"), msg["content"]), rendered.conversation) - @test !contains(rendered.system, "Metadata $i") - else - # OpenAI and Ollama schemas - @test !any(msg -> contains(msg["content"], "Metadata $i"), rendered) - end - end - end - end + # Test that annotations are filtered out during rendering + messages = [ + SystemMessage("System prompt"), + UserMessage("User message"), + AnnotationMessage(content="Debug info", comment="Debug note"), + AIMessage("AI response") + ] + + # Create a basic schema for testing + schema = NoSchema() + rendered = render(schema, messages) + + # Verify annotation message is not in rendered output + @test length(rendered) == 3 # Only system, user, and AI messages + @test all(!isabstractannotationmessage, rendered) + + # Test annotate! 
utility + msgs = [UserMessage("Hello"), AIMessage("Hi")] + msgs=annotate!(msgs, "Debug info", tags=[:debug]) + @test length(msgs) == 3 + @test isabstractannotationmessage(msgs[1]) + @test msgs[1].tags == [:debug] + + # Test single message annotation + msg = UserMessage("Test") + result = annotate!(msg, "Annotation", comment="Note") + @test length(result) == 2 + @test isabstractannotationmessage(result[1]) + @test result[1].comment == "Note" + + # Test pretty printing + io = IOBuffer() + pprint(io, annotation) + output = String(take!(io)) + @test contains(output, "Test annotation") + @test contains(output, "debug") + @test contains(output, "Test comment") end diff --git a/test/annotation_messages.jl b/test/annotation_messages.jl deleted file mode 100644 index d2c399def..000000000 --- a/test/annotation_messages.jl +++ /dev/null @@ -1,61 +0,0 @@ -using Test -using PromptingTools -using PromptingTools: isabstractannotationmessage - -@testset "AnnotationMessage" begin - # Test creation and basic properties - annotation = AnnotationMessage( - content="Test annotation", - extras=Dict{Symbol,Any}(:key => "value"), - tags=[:debug, :test], - comment="Test comment" - ) - @test annotation.content == "Test annotation" - @test annotation.extras[:key] == "value" - @test :debug in annotation.tags - @test annotation.comment == "Test comment" - @test isabstractannotationmessage(annotation) - @test !isabstractannotationmessage(UserMessage("test")) - - # Test that annotations are filtered out during rendering - messages = [ - SystemMessage("System prompt"), - UserMessage("User message"), - AnnotationMessage(content="Debug info", comment="Debug note"), - AIMessage("AI response") - ] - - # Create a basic schema for testing - schema = NoSchema() - rendered = render(schema, messages) - - # Verify annotation message is not in rendered output - @test length(rendered) == 3 # Only system, user, and AI messages - @test all(!isabstractannotationmessage, rendered) - - # Test annotate! 
utility - msgs = [UserMessage("Hello"), AIMessage("Hi")] - annotate!(msgs, "Debug info", tags=[:debug]) - @test length(msgs) == 3 - @test isabstractannotationmessage(msgs[1]) - @test msgs[1].tags == [:debug] - - # Test single message annotation - msg = UserMessage("Test") - result = annotate!(msg, "Annotation", comment="Note") - @test length(result) == 2 - @test isabstractannotationmessage(result[1]) - @test result[1].comment == "Note" - - # Test tracer message handling - tracer_msg = TracerMessage(annotation) - @test isabstractannotationmessage(tracer_msg) - - # Test pretty printing - io = IOBuffer() - pprint(io, annotation) - output = String(take!(io)) - @test contains(output, "Test annotation") - @test contains(output, "debug") - @test contains(output, "Test comment") -end diff --git a/test/llm_shared.jl b/test/llm_shared.jl index b5f78de55..1688341de 100644 --- a/test/llm_shared.jl +++ b/test/llm_shared.jl @@ -26,9 +26,6 @@ using PromptingTools: finalize_outputs, role4render UserMessage(; content = "Hello, my name is John", variables = [:name], - name = nothing, - run_id = nothing, - cost = nothing, _type = :usermessage) ] conversation = render(schema, @@ -95,7 +92,7 @@ using PromptingTools: finalize_outputs, role4render SystemMessage("System message 1"), UserMessage("Hello {{name}}"), AIMessage("Hi there"), - UserMessage("How are you, John?", [:name], nothing, nothing, nothing, :usermessage), + UserMessage("How are you, John?", [:name], nothing, :usermessage), AIMessage("I'm doing well, thank you!") ] conversation = render(schema, messages; conversation, name = "John") @@ -129,7 +126,7 @@ using PromptingTools: finalize_outputs, role4render UserMessage("How are you?") ] expected_output = [ - SystemMessage("Hello, !"; run_id=nothing), + SystemMessage("Hello, !", [:name], :systemmessage), UserMessage("How are you?") ] conversation = render(schema, messages) @@ -311,7 +308,7 @@ end SystemMessage("System message 1"), UserMessage("User message {{name}}"), 
AIMessage("AI message"), - UserMessage("User message John", [:name], nothing, nothing, nothing, :usermessage), + UserMessage("User message John", [:name], nothing, :usermessage), AIMessage("AI message 2"), msg ] @@ -338,7 +335,7 @@ end SystemMessage("System message 1"), UserMessage("User message {{name}}"), AIMessage("AI message"), - UserMessage("User message John", [:name], nothing, nothing, nothing, :usermessage), + UserMessage("User message John", [:name], nothing, :usermessage), AIMessage("AI message 2"), msg, msg diff --git a/test/memory.jl b/test/memory.jl index 0bc804471..43c24bcfc 100644 --- a/test/memory.jl +++ b/test/memory.jl @@ -1,164 +1,170 @@ -using Test -using PromptingTools using PromptingTools: SystemMessage, UserMessage, AIMessage, AbstractMessage using PromptingTools: TestEchoOpenAISchema, ConversationMemory -using PromptingTools: issystemmessage, isusermessage, isaimessage, last_message, last_output, register_model! +using PromptingTools: issystemmessage, isusermessage, isaimessage, last_message, + last_output, register_model!, batch_start_index using HTTP, JSON3 -const TEST_RESPONSE = Dict( - "model" => "gpt-3.5-turbo", - "choices" => [Dict("message" => Dict("role" => "assistant", "content" => "Echo response"))], - "usage" => Dict("total_tokens" => 3, "prompt_tokens" => 2, "completion_tokens" => 1), - "id" => "chatcmpl-123", - "object" => "chat.completion", - "created" => Int(floor(time())) -) - -@testset "ConversationMemory" begin - # Setup test schema for all tests - response = Dict( - "model" => "gpt-3.5-turbo", - "choices" => [Dict("message" => Dict("role" => "assistant", "content" => "Echo response"))], - "usage" => Dict("total_tokens" => 3, "prompt_tokens" => 2, "completion_tokens" => 1), - "id" => "chatcmpl-123", - "object" => "chat.completion", - "created" => Int(floor(time())) - ) - - # Register test model - register_model!(; - name = "memory-echo", - schema = TestEchoOpenAISchema(; response=response), - cost_of_token_prompt = 0.0, - 
cost_of_token_generation = 0.0, - description = "Test echo model for memory tests" - ) +@testset "batch_start_index" begin + # Test basic batch calculation + @test batch_start_index(30, 10, 10) == 30 # Last batch of size 10 + @test batch_start_index(31, 10, 10) == 30 # Last batch of size 10 + @test batch_start_index(32, 10, 10) == 30 # Last batch of size 10 + @test batch_start_index(30, 20, 10) == 20 # Middle batch + @test batch_start_index(31, 20, 10) == 20 # Middle batch + @test batch_start_index(32, 20, 10) == 20 # Middle batch + @test batch_start_index(30, 30, 10) == 10 + @test batch_start_index(31, 30, 10) == 10 + @test batch_start_index(32, 30, 10) == 10 + + # Test edge cases + @test batch_start_index(10, 10, 5) == 5 # Last batch with exact fit + @test batch_start_index(11, 10, 5) == 5 # Last batch with exact fit + @test batch_start_index(12, 10, 5) == 5 # Last batch with exact fit + @test batch_start_index(13, 10, 5) == 5 # Last batch with exact fit + @test batch_start_index(14, 10, 5) == 5 # Last batch with exact fit + @test batch_start_index(15, 10, 5) == 10 + + # Test minimum bound + @test batch_start_index(5, 10, 10) == 1 # Should not go below 1 + + @test_throws AssertionError batch_start_index(3, 5, 10) +end + +# @testset "ConversationMemory" begin +@testset "ConversationMemory-type" begin # Test constructor and empty initialization mem = ConversationMemory() @test length(mem) == 0 @test isempty(mem.conversation) - # Test show method - io = IOBuffer() - show(io, mem) - @test String(take!(io)) == "ConversationMemory(0 messages)" - - # Test push! 
and length - push!(mem, SystemMessage("System prompt")) - @test length(mem) == 0 # System messages don't count in length - push!(mem, UserMessage("Hello")) - @test length(mem) == 1 - push!(mem, AIMessage("Hi there")) - @test length(mem) == 2 - - # Test last_message and last_output - @test last_message(mem).content == "Hi there" - @test last_output(mem).content == "Hi there" - - # Test with non-AI last message - push!(mem, UserMessage("How are you?")) - @test last_message(mem).content == "How are you?" - @test last_output(mem).content == "Hi there" # Still returns last AI message - - @testset "Message Retrieval" begin - mem = ConversationMemory() - - # Add test messages - push!(mem, SystemMessage("System prompt")) - push!(mem, UserMessage("First user")) - for i in 1:15 - push!(mem, AIMessage("AI message $i")) - push!(mem, UserMessage("User message $i")) - end - - # Test get_last without batch_size - recent = get_last(mem, 5) - @test length(recent) == 7 # 5 + system + first user - @test recent[1].content == "System prompt" - @test recent[2].content == "First user" - - # Test get_last with batch_size=10 - recent = get_last(mem, 20; batch_size=10) - @test 11 <= length(recent) <= 20 # Should be between 11-20 messages - @test recent[1].content == "System prompt" - @test recent[2].content == "First user" - - # Test get_last with explanation - recent = get_last(mem, 5; explain=true) - @test contains(recent[3].content, "For efficiency reasons") - - # Test get_last with verbose - mktemp() do path, io - redirect_stdout(io) do - get_last(mem, 5; verbose=true) - end - seekstart(io) - output = read(io, String) - @test contains(output, "Total messages:") - @test contains(output, "Keeping:") - end - end - - @testset "Message Deduplication" begin - mem = ConversationMemory() - - # Test append! 
with empty memory - msgs = [ - SystemMessage("System prompt"), - UserMessage("User 1"), - AIMessage("AI 1") - ] - append!(mem, msgs) - @test length(mem) == 2 # excluding system message - - # Test append! with run_id based deduplication - msgs_with_ids = [ - SystemMessage("System prompt"; run_id=1), - UserMessage("User 2"; run_id=2), - AIMessage("AI 2"; run_id=2) - ] - append!(mem, msgs_with_ids) - @test length(mem) == 4 # Should add new messages with higher run_id - - # Test append! with overlapping messages - msgs_overlap = [ - UserMessage("User 2"; run_id=1), # Old run_id, should be ignored - AIMessage("AI 2"; run_id=1), # Old run_id, should be ignored - UserMessage("User 3"; run_id=3), # New run_id, should be added - AIMessage("AI 3"; run_id=3) # New run_id, should be added - ] - append!(mem, msgs_overlap) - @test length(mem) == 6 # Should only add the new messages - end - - @testset "Generation Interface" begin - # Setup mock response - response = Dict( - "choices" => [Dict("message" => Dict("content" => "Test response"), "finish_reason" => "stop")], - "usage" => Dict("total_tokens" => 3, "prompt_tokens" => 2, "completion_tokens" => 1) - ) - schema = TestEchoOpenAISchema(; response=response, status=200) - OLD_PROMPT_SCHEMA = PromptingTools.PROMPT_SCHEMA - PromptingTools.PROMPT_SCHEMA = schema - - mem = ConversationMemory() - push!(mem, SystemMessage("You are a helpful assistant")) - result = mem("Hello!"; model="memory-echo") - @test result.content == "Echo response" - @test length(mem) == 2 # User message + AI response - - # Test functor interface with history truncation - for i in 1:5 - result = mem("Message $i"; model="memory-echo") - end - result = mem("Final message"; last=3, model="memory-echo") - @test length(get_last(mem, 3)) == 5 # 3 messages + system + first user - - # Test aigenerate method integration - result = aigenerate(mem, "Direct generation"; model="memory-echo") - @test result.content == "Echo response" - @test length(mem) == 14 # Previous 
messages + new exchange - end + # Test show method + io = IOBuffer() + show(io, mem) + @test String(take!(io)) == "ConversationMemory(0 messages)" + + # Test push! and length + push!(mem, SystemMessage("System prompt")) + show(io, mem) + @test String(take!(io)) == "ConversationMemory(0 messages)" # don't count system messages + @test length(mem) == 1 + push!(mem, UserMessage("Hello")) + @test length(mem) == 2 + push!(mem, AIMessage("Hi there")) + @test length(mem) == 3 + + # Test last_message and last_output + @test last_message(mem).content == "Hi there" + @test last_output(mem) == "Hi there" + + # Test with non-AI last message + push!(mem, UserMessage("How are you?")) + @test last_message(mem).content == "How are you?" + @test last_output(mem) == "How are you?" +end + +@testset "get_last" begin + mem = ConversationMemory() + + # Add test messages + push!(mem, SystemMessage("System prompt")) + push!(mem, UserMessage("First user")) + for i in 1:15 + push!(mem, AIMessage("AI message $i")) + push!(mem, UserMessage("User message $i")) + end + + # Test get_last without batch_size + recent = get_last(mem, 5) + @test length(recent) == 4 # requested 5 -> truncated to 4 incl. system + first user + @test recent[1].content == "System prompt" + @test recent[2].content == "First user" + + # Test get_last with batch_size=10 + recent = get_last(mem, 20; batch_size = 10) + # @test 11 <= length(recent) <= 20 # Should be between 11-20 messages + @test length(recent) == 14 + @test recent[1].content == "System prompt" + @test recent[2].content == "First user" + recent = get_last(mem, 14; batch_size = 10) + @test length(recent) == 14 + # @test 11 <= length(recent) <= 14 # Should be between 11-20 messages + @test recent[1].content == "System prompt" + @test recent[2].content == "First user" + + # Test get_last with explanation + recent = get_last(mem, 5; explain = true) + @test startswith(recent[3].content, "[This is an automatically added explanation") + + # Test get_last with verbose + @test_logs (:info, 
r"truncated to 4/32") get_last(mem, 5; verbose = true) +end + +@testset "ConversationMemory-append!" begin + mem = ConversationMemory() + + # Test append! with empty memory + msgs = [ + SystemMessage("System prompt"), + UserMessage("User 1"), + AIMessage("AI 1"; run_id = 1) + ] + append!(mem, msgs) + @test length(mem) == 3 + + # Run again, changes nothing + append!(mem, msgs) + @test length(mem) == 3 + + # Test append! with run_id based deduplication + msgs = [ + SystemMessage("System prompt"), + UserMessage("User 1"), + AIMessage("AI 1"; run_id = 1), + UserMessage("User 2"), + AIMessage("AI 2"; run_id = 2) + ] + append!(mem, msgs) + @test length(mem) == 5 + + # Test append! with overlapping messages + msgs_overlap = [ + SystemMessage("System prompt 2"), + UserMessage("User 3"), + AIMessage("AI 3"; run_id = 3), + UserMessage("User 4"), + AIMessage("AI 4"; run_id = 4) + ] + append!(mem, msgs_overlap) + @test length(mem) == 10 +end + +@testset "ConversationMemory-aigenerate" begin + # Setup mock response + response = Dict( + :choices => [ + Dict(:message => Dict(:content => "Hello World!"), + :finish_reason => "stop") + ], + :usage => Dict(:total_tokens => 3, :prompt_tokens => 2, :completion_tokens => 1)) + + schema = TestEchoOpenAISchema(; response = response, status = 200) + register_model!(; name = "memory-echo", schema) + + mem = ConversationMemory() + push!(mem, SystemMessage("You are a helpful assistant")) + result = mem("Hello!"; model = "memory-echo") + @test result.content == "Hello World!" + @test length(mem) == 3 + + # Test functor interface with history truncation + for i in 1:5 + result = mem("Message $i"; model = "memory-echo") end + result = mem("Final message"; last = 3, model = "memory-echo") + @test length(mem) == 15 # 5x2 + final x2 + 3 + + # Test aigenerate method integration + result = aigenerate(mem; model = "memory-echo") + @test result.content == "Hello World!" 
end diff --git a/test/memory_basic.jl b/test/memory_basic.jl deleted file mode 100644 index 3924584cb..000000000 --- a/test/memory_basic.jl +++ /dev/null @@ -1,67 +0,0 @@ -using Test -using PromptingTools -using PromptingTools: SystemMessage, UserMessage, AIMessage, AbstractMessage, ConversationMemory -using PromptingTools: issystemmessage, isusermessage, isaimessage, TestEchoOpenAISchema -using PromptingTools: last_message, last_output - -let - @testset "ConversationMemory Basic Operations" begin - # Single basic test - mem = ConversationMemory() - @test length(mem.conversation) == 0 - - # Test single push - push!(mem, SystemMessage("Test")) - @test length(mem.conversation) == 1 - end - - @testset "ConversationMemory with AI Generation" begin - OLD_PROMPT_SCHEMA = PromptingTools.PROMPT_SCHEMA - - # Setup mock response - response = Dict( - :choices => [Dict(:message => Dict(:content => "Hello!"), :finish_reason => "stop")], - :usage => Dict(:total_tokens => 3, :prompt_tokens => 2, :completion_tokens => 1) - ) - schema = TestEchoOpenAISchema(; response, status=200) - PromptingTools.PROMPT_SCHEMA = schema - - # Test memory with AI generation - mem = ConversationMemory() - push!(mem, SystemMessage("You are a helpful assistant")) - result = mem("Hello!"; model="test-model") - - @test length(mem.conversation) == 3 # system + user + ai - @test last_message(mem).content == "Hello!" 
- @test isaimessage(last_message(mem)) - - # Restore schema - PromptingTools.PROMPT_SCHEMA = OLD_PROMPT_SCHEMA - end - - @testset "ConversationMemory Advanced Features" begin - # Test batch size handling - mem = ConversationMemory() - - # Add multiple messages - push!(mem, SystemMessage("System prompt")) - for i in 1:15 - push!(mem, UserMessage("User message $i")) - push!(mem, AIMessage("AI response $i")) - end - - # Test batch size truncation - recent = get_last(mem, 10; batch_size=5) - @test length(recent) == 11 # system + first user + last 9 messages - @test issystemmessage(recent[1]) - @test isusermessage(recent[2]) - - # Test explanation message - recent_explained = get_last(mem, 10; batch_size=5, explain=true) - @test length(recent_explained) == 11 - @test occursin("truncated", first(filter(isaimessage, recent_explained)).content) - - # Test verbose output - @test_nowarn get_last(mem, 10; batch_size=5, verbose=true) - end -end diff --git a/test/memory_batch.jl b/test/memory_batch.jl deleted file mode 100644 index 5ddb16d9d..000000000 --- a/test/memory_batch.jl +++ /dev/null @@ -1,47 +0,0 @@ -using Test -using PromptingTools -using PromptingTools: SystemMessage, UserMessage, AIMessage, AbstractMessage, ConversationMemory -using PromptingTools: issystemmessage, isusermessage, isaimessage -using Test: @capture_out - -@testset "ConversationMemory Batch Tests" begin - mem = ConversationMemory() - - # Add test messages - push!(mem, SystemMessage("System")) - push!(mem, UserMessage("First User")) - for i in 1:5 - push!(mem, UserMessage("User $i")) - push!(mem, AIMessage("AI $i")) - end - - # Test basic batch size - result = get_last(mem, 6; batch_size=2) - @test length(result) == 6 # system + first_user + 2 complete pairs - @test issystemmessage(result[1]) - @test isusermessage(result[2]) - - # Test explanation - result_explained = get_last(mem, 6; batch_size=2, explain=true) - @test length(result_explained) == 6 - @test any(msg -> occursin("truncated", 
msg.content), result_explained) - - # Test verbose output - output = @capture_out begin - get_last(mem, 6; batch_size=2, verbose=true) - end - @test contains(output, "Total messages:") - @test contains(output, "Keeping:") - @test contains(output, "Required messages:") - - # Test larger batch size - result_large = get_last(mem, 8; batch_size=4) - @test length(result_large) == 8 - @test issystemmessage(result_large[1]) - @test isusermessage(result_large[2]) - - # Test with no batch size - result_no_batch = get_last(mem, 4) - @test length(result_no_batch) == 4 - @test issystemmessage(result_no_batch[1]) -end diff --git a/test/memory_core.jl b/test/memory_core.jl deleted file mode 100644 index 14d5ef0b2..000000000 --- a/test/memory_core.jl +++ /dev/null @@ -1,45 +0,0 @@ -using Test -using PromptingTools -using PromptingTools: SystemMessage, UserMessage, AIMessage, AbstractMessage -using PromptingTools: issystemmessage, isusermessage, isaimessage - -@testset "ConversationMemory Core" begin - # Test constructor - mem = ConversationMemory() - @test length(mem.conversation) == 0 - - # Test push! - push!(mem, SystemMessage("System")) - @test length(mem.conversation) == 1 - @test issystemmessage(mem.conversation[1]) - - # Test append! 
- msgs = [UserMessage("User1"), AIMessage("AI1")] - append!(mem, msgs) - @test length(mem.conversation) == 3 - - # Test get_last basic functionality - result = get_last(mem, 2) - @test length(result) == 3 # system + requested 2 - @test issystemmessage(result[1]) - @test result[end].content == "AI1" - - # Test show method - mem_show = ConversationMemory() - push!(mem_show, SystemMessage("System")) - push!(mem_show, UserMessage("User1")) - @test sprint(show, mem_show) == "ConversationMemory(1 messages)" # system messages not counted - - # Test length (excluding system messages) - mem_len = ConversationMemory() - push!(mem_len, SystemMessage("System")) - @test length(mem_len) == 0 # system message not counted - push!(mem_len, UserMessage("User1")) - @test length(mem_len) == 1 - push!(mem_len, AIMessage("AI1")) - @test length(mem_len) == 2 - - # Test empty memory - empty_mem = ConversationMemory() - @test isempty(get_last(empty_mem)) -end diff --git a/test/memory_dedup.jl b/test/memory_dedup.jl deleted file mode 100644 index 2d017b7b0..000000000 --- a/test/memory_dedup.jl +++ /dev/null @@ -1,75 +0,0 @@ -using Test -using PromptingTools -using PromptingTools: SystemMessage, UserMessage, AIMessage, AbstractMessage, ConversationMemory -using PromptingTools: issystemmessage, isusermessage, isaimessage, TestEchoOpenAISchema -using PromptingTools: last_message, last_output - -@testset "ConversationMemory Deduplication" begin - # Test run_id based deduplication - mem = ConversationMemory() - - # Create messages with run_ids - msgs1 = [ - SystemMessage("System", run_id=1), - UserMessage("User1", run_id=1), - AIMessage("AI1", run_id=1) - ] - - msgs2 = [ - UserMessage("User2", run_id=2), - AIMessage("AI2", run_id=2) - ] - - # Test initial append - append!(mem, msgs1) - @test length(mem.conversation) == 3 - - # Test appending newer messages - append!(mem, msgs2) - @test length(mem.conversation) == 5 - - # Test appending older messages (should not append) - append!(mem, msgs1) - 
@test length(mem.conversation) == 5 - - # Test mixed run_ids (should only append newer ones) - mixed_msgs = [ - UserMessage("Old", run_id=1), - UserMessage("New", run_id=3), - AIMessage("Response", run_id=3) - ] - append!(mem, mixed_msgs) - @test length(mem.conversation) == 7 -end - -@testset "ConversationMemory AIGenerate Integration" begin - OLD_PROMPT_SCHEMA = PromptingTools.PROMPT_SCHEMA - - # Setup mock response - response = Dict( - :choices => [Dict(:message => Dict(:content => "Test response"), :finish_reason => "stop")], - :usage => Dict(:total_tokens => 3, :prompt_tokens => 2, :completion_tokens => 1) - ) - schema = TestEchoOpenAISchema(; response, status=200) - PromptingTools.PROMPT_SCHEMA = schema - - mem = ConversationMemory() - - # Test direct aigenerate integration - result = aigenerate(mem, "Test prompt"; model="test-model") - @test result.content == "Test response" - - # Test functor interface with history truncation - push!(mem, SystemMessage("System")) - for i in 1:5 - push!(mem, UserMessage("User$i")) - push!(mem, AIMessage("AI$i")) - end - - result = mem("Final prompt"; last=3, model="test-model") - @test result.content == "Test response" - @test length(get_last(mem, 3)) == 4 # system + last 3 - - # Restore schema - PromptingTools.PROMPT_SCHEMA = OLD_PROMPT_SCHEMA -end diff --git a/test/memory_minimal.jl b/test/memory_minimal.jl deleted file mode 100644 index 38e807654..000000000 --- a/test/memory_minimal.jl +++ /dev/null @@ -1,27 +0,0 @@ -using Test -using PromptingTools -using PromptingTools: SystemMessage, UserMessage, AIMessage, AbstractMessage, ConversationMemory -using PromptingTools: issystemmessage, isusermessage, isaimessage - -@testset "ConversationMemory Basic" begin - # Test constructor only - mem = ConversationMemory() - @test isa(mem, ConversationMemory) - @test isempty(mem.conversation) - - # Test push! 
with system message - push!(mem, SystemMessage("Test system")) - @test length(mem.conversation) == 1 - @test issystemmessage(mem.conversation[1]) - - # Test push! with user message - push!(mem, UserMessage("Test user")) - @test length(mem.conversation) == 2 - @test isusermessage(mem.conversation[2]) - - # Test get_last basic functionality - recent = get_last(mem, 2) - @test length(recent) == 2 - @test recent[1].content == "Test system" - @test recent[2].content == "Test user" -end diff --git a/test/messages.jl b/test/messages.jl index 36373fa74..2ece68c37 100644 --- a/test/messages.jl +++ b/test/messages.jl @@ -1,16 +1,13 @@ using PromptingTools: AIMessage, SystemMessage, MetadataMessage, AbstractMessage using PromptingTools: UserMessage, UserMessageWithImages, DataMessage, AIToolRequest, - ToolMessage, AnnotationMessage + ToolMessage using PromptingTools: _encode_local_image, attach_images_to_user_message, last_message, last_output, tool_calls using PromptingTools: isusermessage, issystemmessage, isdatamessage, isaimessage, - istracermessage, isaitoolrequest, istoolmessage, isabstractannotationmessage + istracermessage, isaitoolrequest, istoolmessage using PromptingTools: TracerMessageLike, TracerMessage, align_tracer!, unwrap, AbstractTracerMessage, AbstractTracer, pprint -using PromptingTools: TracerSchema, SaverSchema, TestEchoOpenAISchema, render - -# Include the detailed annotation message tests -include("test_annotation_messages.jl") +using PromptingTools: TracerSchema, SaverSchema @testset "Message constructors" begin # Creates an instance of MSG with the given content string. 
@@ -30,6 +27,7 @@ include("test_annotation_messages.jl") @test_throws AssertionError UserMessage(content) @test_throws AssertionError UserMessage(; content) @test_throws AssertionError SystemMessage(content) + @test_throws AssertionError SystemMessage(; content) @test_throws AssertionError UserMessageWithImages(; content, image_url = ["a"]) # Check methods @@ -42,14 +40,6 @@ include("test_annotation_messages.jl") @test UserMessage(content) != AIMessage(content) @test AIToolRequest() |> isaitoolrequest @test ToolMessage(; tool_call_id = "x", raw = "") |> istoolmessage - - # Test AnnotationMessage - annotation = AnnotationMessage(content="Debug info", comment="Test annotation") - @test isabstractannotationmessage(annotation) - @test !isabstractannotationmessage(UserMessage("test")) - @test annotation.content == "Debug info" - @test annotation.comment == "Test annotation" - ## check handling other types @test isusermessage(1) == false @test issystemmessage(nothing) == false @@ -190,17 +180,6 @@ end @test occursin("User Message", output) @test occursin("User input with image", output) - # AnnotationMessage - take!(io) - m = AnnotationMessage("Debug info", comment="Test annotation") - show(io, MIME("text/plain"), m) - @test occursin("AnnotationMessage(\"Debug info\")", String(take!(io))) - pprint(io, m) - output = String(take!(io)) - @test occursin("Annotation Message", output) - @test occursin("Debug info", output) - @test occursin("Test annotation", output) - # MetadataMessage take!(io) m = MetadataMessage("Metadata info") @@ -372,28 +351,3 @@ end @test occursin("TracerMessageLike with:", pprint_output) @test occursin("Test Message", pprint_output) end - -@testset "AnnotationMessage rendering" begin - # Test that annotations are filtered out during rendering - messages = [ - SystemMessage("System prompt"), - UserMessage("User message"), - AnnotationMessage(content="Debug info", comment="Debug note"), - AIMessage("AI response") - ] - - # Create a basic schema for 
testing - schema = TestEchoOpenAISchema() - rendered = render(schema, messages) - - # Verify annotation message is not in rendered output - @test !contains(rendered, "Debug info") - @test !contains(rendered, "Debug note") - - # Test single message rendering - annotation = AnnotationMessage("Debug info", comment="Debug") - @test render(schema, annotation) === nothing - - # Test that other messages still render normally - @test !isnothing(render(schema, UserMessage("Test"))) -end diff --git a/test/messages_utils.jl b/test/messages_utils.jl deleted file mode 100644 index 2e9283f57..000000000 --- a/test/messages_utils.jl +++ /dev/null @@ -1,49 +0,0 @@ -using Test -using PromptingTools -using PromptingTools: SystemMessage, UserMessage, AIMessage, AbstractMessage -using PromptingTools: last_message, last_output - -@testset "Message Utilities" begin - @testset "last_message" begin - # Test empty vector - @test last_message(AbstractMessage[]) === nothing - - # Test single message - msgs = [UserMessage("Hello")] - @test last_message(msgs).content == "Hello" - - # Test multiple messages - msgs = [ - SystemMessage("System"), - UserMessage("User"), - AIMessage("AI") - ] - @test last_message(msgs).content == "AI" - end - - @testset "last_output" begin - # Test empty vector - @test last_output(AbstractMessage[]) === nothing - - # Test no AI messages - msgs = [ - SystemMessage("System"), - UserMessage("User") - ] - @test last_output(msgs) === nothing - - # Test with AI messages - msgs = [ - SystemMessage("System"), - UserMessage("User"), - AIMessage("AI 1"), - UserMessage("User 2"), - AIMessage("AI 2") - ] - @test last_output(msgs).content == "AI 2" - - # Test with non-AI last message - push!(msgs, UserMessage("Last user")) - @test last_output(msgs).content == "AI 2" - end -end diff --git a/test/minimal_test.jl b/test/minimal_test.jl deleted file mode 100644 index 71fe12ad6..000000000 --- a/test/minimal_test.jl +++ /dev/null @@ -1,50 +0,0 @@ -module TestPromptingTools - -using 
Test -using Dates -using JSON3 -using HTTP -using OpenAI -using StreamCallbacks, StructTypes - -# First define the abstract types and schemas needed -abstract type AbstractPromptSchema end -abstract type AbstractMessage end - -# Import the essential files in correct order -include("../src/constants.jl") -include("../src/utils.jl") -include("../src/messages.jl") - -@testset "Basic Message Types" begin - # Test basic message creation - sys_msg = SystemMessage("test system") - @test issystemmessage(sys_msg) - - user_msg = UserMessage("test user") - @test isusermessage(user_msg) - - ai_msg = AIMessage("test ai") - @test isaimessage(ai_msg) - - # Test annotation message - annotation = AnnotationMessage("Test annotation"; - extras=Dict{Symbol,Any}(:key => "value"), - tags=Symbol[:test], - comment="Test comment") - @test isabstractannotationmessage(annotation) - - # Test conversation memory - memory = ConversationMemory() - push!(memory, sys_msg) - push!(memory, user_msg) - @test length(memory) == 1 # system messages not counted - @test last_message(memory) == user_msg - - # Test rendering with annotation message - messages = [sys_msg, annotation, user_msg, ai_msg] - rendered = render(OpenAISchema(), messages) - @test length(rendered) == 3 # annotation message should be filtered out -end - -end # module diff --git a/test/runtests.jl b/test/runtests.jl index 50eb5f67f..31c1e69c5 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -19,7 +19,7 @@ end @testset "PromptingTools.jl" begin include("utils.jl") include("messages.jl") - include("messages_utils.jl") + include("annotation.jl") include("memory.jl") include("extraction.jl") include("user_preferences.jl") diff --git a/test/runtests_memory.jl b/test/runtests_memory.jl deleted file mode 100644 index 3c4678daa..000000000 --- a/test/runtests_memory.jl +++ /dev/null @@ -1,8 +0,0 @@ -using Test -using PromptingTools -using PromptingTools: SystemMessage, UserMessage, AIMessage, AbstractMessage -using PromptingTools: 
TestEchoOpenAISchema, ConversationMemory -using PromptingTools: issystemmessage, isusermessage, isaimessage, last_message, last_output, register_model! - -# Run only memory tests -include("memory.jl") diff --git a/test/utils.jl b/test/utils.jl index 5a4cd8a17..bc885c881 100644 --- a/test/utils.jl +++ b/test/utils.jl @@ -215,7 +215,7 @@ end # Multiple messages conv = [AIMessage(; content = "", tokens = (1000, 2000), cost = 1.0), - UserMessage(; content = "", cost = 0.0)] + UserMessage(; content = "")] @test call_cost(conv) == 1.0 # No model provided @@ -467,4 +467,4 @@ end # Test with an array of dictionaries @test unique_permutation([ Dict(:a => 1), Dict(:b => 2), Dict(:a => 1), Dict(:c => 3)]) == [1, 2, 4] -end +end \ No newline at end of file diff --git a/trace.log b/trace.log deleted file mode 100644 index 9203a6d37..000000000 --- a/trace.log +++ /dev/null @@ -1,23 +0,0 @@ -precompile(Tuple{Base.var"##s128#247", Vararg{Any, 5}}) -precompile(Tuple{typeof(Core.kwcall), NamedTuple{(:stale_age,), Tuple{Int64}}, typeof(FileWatching.Pidfile.trymkpidlock), Function, Vararg{Any}}) -precompile(Tuple{FileWatching.Pidfile.var"##trymkpidlock#11", Base.Pairs{Symbol, Int64, Tuple{Symbol}, NamedTuple{(:stale_age,), Tuple{Int64}}}, typeof(FileWatching.Pidfile.trymkpidlock), Function, Vararg{Any}}) -precompile(Tuple{typeof(Core.kwcall), NamedTuple{(:stale_age, :wait), Tuple{Int64, Bool}}, typeof(FileWatching.Pidfile.mkpidlock), Function, String}) -precompile(Tuple{FileWatching.Pidfile.var"##mkpidlock#7", Base.Pairs{Symbol, Integer, Tuple{Symbol, Symbol}, NamedTuple{(:stale_age, :wait), Tuple{Int64, Bool}}}, typeof(FileWatching.Pidfile.mkpidlock), Base.var"#968#969"{Base.PkgId}, String, Int32}) -precompile(Tuple{typeof(Base.print), Base.GenericIOBuffer{Array{UInt8, 1}}, UInt16}) -precompile(Tuple{typeof(Base.CoreLogging.shouldlog), Logging.ConsoleLogger, Base.CoreLogging.LogLevel, Module, Symbol, Symbol}) -precompile(Tuple{typeof(Core.kwcall), NamedTuple{names, T} where T<:Tuple 
where names, typeof(Base.CoreLogging.handle_message), Logging.ConsoleLogger, Base.CoreLogging.LogLevel, Vararg{Any, 6}}) -precompile(Tuple{typeof(Base.pairs), NamedTuple{(:path,), Tuple{String}}}) -precompile(Tuple{typeof(Base.haskey), Base.Pairs{Symbol, String, Tuple{Symbol}, NamedTuple{(:path,), Tuple{String}}}, Symbol}) -precompile(Tuple{typeof(Base.isopen), Base.GenericIOBuffer{Array{UInt8, 1}}}) -precompile(Tuple{Type{Base.IOContext{IO_t} where IO_t<:IO}, Base.GenericIOBuffer{Array{UInt8, 1}}, Base.TTY}) -precompile(Tuple{typeof(Logging.showvalue), Base.IOContext{Base.GenericIOBuffer{Array{UInt8, 1}}}, String}) -precompile(Tuple{typeof(Logging.default_metafmt), Base.CoreLogging.LogLevel, Vararg{Any, 5}}) -precompile(Tuple{typeof(Base.string), Module}) -precompile(Tuple{typeof(Core.kwcall), NamedTuple{(:bold, :color), Tuple{Bool, Symbol}}, typeof(Base.printstyled), Base.IOContext{Base.GenericIOBuffer{Array{UInt8, 1}}}, String}) -precompile(Tuple{typeof(Core.kwcall), NamedTuple{(:bold, :color), Tuple{Bool, Symbol}}, typeof(Base.printstyled), Base.IOContext{Base.GenericIOBuffer{Array{UInt8, 1}}}, String, Vararg{String}}) -precompile(Tuple{Base.var"##printstyled#995", Bool, Bool, Bool, Bool, Bool, Bool, Symbol, typeof(Base.printstyled), Base.IOContext{Base.GenericIOBuffer{Array{UInt8, 1}}}, String, Vararg{Any}}) -precompile(Tuple{typeof(Core.kwcall), NamedTuple{(:bold, :italic, :underline, :blink, :reverse, :hidden), NTuple{6, Bool}}, typeof(Base.with_output_color), Function, Symbol, Base.IOContext{Base.GenericIOBuffer{Array{UInt8, 1}}}, String, Vararg{Any}}) -precompile(Tuple{typeof(Base.write), Base.TTY, Array{UInt8, 1}}) -precompile(Tuple{typeof(Core.kwcall), NamedTuple{(:cpu_target,), Tuple{Nothing}}, typeof(Base.julia_cmd)}) -precompile(Tuple{typeof(Core.kwcall), NamedTuple{(:stderr, :stdout), Tuple{Base.TTY, Base.TTY}}, typeof(Base.pipeline), Base.Cmd}) -precompile(Tuple{typeof(Base.open), Base.CmdRedirect, String, Base.TTY})