Skip to content

Commit

Permalink
update tests
Browse files Browse the repository at this point in the history
  • Loading branch information
svilupp committed Nov 26, 2024
1 parent 2599ce7 commit 58ca841
Show file tree
Hide file tree
Showing 9 changed files with 122 additions and 82 deletions.
5 changes: 4 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,7 @@
docs/package-lock.json

# Ignore Cursor rules
.cursorrules
.cursorrules

# Ignore any local preferences
**/LocalPreferences.toml
10 changes: 3 additions & 7 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,20 +10,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Fixed

## [0.66.0]

### Added
- Added a new `AnnotationMessage` type for keeping human-only information in the message changes. See `?annotate!` on how to use it.
- Added a new `ConversationMemory` type to enable long multi-turn conversations with a truncated memory of the conversation history. Truncation works in "batches" so that caching is not disrupted. See `?ConversationMemory` and `get_last` for more information.


## [0.65.0]

### Breaking
- Changed the official ENV variable for MistralAI API from `MISTRALAI_API_KEY` to `MISTRAL_API_KEY` to be compatible with the Mistral docs.

### Added
- Added a new Gemini Experimental model from November 2024 (`gemini-exp-1121` with alias `gemexp`).
- Added a new `AnnotationMessage` type for keeping human-only information in the message changes. See `?annotate!` on how to use it.
- Added a new `ConversationMemory` type to enable long multi-turn conversations with a truncated memory of the conversation history. Truncation works in "batches" so that caching is not disrupted. See `?ConversationMemory` and `get_last` for more information.


### Updated
- Changed the ENV variable for MistralAI API from `MISTRALAI_API_KEY` to `MISTRAL_API_KEY` to be compatible with the Mistral docs.
Expand Down
2 changes: 1 addition & 1 deletion Project.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
name = "PromptingTools"
uuid = "670122d1-24a8-4d70-bfce-740807c42192"
authors = ["J S @svilupp and contributors"]
version = "0.66.0"
version = "0.65.0"

[deps]
AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
Expand Down
1 change: 0 additions & 1 deletion src/memory.jl
Original file line number Diff line number Diff line change
Expand Up @@ -257,7 +257,6 @@ function Base.append!(mem::ConversationMemory, msgs::Vector{<:AbstractMessage})
if !isnothing(match_idx)
# Found match - append everything after this message
(idx + 1 <= length(msgs)) && append!(mem.conversation, msgs[(idx + 1):end])
@info idx
return mem
end
end
Expand Down
11 changes: 9 additions & 2 deletions src/messages.jl
Original file line number Diff line number Diff line change
Expand Up @@ -262,7 +262,7 @@ for automatic operations.
"""
Base.@kwdef struct AnnotationMessage{T <: AbstractString} <: AbstractAnnotationMessage
content::T
extras::Dict{Symbol, <:Any} = Dict{Symbol, Any}()
extras::Union{Nothing, Dict{Symbol, Any}} = nothing
tags::Vector{Symbol} = Symbol[]
comment::String = ""
run_id::Union{Nothing, Int} = Int(rand(Int32))
Expand All @@ -275,7 +275,14 @@ function (MSG::Type{<:AbstractChatMessage})(prompt::AbstractString; kwargs...)
MSG(; content = prompt, kwargs...)
end
function (MSG::Type{<:AbstractAnnotationMessage})(content::AbstractString; kwargs...)
MSG(; content, kwargs...)
## Re-type extras to be generic Dict{Symbol, Any}
new_kwargs = if haskey(kwargs, :extras)
[f == :extras ? f => convert(Dict{Symbol, Any}, kwargs[f]) : f => kwargs[f]
for f in keys(kwargs)]
else
kwargs
end
MSG(; content, new_kwargs...)
end
function (MSG::Type{<:AbstractChatMessage})(msg::AbstractChatMessage)
MSG(; msg.content)
Expand Down
89 changes: 24 additions & 65 deletions test/annotation.jl
Original file line number Diff line number Diff line change
@@ -1,29 +1,34 @@
using PromptingTools: isabstractannotationmessage, annotate!, pprint
using PromptingTools: OpenAISchema, AnthropicSchema, OllamaSchema, GoogleSchema, TestEchoOpenAISchema, render, NoSchema
using PromptingTools: AnnotationMessage, SystemMessage, TracerMessage,UserMessage, AIMessage
using PromptingTools: OpenAISchema, AnthropicSchema, OllamaSchema, GoogleSchema,
TestEchoOpenAISchema, render, NoSchema
using PromptingTools: AnnotationMessage, SystemMessage, TracerMessage, UserMessage,
AIMessage

@testset "Annotation Message Rendering" begin
# Create a mix of messages including annotation messages
messages = [
SystemMessage("Be helpful"),
AnnotationMessage("This is metadata", extras=Dict{Symbol,Any}(:key => "value")),
AnnotationMessage("This is metadata", extras = Dict{Symbol, Any}(:key => "value")),
UserMessage("Hello"),
AnnotationMessage("More metadata"),
AIMessage("Hi there!") # No status needed for basic message
]

@testset "Basic Message Filtering" begin
# Test OpenAI Schema with TestEcho
schema = TestEchoOpenAISchema(;
response=Dict(
"choices" => [Dict("message" => Dict("content" => "Test response", "role" => "assistant"), "index" => 0, "finish_reason" => "stop")],
"usage" => Dict("prompt_tokens" => 10, "completion_tokens" => 20, "total_tokens" => 30),
schema = TestEchoOpenAISchema(;
response = Dict(
"choices" => [Dict(
"message" => Dict("content" => "Test response", "role" => "assistant"),
"index" => 0, "finish_reason" => "stop")],
"usage" => Dict(
"prompt_tokens" => 10, "completion_tokens" => 20, "total_tokens" => 30),
"model" => "gpt-3.5-turbo",
"id" => "test-id",
"object" => "chat.completion",
"created" => 1234567890
),
status=200
status = 200
)
rendered = render(schema, messages)
@test length(rendered) == 3 # Should only have system, user, and AI messages
Expand All @@ -36,7 +41,9 @@ using PromptingTools: AnnotationMessage, SystemMessage, TracerMessage,UserMessag
@test !isnothing(rendered.system) # System message should be preserved separately
@test all(msg["role"] in ["user", "assistant"] for msg in rendered.conversation)
@test !contains(rendered.system, "metadata") # Check system message
@test !any(msg -> any(content -> contains(content["text"], "metadata"), msg["content"]), rendered.conversation)
@test !any(
msg -> any(content -> contains(content["text"], "metadata"), msg["content"]),
rendered.conversation)

# Test Ollama Schema
rendered = render(OllamaSchema(), messages)
Expand All @@ -48,61 +55,13 @@ using PromptingTools: AnnotationMessage, SystemMessage, TracerMessage,UserMessag
rendered = render(GoogleSchema(), messages)
@test length(rendered) == 2 # Google schema combines system message with first user message
@test all(msg[:role] in ["user", "model"] for msg in rendered) # Google uses "model" instead of "assistant"
@test !any(msg -> any(part -> contains(part["text"], "metadata"), msg[:parts]), rendered)
end
end


@testset "AnnotationMessage" begin
# Test creation and basic properties
annotation = AnnotationMessage(
content="Test annotation",
extras=Dict{Symbol,Any}(:key => "value"),
tags=[:debug, :test],
comment="Test comment"
)
@test annotation.content == "Test annotation"
@test annotation.extras[:key] == "value"
@test :debug in annotation.tags
@test annotation.comment == "Test comment"
@test isabstractannotationmessage(annotation)
@test !isabstractannotationmessage(UserMessage("test"))

# Test that annotations are filtered out during rendering
messages = [
SystemMessage("System prompt"),
UserMessage("User message"),
AnnotationMessage(content="Debug info", comment="Debug note"),
AIMessage("AI response")
]
@test !any(
msg -> any(part -> contains(part["text"], "metadata"), msg[:parts]), rendered)

# Create a basic schema for testing
schema = NoSchema()
rendered = render(schema, messages)

# Verify annotation message is not in rendered output
@test length(rendered) == 3 # Only system, user, and AI messages
@test all(!isabstractannotationmessage, rendered)

# Test annotate! utility
msgs = [UserMessage("Hello"), AIMessage("Hi")]
msgs=annotate!(msgs, "Debug info", tags=[:debug])
@test length(msgs) == 3
@test isabstractannotationmessage(msgs[1])
@test msgs[1].tags == [:debug]

# Test single message annotation
msg = UserMessage("Test")
result = annotate!(msg, "Annotation", comment="Note")
@test length(result) == 2
@test isabstractannotationmessage(result[1])
@test result[1].comment == "Note"

# Test pretty printing
io = IOBuffer()
pprint(io, annotation)
output = String(take!(io))
@test contains(output, "Test annotation")
@test contains(output, "debug")
@test contains(output, "Test comment")
# Create a basic NoSchema
schema = NoSchema()
rendered = render(schema, messages)
@test length(rendered) == 3
@test all(!isabstractannotationmessage, rendered)
end
end
9 changes: 8 additions & 1 deletion test/memory.jl
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
using PromptingTools: SystemMessage, UserMessage, AIMessage, AbstractMessage
using PromptingTools: TestEchoOpenAISchema, ConversationMemory
using PromptingTools: issystemmessage, isusermessage, isaimessage, last_message,
last_output, register_model!, batch_start_index, get_last
last_output, register_model!, batch_start_index,
get_last, pprint

@testset "batch_start_index" begin
# Test basic batch calculation
Expand Down Expand Up @@ -39,6 +40,8 @@ end
io = IOBuffer()
show(io, mem)
@test String(take!(io)) == "ConversationMemory(0 messages)"
pprint(io, mem)
@test String(take!(io)) == ""

# Test push! and length
push!(mem, SystemMessage("System prompt"))
Expand All @@ -58,6 +61,10 @@ end
push!(mem, UserMessage("How are you?"))
@test last_message(mem).content == "How are you?"
@test last_output(mem) == "How are you?"

pprint(io, mem)
output = String(take!(io))
@test occursin("How are you?", output)
end

@testset "get_last" begin
Expand Down
62 changes: 60 additions & 2 deletions test/messages.jl
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
using PromptingTools: AIMessage, SystemMessage, MetadataMessage, AbstractMessage
using PromptingTools: UserMessage, UserMessageWithImages, DataMessage, AIToolRequest,
ToolMessage
ToolMessage, AnnotationMessage
using PromptingTools: _encode_local_image, attach_images_to_user_message, last_message,
last_output, tool_calls
using PromptingTools: isusermessage, issystemmessage, isdatamessage, isaimessage,
istracermessage, isaitoolrequest, istoolmessage
istracermessage, isaitoolrequest, istoolmessage,
isabstractannotationmessage
using PromptingTools: TracerMessageLike, TracerMessage, align_tracer!, unwrap,
AbstractTracerMessage, AbstractTracer, pprint
using PromptingTools: TracerSchema, SaverSchema
Expand Down Expand Up @@ -47,6 +48,63 @@ using PromptingTools: TracerSchema, SaverSchema
@test isaimessage(missing) == false
@test istracermessage(1) == false
end

@testset "AnnotationMessage" begin
# Test creation and basic properties
annotation = AnnotationMessage(
content = "Test annotation",
extras = Dict{Symbol, Any}(:key => "value"),
tags = [:debug, :test],
comment = "Test comment"
)
@test annotation.content == "Test annotation"
@test annotation.extras[:key] == "value"
@test :debug in annotation.tags
@test annotation.comment == "Test comment"
@test isabstractannotationmessage(annotation)
@test !isabstractannotationmessage(UserMessage("test"))

# Test that annotations are filtered out during rendering
messages = [
SystemMessage("System prompt"),
UserMessage("User message"),
AnnotationMessage(content = "Debug info", comment = "Debug note"),
AIMessage("AI response")
]

# Test annotate! utility
msgs = [UserMessage("Hello"), AIMessage("Hi")]
msgs = annotate!(msgs, "Debug info", tags = [:debug])
@test length(msgs) == 3
@test isabstractannotationmessage(msgs[1])
@test msgs[1].tags == [:debug]

# Test single message annotation
msg = UserMessage("Test")
result = annotate!(msg, "Annotation", comment = "Note")
@test length(result) == 2
@test isabstractannotationmessage(result[1])
@test result[1].comment == "Note"

# Test pretty printing
io = IOBuffer()
pprint(io, annotation)
output = String(take!(io))
@test occursin("Test annotation", output)
@test occursin("debug", output)
@test occursin("Test comment", output)

# Test show method
io = IOBuffer()
show(io, MIME("text/plain"), annotation)
output = String(take!(io))
@test occursin("AnnotationMessage", output)
@test occursin("Test annotation", output)
@test !occursin("extras", output) # Should only show type and content
@test !occursin("tags", output)
@test !occursin("comment", output)
end

@testset "UserMessageWithImages" begin
content = "Hello, world!"
image_path = joinpath(@__DIR__, "data", "julia.png")
Expand Down
15 changes: 13 additions & 2 deletions test/serialization.jl
Original file line number Diff line number Diff line change
@@ -1,12 +1,17 @@
using PromptingTools: AIMessage,
SystemMessage, UserMessage, UserMessageWithImages, AbstractMessage,
DataMessage, ShareGPTSchema, Tool, ToolMessage, AIToolRequest
DataMessage, ShareGPTSchema, Tool, ToolMessage, AIToolRequest,
AnnotationMessage, AbstractAnnotationMessage
using PromptingTools: save_conversation, load_conversation, save_conversations
using PromptingTools: save_template, load_template

@testset "Serialization - Messages" begin
# Test save_conversation
messages = AbstractMessage[SystemMessage("System message 1"),
messages = AbstractMessage[AnnotationMessage(;
content = "Annotation message"),
AnnotationMessage(;
content = "Annotation message 2", extras = Dict{Symbol, Any}(:a => 1, :b => 2)),
SystemMessage("System message 1"),
UserMessage("User message"),
AIMessage("AI message"),
UserMessageWithImages(; content = "a", image_url = String["b", "c"]),
Expand All @@ -22,6 +27,12 @@ using PromptingTools: save_template, load_template
# Test load_conversation
loaded_messages = load_conversation(tmp)
@test loaded_messages == messages

# save and load AbstractAnnotationMessage
msg = AnnotationMessage("Annotation message"; extras = Dict(:a => 1))
JSON3.write(tmp, msg)
loaded_msg = JSON3.read(tmp, AbstractAnnotationMessage)
@test loaded_msg == msg
end

@testset "Serialization - Templates" begin
Expand Down

0 comments on commit 58ca841

Please sign in to comment.