Skip to content

Commit

Permalink
Fix usage keys for GoogleAI (#258)
Browse files Browse the repository at this point in the history
  • Loading branch information
svilupp authored Dec 21, 2024
1 parent 7bc7fbd commit 045379e
Show file tree
Hide file tree
Showing 4 changed files with 57 additions and 8 deletions.
5 changes: 5 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Added

### Fixed

## [0.69.1]

### Fixed
- Added assertion in `response_to_message` for missing `:tool_calls` key in the response message. It's a model failure, but it wasn't obvious from the original error.
- Fixed error for usage information in CamelCase from OpenAI-compatible servers (the Gemini proxy now sends it in CamelCase).

## [0.69.0]

Expand Down
2 changes: 1 addition & 1 deletion Project.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
name = "PromptingTools"
uuid = "670122d1-24a8-4d70-bfce-740807c42192"
authors = ["J S @svilupp and contributors"]
version = "0.69.0"
version = "0.69.1"

[deps]
AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
Expand Down
46 changes: 39 additions & 7 deletions src/llm_openai.jl
Original file line number Diff line number Diff line change
Expand Up @@ -176,9 +176,19 @@ function response_to_message(schema::AbstractOpenAISchema,
else
nothing
end
# Extract usage information with default values for tokens
tokens_prompt = 0
tokens_completion = 0
# Merge with response usage if available
if haskey(resp.response, :usage)
response_usage = resp.response[:usage]
# Handle both snake_case and camelCase keys
tokens_prompt = get(response_usage, :prompt_tokens,
get(response_usage, :promptTokens, 0))
tokens_completion = get(response_usage, :completion_tokens,
get(response_usage, :completionTokens, 0))
end
## calculate cost
tokens_prompt = get(resp.response, :usage, Dict(:prompt_tokens => 0))[:prompt_tokens]
tokens_completion = get(resp.response, :usage, Dict(:completion_tokens => 0))[:completion_tokens]
cost = call_cost(tokens_prompt, tokens_completion, model_id)
extras = Dict{Symbol, Any}()
if has_log_prob
Expand Down Expand Up @@ -438,7 +448,9 @@ function aiembed(prompt_schema::AbstractOpenAISchema,
model_id;
http_kwargs,
api_kwargs...)
tokens_prompt = get(r.response, :usage, Dict(:prompt_tokens => 0))[:prompt_tokens]
tokens_prompt = haskey(r.response, :usage) ?
get(
r.response[:usage], :prompt_tokens, get(r.response[:usage], :promptTokens, 0)) : 0
msg = DataMessage(;
content = mapreduce(x -> postprocess(x[:embedding]), hcat, r.response[:data]),
status = Int(r.status),
Expand Down Expand Up @@ -844,9 +856,19 @@ function response_to_message(schema::AbstractOpenAISchema,
else
nothing
end
# Extract usage information with default values for tokens
tokens_prompt = 0
tokens_completion = 0
# Merge with response usage if available
if haskey(resp.response, :usage)
response_usage = resp.response[:usage]
# Handle both snake_case and camelCase keys
tokens_prompt = get(response_usage, :prompt_tokens,
get(response_usage, :promptTokens, 0))
tokens_completion = get(response_usage, :completion_tokens,
get(response_usage, :completionTokens, 0))
end
## calculate cost
tokens_prompt = get(resp.response, :usage, Dict(:prompt_tokens => 0))[:prompt_tokens]
tokens_completion = get(resp.response, :usage, Dict(:completion_tokens => 0))[:completion_tokens]
cost = call_cost(tokens_prompt, tokens_completion, model_id)
# "Safe" parsing of the response - it still fails if JSON is invalid
tools_array = if json_mode == true
Expand Down Expand Up @@ -1490,9 +1512,19 @@ function response_to_message(schema::AbstractOpenAISchema,
else
nothing
end
# Extract usage information with default values for tokens
tokens_prompt = 0
tokens_completion = 0
# Merge with response usage if available
if haskey(resp.response, :usage)
response_usage = resp.response[:usage]
# Handle both snake_case and camelCase keys
tokens_prompt = get(response_usage, :prompt_tokens,
get(response_usage, :promptTokens, 0))
tokens_completion = get(response_usage, :completion_tokens,
get(response_usage, :completionTokens, 0))
end
## calculate cost
tokens_prompt = get(resp.response, :usage, Dict(:prompt_tokens => 0))[:prompt_tokens]
tokens_completion = get(resp.response, :usage, Dict(:completion_tokens => 0))[:completion_tokens]
cost = call_cost(tokens_prompt, tokens_completion, model_id)
# "Safe" parsing of the response - it still fails if JSON is invalid
has_tools = haskey(choice[:message], :tool_calls) &&
Expand Down
12 changes: 12 additions & 0 deletions test/llm_openai.jl
Original file line number Diff line number Diff line change
Expand Up @@ -434,6 +434,18 @@ end
@test msg.sample_id == nothing
@test msg.cost == call_cost(2, 1, "gpt4t")

## CamelCase usage keys
mock_response2 = (;
response = Dict(:choices => [mock_choice],
:usage => Dict(:totalTokens => 3, :promptTokens => 2, :completionTokens => 1)),
status = 200)
msg2 = response_to_message(OpenAISchema(),
AIMessage,
mock_choice,
mock_response2;
model_id = "gpt4t")
    @test msg2.tokens == (2, 1)

# Test without logprobs
choice = deepcopy(mock_choice)
delete!(choice, :logprobs)
Expand Down

2 comments on commit 045379e

@svilupp
Copy link
Owner Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@JuliaRegistrator register

Release notes:

Fixed

  • Added assertion in response_to_message for missing :tool_calls key in the response message. It's a model failure, but it wasn't obvious from the original error.
  • Fixed error for usage information in CamelCase from OpenAI servers (Gemini proxy now sends it in CamelCase).

Commits

@JuliaRegistrator
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Registration pull request created: JuliaRegistries/General/121810

Tagging

After the above pull request is merged, it is recommended that a tag is created on this repository for the registered package version.

This will be done automatically if the Julia TagBot GitHub Action is installed, or can be done manually through the github interface, or via:

git tag -a v0.69.1 -m "<description of version>" 045379eac021f614c82e5d8672b17d06ff5f666c
git push origin v0.69.1

Please sign in to comment.