Commit
Merge pull request #46 from FormulaMonks/feat/vertex-ai-metadata
feat: add usage/fingerprint metadata to `KurtResult` for `KurtVertexAI` adapter
jemc authored Jun 26, 2024
2 parents 6c760a2 + 843821d commit 9ba96be
Showing 14 changed files with 429 additions and 270 deletions.
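For context, here is a minimal sketch of the result shape this change implies, based only on the snapshot fixtures in this diff. The interface and function names are illustrative, not Kurt's published type definitions; the metadata field names (totalInputTokens, totalOutputTokens) are taken from the fixtures below.

// Illustrative sketch only: assumed shapes, not the library's real types.
interface SketchedResultMetadata {
  totalInputTokens?: number // surfaced from Vertex AI's usageMetadata.promptTokenCount
  totalOutputTokens?: number // surfaced from Vertex AI's usageMetadata.candidatesTokenCount
}

interface SketchedFinalEvent<T = unknown> {
  finished: true
  text: string
  data?: T
  metadata?: SketchedResultMetadata
}

// Hypothetical helper showing how a caller might read the new fields.
function describeUsage(event: SketchedFinalEvent): string {
  const { metadata } = event
  if (!metadata) return "no usage metadata reported"
  return `input tokens: ${metadata.totalInputTokens}, output tokens: ${metadata.totalOutputTokens}`
}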
9 changes: 5 additions & 4 deletions packages/kurt-vertex-ai/spec/generateNaturalLanguage.spec.ts
@@ -8,7 +8,7 @@ describe("KurtVertexAI generateNaturalLanguage", () => {
prompt: "Say hello!",
})
)
expect(result.text).toEqual("Hello! How can I assist you today?")
expect(result.text).toEqual("Hello! 👋 😊\n")
})

test("writes a haiku with high temperature", async () => {
@@ -24,9 +24,10 @@ describe("KurtVertexAI generateNaturalLanguage", () => {
)
expect(result.text).toEqual(
[
"Moon paints silver path,",
"Water sings to sleeping stones,",
"Night sighs on the wind.",
"Moon bathes silver stream,",
"Whispers flow through sleeping wood,",
"Stones dream in the dark.",
"",
].join("\n")
)
})
@@ -50,7 +50,7 @@ describe("KurtVertexAI generateWithOptionalTools", () => {
],
})
)
expect(result.text).toEqual("That's about 324.")
expect(result.text).toEqual("That's about 324. \n")
})

test("calculator (with parallel tool calls)", async () => {
@@ -123,7 +123,7 @@ describe("KurtVertexAI generateWithOptionalTools", () => {
[
"1. 8026256882 divided by 3402398 is 2359.",
"2. 1185835515 divided by 348263 is 3405.",
"3. 90135094495 minus 89944954350 is 190140145.",
"3. 90135094495 minus 89944954350 is 190140145. ",
"",
].join("\n")
)
13 changes: 9 additions & 4 deletions packages/kurt-vertex-ai/spec/snapshots.ts
@@ -44,7 +44,7 @@ export async function snapshotAndMock<T>(
// Here's the data structure we will use to snapshot a request/response cycle.
const snapshot: {
step1Request?: VertexAIRequest
-    step2RawChunks: VertexAIResponseChunkCandidate[]
+    step2RawChunks: VertexAIResponseChunk[]
step3KurtEvents: KurtStreamEvent<T>[]
} = {
step1Request: undefined,
@@ -75,7 +75,7 @@ export async function snapshotAndMock<T>(
snapshot.step2RawChunks = savedRawChunks
async function* generator(): AsyncIterable<VertexAIResponseChunk> {
for await (const rawChunk of savedRawChunks) {
-        yield { candidates: [rawChunk] }
+        yield rawChunk
}
}
return { stream: generator() }
@@ -95,9 +95,14 @@ export async function snapshotAndMock<T>(
for await (const rawEvent of response.stream) {
const candidate = rawEvent.candidates?.at(0)
if (candidate) {
-        const rawChunk = { ...candidate }
+        const partialCandidate = { ...candidate }
// biome-ignore lint/performance/noDelete: we don't care about performance in this test code
-        delete rawChunk.safetyRatings
+        delete partialCandidate.safetyRatings
+
+        const rawChunk = {
+          candidates: [partialCandidate],
+          usageMetadata: rawEvent.usageMetadata,
+        }
snapshot.step2RawChunks.push(rawChunk)
}

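The type change above (VertexAIResponseChunkCandidate[] to VertexAIResponseChunk[]) means the snapshots now record whole stream chunks rather than bare candidates. A rough sketch of the distinction, with field names taken from this diff and the fixtures below; these are assumed shapes, not the adapter's actual declarations:

// Assumed shapes, for illustration only.
interface SketchedCandidate {
  content?: { role: string; parts: Array<Record<string, unknown>> }
  index?: number
  finishReason?: string
}

interface SketchedUsageMetadata {
  promptTokenCount?: number
  candidatesTokenCount?: number
  totalTokenCount?: number
}

// Before: the snapshots stored bare candidates (SketchedCandidate values).
// After: they store whole chunks, so per-response usage metadata survives
// the record/replay round trip and shows up in the fixtures below.
interface SketchedResponseChunk {
  candidates?: SketchedCandidate[]
  usageMetadata?: SketchedUsageMetadata
}

Replaying whole chunks (yield rawChunk) is what lets the mocked stream hand usage metadata back to the adapter during tests.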
@@ -8,24 +8,37 @@ step1Request:
parts:
- text: Say hello!
step2RawChunks:
- content:
role: model
parts:
- text: Hello!
index: 0
- content:
role: model
parts:
- text: " How can I assist you today?"
index: 0
- content:
role: model
parts:
- text: ""
finishReason: STOP
index: 0
- candidates:
- content:
role: model
parts:
- text: Hello
index: 0
- candidates:
- content:
role: model
parts:
- text: |
! 👋 😊
index: 0
- candidates:
- content:
role: model
parts:
- text: ""
finishReason: STOP
index: 0
usageMetadata:
promptTokenCount: 3
candidatesTokenCount: 7
totalTokenCount: 10
step3KurtEvents:
- chunk: Hello!
- chunk: " How can I assist you today?"
- chunk: Hello
- chunk: |
! 👋 😊
- finished: true
text: Hello! How can I assist you today?
text: |
Hello! 👋 😊
metadata:
totalInputTokens: 3
totalOutputTokens: 7
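The fixture above also shows the token-count mapping: Vertex AI's usageMetadata.promptTokenCount (3) surfaces as totalInputTokens, and candidatesTokenCount (7) as totalOutputTokens. A minimal sketch of that mapping, with the function name assumed for illustration rather than taken from the adapter:

// Hypothetical mapping implied by the snapshot above; not the adapter's code.
function toKurtMetadataSketch(usage: {
  promptTokenCount?: number
  candidatesTokenCount?: number
}) {
  return {
    totalInputTokens: usage.promptTokenCount, // 3 in this fixture
    totalOutputTokens: usage.candidatesTokenCount, // 7 in this fixture
  }
}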
@@ -8,41 +8,52 @@ step1Request:
parts:
- text: Compose a haiku about a mountain stream at night.
step2RawChunks:
- content:
role: model
parts:
- text: Moon
index: 0
- content:
role: model
parts:
- text: |2-
paints silver path,
Water sings to sleeping stones,
Night sighs on the
index: 0
- content:
role: model
parts:
- text: |2-
wind.
index: 0
- content:
role: model
parts:
- text: ""
finishReason: STOP
index: 0
- candidates:
- content:
role: model
parts:
- text: Moon
index: 0
- candidates:
- content:
role: model
parts:
- text: |2-
bathes silver stream,
Whispers flow through sleeping wood,
Stones dream
index: 0
- candidates:
- content:
role: model
parts:
- text: |2
in the dark.
index: 0
- candidates:
- content:
role: model
parts:
- text: ""
finishReason: STOP
index: 0
usageMetadata:
promptTokenCount: 10
candidatesTokenCount: 23
totalTokenCount: 33
step3KurtEvents:
- chunk: Moon
- chunk: |2-
paints silver path,
Water sings to sleeping stones,
Night sighs on the
- chunk: |2-
wind.
bathes silver stream,
Whispers flow through sleeping wood,
Stones dream
- chunk: |2
in the dark.
- finished: true
text: |-
Moon paints silver path,
Water sings to sleeping stones,
Night sighs on the wind.
text: |
Moon bathes silver stream,
Whispers flow through sleeping wood,
Stones dream in the dark.
metadata:
totalInputTokens: 10
totalOutputTokens: 23
@@ -25,17 +25,32 @@ step1Request:
allowed_function_names:
- structured_data
step2RawChunks:
- content:
role: model
parts:
- functionCall:
name: structured_data
args:
say: hello
index: 0
- candidates:
- content:
role: model
parts:
- functionCall:
name: structured_data
args:
say: hello
index: 0
- candidates:
- content:
role: model
parts:
- text: ""
finishReason: STOP
index: 0
usageMetadata:
promptTokenCount: 16
candidatesTokenCount: 5
totalTokenCount: 21
step3KurtEvents:
- chunk: '{"say":"hello"}'
- finished: true
text: '{"say":"hello"}'
data:
say: hello
metadata:
totalInputTokens: 16
totalOutputTokens: 5
@@ -25,19 +25,34 @@ step1Request:
allowed_function_names:
- structured_data
step2RawChunks:
- content:
role: model
parts:
- functionCall:
name: call
args:
function: structured_data
say: hello
extension: default_api
index: 0
- candidates:
- content:
role: model
parts:
- functionCall:
name: call
args:
function: structured_data
say: hello
extension: default_api
index: 0
- candidates:
- content:
role: model
parts:
- text: ""
finishReason: STOP
index: 0
usageMetadata:
promptTokenCount: 16
candidatesTokenCount: 8
totalTokenCount: 24
step3KurtEvents:
- chunk: '{"say":"hello"}'
- finished: true
text: '{"say":"hello"}'
data:
say: hello
metadata:
totalInputTokens: 16
totalOutputTokens: 8
@@ -25,18 +25,33 @@ step1Request:
allowed_function_names:
- structured_data
step2RawChunks:
- content:
role: model
parts:
- functionCall:
name: call
args: { say: "hello" }
function: structured_data
extension: default_api"
index: 0
- candidates:
- content:
role: model
parts:
- functionCall:
name: call
args: { say: "hello" }
function: structured_data
extension: default_api"
index: 0
- candidates:
- content:
role: model
parts:
- text: ""
finishReason: STOP
index: 0
usageMetadata:
promptTokenCount: 16
candidatesTokenCount: 8
totalTokenCount: 21
step3KurtEvents:
- chunk: '{"say":"hello"}'
- finished: true
text: '{"say":"hello"}'
data:
say: hello
metadata:
totalInputTokens: 16
totalOutputTokens: 8