Skip to content

Commit

Permalink
Merge pull request #57 from FormulaMonks/fix/vertex-ai-system-prompt
Browse files Browse the repository at this point in the history
fix: system prompt formatting for Vertex AI
  • Loading branch information
jemc authored Nov 27, 2024
2 parents b12320d + 4e5ab14 commit 02bd154
Show file tree
Hide file tree
Showing 3 changed files with 67 additions and 1 deletion.
10 changes: 10 additions & 0 deletions packages/kurt-vertex-ai/spec/generateNaturalLanguage.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,16 @@ describe("KurtVertexAI generateNaturalLanguage", () => {
expect(result.text).toEqual("Hello! 👋 😊\n")
})

// Regression test for the Vertex AI system-prompt fix: the system prompt
// must reach the provider (as its own systemInstruction) without altering
// the generated text for a plain prompt.
test("properly formats a system prompt for Vertex AI", async () => {
  const result = await snapshotAndMock((kurt) => {
    const request = {
      systemPrompt: "Don't be evil.", // sometimes Google needs to remind themselves
      prompt: "Say hello!",
    }
    return kurt.generateNaturalLanguage(request)
  })
  expect(result.text).toEqual("Hello! 👋 😊\n")
})

test("writes a haiku with high temperature", async () => {
const result = await snapshotAndMock((kurt) =>
kurt.generateNaturalLanguage({
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
# Snapshot fixture for the "properly formats a system prompt" spec.
# step1Request: the request Kurt sends to Vertex AI — note the system prompt
#   is carried in a dedicated `systemInstruction` entry, NOT in `contents`.
# step2RawChunks: the mocked raw stream chunks returned by the SDK.
# step3KurtEvents: the Kurt events derived from those chunks.
# NOTE(review): indentation below is reconstructed — the scraped diff view
# stripped leading whitespace; confirm against the repository file.
step1Request:
  generationConfig:
    maxOutputTokens: 4096
    temperature: 0.5
    topP: 0.95
  contents:
    - role: user
      parts:
        - text: Say hello!
  # System prompt travels separately from the user/model turn history.
  systemInstruction:
    role: system
    parts:
      - text: Don't be evil.
step2RawChunks:
  # First chunk: partial text, no finish reason yet.
  - candidates:
      - content:
          role: model
          parts:
            - text: Hello
        index: 0
    usageMetadata: {}
  # Final chunk: remaining text plus STOP and token accounting.
  - candidates:
      - content:
          role: model
          parts:
            - text: |
                ! 👋 😊
        finishReason: STOP
        index: 0
    usageMetadata:
      promptTokenCount: 9
      candidatesTokenCount: 6
      totalTokenCount: 15
step3KurtEvents:
  - chunk: Hello
  - chunk: |
      ! 👋 😊
  - finished: true
    text: |
      Hello! 👋 😊
    metadata:
      totalInputTokens: 9
      totalOutputTokens: 6
15 changes: 14 additions & 1 deletion packages/kurt-vertex-ai/src/KurtVertexAI.ts
Original file line number Diff line number Diff line change
Expand Up @@ -91,13 +91,26 @@ export class KurtVertexAI
model: this.options.model,
}) as VertexAIGenerativeModel

// VertexAI requires that system messages be sent as a single message,
// so we filter them out from the main messages array to send separately.
const normalMessages = options.messages.filter((m) => m.role !== "system")
const systemMessages = options.messages.filter((m) => m.role === "system")
// Collapse all system messages into one message by concatenating their
// parts; undefined when no system prompt was given, so the request field
// is simply omitted.
const singleSystemMessage: VertexAIMessage | undefined =
  systemMessages.length === 0
    ? undefined
    : {
        role: "system",
        parts: systemMessages.flatMap((m) => m.parts),
      }

// Build the provider request: sampling config, non-system turns, and the
// optional consolidated system instruction.
const req: VertexAIRequest = {
  generationConfig: {
    maxOutputTokens: options.sampling.maxOutputTokens,
    temperature: options.sampling.temperature,
    topP: options.sampling.topP,
  },
  // NOTE(review): the next line is the pre-fix version retained by the
  // scraped diff view; only `contents: normalMessages,` exists post-fix.
  contents: options.messages,
  contents: normalMessages,
  systemInstruction: singleSystemMessage,
}

const tools = Object.values(options.tools)
Expand Down

0 comments on commit 02bd154

Please sign in to comment.