fix: Better handling of multiple messages
teilomillet committed Dec 27, 2024
1 parent 0a0d0c1 commit 17a367e
Showing 2 changed files with 345 additions and 45 deletions.
82 changes: 50 additions & 32 deletions server/processing/processor.go
@@ -10,7 +10,6 @@ import (
 
 	"github.com/teilomillet/gollm"
 	"github.com/teilomillet/hapax/config"
-	"github.com/teilomillet/hapax/server/middleware"
 )
 
 // Processor handles request processing and response formatting for LLM interactions.
@@ -91,51 +90,70 @@ func (p *Processor) ProcessRequest(ctx context.Context, req *Request) (*Response, error) {
 		return nil, fmt.Errorf("request cannot be nil")
 	}
 
-	// Select the appropriate template, falling back to default
-	tmpl := p.templates["default"]
-	if t, ok := p.templates[req.Type]; ok {
-		tmpl = t
-	}
-	if tmpl == nil {
-		return nil, fmt.Errorf("no template found for type: %s", req.Type)
-	}
-
-	// Execute the template with the request data
-	var buf bytes.Buffer
-	err := tmpl.Execute(&buf, req)
-	if err != nil {
-		return nil, fmt.Errorf("template execution failed: %w", err)
-	}
-
-	// Create an LLM prompt with system context
-	prompt := &gollm.Prompt{
-		Messages: []gollm.PromptMessage{
-			{
-				Role:    "system",
-				Content: p.defaultPrompt,
-			},
-			{
-				Role:    "user",
-				Content: buf.String(),
-			},
-		},
-	}
-
-	// Pass timeout header to LLM context if present
-	if timeoutHeader := ctx.Value("X-Test-Timeout"); timeoutHeader != nil {
-		ctx = context.WithValue(ctx, middleware.XTestTimeoutKey, timeoutHeader)
-	}
+	var promptMessages []gollm.PromptMessage
+
+	// Always start with system prompt if we have one
+	if p.defaultPrompt != "" {
+		promptMessages = append(promptMessages, gollm.PromptMessage{
+			Role:    "system",
+			Content: p.defaultPrompt,
+		})
+	}
+
+	// Now we have two clear paths - either conversation or single input
+	if len(req.Messages) > 0 {
+		// For conversations, we just need to convert the messages directly
+		for _, msg := range req.Messages {
+			promptMessages = append(promptMessages, gollm.PromptMessage{
+				Role:    msg.Role,
+				Content: msg.Content,
+			})
+		}
+	} else if req.Input != "" {
+		// For single inputs, we still use the template system
+		tmpl := p.templates["default"]
+		if t, ok := p.templates[req.Type]; ok {
+			tmpl = t
+		}
+		if tmpl == nil {
+			return nil, fmt.Errorf("no template found for type: %s", req.Type)
+		}
+
+		var buf bytes.Buffer
+		if err := tmpl.Execute(&buf, req); err != nil {
+			return nil, fmt.Errorf("template execution failed: %w", err)
+		}
+
+		promptMessages = append(promptMessages, gollm.PromptMessage{
+			Role:    "user",
+			Content: buf.String(),
+		})
+	} else {
+		return nil, fmt.Errorf("request must contain either messages or input")
+	}
 
 	// Send request to LLM
+	prompt := &gollm.Prompt{Messages: promptMessages}
+
 	response, err := p.llm.Generate(ctx, prompt)
 	if err != nil {
 		return nil, fmt.Errorf("LLM processing failed: %w", err)
 	}
 
 	// Apply response formatting
 	return p.formatResponse(response), nil
 }
+
+// Helper function to convert our Message type to gollm.PromptMessage
+func convertMessages(messages []Message) []gollm.PromptMessage {
+	promptMessages := make([]gollm.PromptMessage, len(messages))
+	for i, msg := range messages {
+		promptMessages[i] = gollm.PromptMessage{
+			Role:    msg.Role,
+			Content: msg.Content,
+		}
+	}
+	return promptMessages
+}
 
 // formatResponse applies configured formatting options to the LLM response:
 // 1. Cleans JSON if enabled (removes markdown blocks, formats JSON)
 // 2. Trims whitespace if enabled
Check failure (GitHub Actions / Test & Lint) on line 146 in server/processing/processor.go: func `convertMessages` is unused (unused)
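The Test & Lint failure above is reported because the new convertMessages helper is never called. A possible follow-up (a sketch only, not part of this commit) would be to have the conversation branch reuse the helper instead of converting the messages inline, roughly:

 	if len(req.Messages) > 0 {
-		// For conversations, we just need to convert the messages directly
-		for _, msg := range req.Messages {
-			promptMessages = append(promptMessages, gollm.PromptMessage{
-				Role:    msg.Role,
-				Content: msg.Content,
-			})
-		}
+		// Reuse the helper so convertMessages is no longer unused
+		promptMessages = append(promptMessages, convertMessages(req.Messages)...)
 	} else if req.Input != "" {

Since convertMessages returns []gollm.PromptMessage, the variadic append keeps the behavior identical while removing the duplicated conversion loop.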
(The diff for the second changed file did not load and is not shown here.)
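To illustrate the behavior this commit introduces, here is a minimal, self-contained sketch of how a multi-message request is now assembled into a prompt: the configured system prompt (when non-empty) is placed first, and the conversation messages are copied through unchanged. The Message and PromptMessage structs and the example conversation below are stand-ins invented for illustration; the real types live in server/processing and gollm.

package main

import "fmt"

// Local stand-ins for processing.Message and gollm.PromptMessage,
// used only to keep this sketch self-contained.
type Message struct{ Role, Content string }
type PromptMessage struct{ Role, Content string }

// buildPromptMessages mirrors the conversation branch of ProcessRequest:
// prepend the system prompt when one is configured, then copy the
// request messages through in order.
func buildPromptMessages(defaultPrompt string, msgs []Message) []PromptMessage {
	var out []PromptMessage
	if defaultPrompt != "" {
		out = append(out, PromptMessage{Role: "system", Content: defaultPrompt})
	}
	for _, m := range msgs {
		out = append(out, PromptMessage{Role: m.Role, Content: m.Content})
	}
	return out
}

func main() {
	conversation := []Message{
		{Role: "user", Content: "What does Hapax do?"},
		{Role: "assistant", Content: "It routes requests to an LLM."},
		{Role: "user", Content: "And how are multiple messages handled?"},
	}
	for _, pm := range buildPromptMessages("You are a helpful assistant.", conversation) {
		fmt.Printf("%s: %s\n", pm.Role, pm.Content)
	}
}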

0 comments on commit 17a367e
