Skip to content

Commit

Permalink
Merge pull request #96 from teknologi-umum/rearchitect
Browse files Browse the repository at this point in the history
Refactor to choreography architecture
  • Loading branch information
ronnygunawan authored Dec 31, 2023
2 parents 0a73f85 + 0da53aa commit 3a0da7f
Show file tree
Hide file tree
Showing 101 changed files with 3,418 additions and 2,650 deletions.
6 changes: 4 additions & 2 deletions .editorconfig
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
[*]
indent_style = space
indent_size = 2
end_of_line=lf
# XAML files
[*.xaml]
indent_size = 4
Expand Down Expand Up @@ -243,11 +242,14 @@ dotnet_diagnostic.RG0021.severity = error
dotnet_diagnostic.RG0022.severity = error

# IDE0055: Fix formatting
dotnet_diagnostic.IDE0055.severity = warning
dotnet_diagnostic.IDE0055.severity = none

# CA1822: Mark members as static
dotnet_diagnostic.CA1822.severity = warning

# CS9124: Parameter is captured into the state of the enclosing type and its value is also used to initialize a field, property, or event.
dotnet_diagnostic.CS9124.severity = none

[*.vb]
# Modifier preferences
visual_basic_preferred_modifier_order = Partial,Default,Private,Protected,Public,Friend,NotOverridable,Overridable,MustOverride,Overloads,Overrides,MustInherit,NotInheritable,Static,Shared,Shadows,ReadOnly,WriteOnly,Dim,Const,WithEvents,Widening,Narrowing,Custom,Async:suggestion
Expand Down
99 changes: 99 additions & 0 deletions BotNet.CommandHandlers/AI/OpenAI/AskCommandHandler.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
using BotNet.Commands;
using BotNet.Commands.AI.OpenAI;
using BotNet.Commands.BotUpdate.Message;
using BotNet.Commands.CommandPrioritization;
using BotNet.Services.MarkdownV2;
using BotNet.Services.OpenAI;
using BotNet.Services.OpenAI.Models;
using BotNet.Services.RateLimit;
using Microsoft.Extensions.Logging;
using Telegram.Bot;
using Telegram.Bot.Types;
using Telegram.Bot.Types.Enums;

namespace BotNet.CommandHandlers.AI.OpenAI {
  /// <summary>
  /// Handles <see cref="AskCommand"/>: forwards the prompt plus up to 10 messages of
  /// thread history to OpenAI chat completion, then edits a placeholder Telegram
  /// message with the model's reply and records it for thread tracking.
  /// </summary>
  public sealed class AskCommandHandler(
    ITelegramBotClient telegramBotClient,
    OpenAIClient openAIClient,
    ITelegramMessageCache telegramMessageCache,
    ILogger<AskCommandHandler> logger
  ) : ICommandHandler<AskCommand> {
    private readonly ITelegramBotClient _telegramBotClient = telegramBotClient;
    private readonly OpenAIClient _openAIClient = openAIClient;
    private readonly ITelegramMessageCache _telegramMessageCache = telegramMessageCache;
    private readonly ILogger<AskCommandHandler> _logger = logger;

    public async Task Handle(AskCommand command, CancellationToken cancellationToken) {
      // Enforce the shared per-user-per-chat rate limit before doing any work.
      try {
        OpenAITextPromptHandler.CHAT_RATE_LIMITER.ValidateActionRate(
          chatId: command.ChatId,
          userId: command.SenderId
        );
      } catch (RateLimitExceededException exc) {
        await _telegramBotClient.SendTextMessageAsync(
          chatId: command.ChatId,
          text: $"<code>Anda terlalu banyak memanggil AI. Coba lagi {exc.Cooldown}.</code>",
          parseMode: ParseMode.Html,
          replyToMessageId: command.PromptMessageId,
          cancellationToken: cancellationToken
        );
        return;
      }

      // Fire and forget
      Task _ = Task.Run(async () => {
        try {
          // Build the conversation in chronological order: system instruction,
          // then the thread history (oldest first), then the triggering prompt.
          // Previously the prompt was added before the history, which presented
          // the conversation to the model out of order.
          List<ChatMessage> messages = [
            ChatMessage.FromText("system", "The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.")
          ];

          messages.AddRange(
            from message in command.Thread.Take(10).Reverse()
            select ChatMessage.FromText(
              role: message.SenderName switch {
                "AI" or "Bot" or "GPT" => "assistant",
                _ => "user"
              },
              text: message.Text
            )
          );

          messages.Add(ChatMessage.FromText("user", command.Prompt));

          // Placeholder message; edited in place once the AI responds.
          Message responseMessage = await _telegramBotClient.SendTextMessageAsync(
            chatId: command.ChatId,
            text: MarkdownV2Sanitizer.Sanitize("… ⏳"),
            parseMode: ParseMode.MarkdownV2,
            replyToMessageId: command.PromptMessageId,
            cancellationToken: cancellationToken
          );

          // VIP and home-group chats get the stronger model.
          string response = await _openAIClient.ChatAsync(
            model: command.CommandPriority switch {
              CommandPriority.VIPChat or CommandPriority.HomeGroupChat => "gpt-4-1106-preview",
              _ => "gpt-3.5-turbo"
            },
            messages: messages,
            maxTokens: 512,
            cancellationToken: cancellationToken
          );

          // Finalize message
          responseMessage = await _telegramBotClient.EditMessageTextAsync(
            chatId: command.ChatId,
            messageId: responseMessage.MessageId,
            text: MarkdownV2Sanitizer.Sanitize(response),
            parseMode: ParseMode.MarkdownV2,
            cancellationToken: cancellationToken
          );

          // Track thread
          _telegramMessageCache.Add(
            message: AIResponseMessage.FromMessage(responseMessage, "AI")
          );
        } catch (OperationCanceledException) {
          // Terminate gracefully on shutdown/cancellation.
        } catch (Exception exc) {
          // The task is discarded, so any unhandled exception would be
          // unobserved; log it here instead of letting it vanish.
          _logger.LogError(exc, "Could not handle Ask command");
        }
      });
    }
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
using BotNet.Commands;
using BotNet.Commands.AI.OpenAI;
using BotNet.Commands.BotUpdate.Message;
using BotNet.Services.OpenAI.Skills;
using BotNet.Services.RateLimit;
using Microsoft.Extensions.Logging;
using Telegram.Bot;
using Telegram.Bot.Types;
using Telegram.Bot.Types.Enums;

namespace BotNet.CommandHandlers.AI.OpenAI {
  /// <summary>
  /// Handles <see cref="OpenAIImageGenerationPrompt"/>: generates an image via
  /// <see cref="ImageGenerationBot"/>, deletes the "busy" placeholder message,
  /// sends the generated photo, and records it for thread tracking.
  /// </summary>
  public sealed class OpenAIImageGenerationPromptHandler(
    ITelegramBotClient telegramBotClient,
    ImageGenerationBot imageGenerationBot,
    ITelegramMessageCache telegramMessageCache,
    ILogger<OpenAIImageGenerationPromptHandler> logger
  ) : ICommandHandler<OpenAIImageGenerationPrompt> {
    // At most one image generation per user per 5 minutes.
    private static readonly RateLimiter IMAGE_GENERATION_RATE_LIMITER = RateLimiter.PerUser(1, TimeSpan.FromMinutes(5));

    private readonly ITelegramBotClient _telegramBotClient = telegramBotClient;
    private readonly ImageGenerationBot _imageGenerationBot = imageGenerationBot;
    private readonly ITelegramMessageCache _telegramMessageCache = telegramMessageCache;
    private readonly ILogger<OpenAIImageGenerationPromptHandler> _logger = logger;

    public Task Handle(OpenAIImageGenerationPrompt command, CancellationToken cancellationToken) {
      // Enforce the per-user rate limit before doing any work.
      try {
        IMAGE_GENERATION_RATE_LIMITER.ValidateActionRate(command.ChatId, command.SenderId);
      } catch (RateLimitExceededException exc) {
        return _telegramBotClient.SendTextMessageAsync(
          chatId: command.ChatId,
          text: $"Anda belum mendapat giliran. Coba lagi {exc.Cooldown}.",
          parseMode: ParseMode.Html,
          replyToMessageId: command.PromptMessageId,
          cancellationToken: cancellationToken
        );
      }

      // Fire and forget
      Task.Run(async () => {
        try {
          Uri generatedImageUrl;
          try {
            generatedImageUrl = await _imageGenerationBot.GenerateImageAsync(
              prompt: command.Prompt,
              cancellationToken: cancellationToken
            );
          } catch (Exception exc) {
            // Surface the failure to the user by replacing the busy message.
            _logger.LogError(exc, "Could not generate image");
            await _telegramBotClient.EditMessageTextAsync(
              chatId: command.ChatId,
              messageId: command.ResponseMessageId,
              text: "<code>Failed to generate image.</code>",
              parseMode: ParseMode.Html,
              cancellationToken: cancellationToken
            );
            return;
          }

          // Delete busy message
          try {
            await _telegramBotClient.DeleteMessageAsync(
              chatId: command.ChatId,
              messageId: command.ResponseMessageId,
              cancellationToken: cancellationToken
            );
          } catch (OperationCanceledException) {
            return;
          }

          // Send generated image
          Message responseMessage = await _telegramBotClient.SendPhotoAsync(
            chatId: command.ChatId,
            photo: new InputFileUrl(generatedImageUrl),
            replyToMessageId: command.PromptMessageId,
            cancellationToken: cancellationToken
          );

          // Track thread
          _telegramMessageCache.Add(
            NormalMessage.FromMessage(responseMessage)
          );
        } catch (OperationCanceledException) {
          // Terminate gracefully
          // TODO: tie up loose ends
        } catch (Exception exc) {
          // The task is discarded, so failures in DeleteMessageAsync,
          // SendPhotoAsync, or the cache would otherwise be unobserved.
          _logger.LogError(exc, "Could not handle image generation prompt");
        }
      });

      return Task.CompletedTask;
    }
  }
}
151 changes: 151 additions & 0 deletions BotNet.CommandHandlers/AI/OpenAI/OpenAITextPromptHandler.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,151 @@
using BotNet.Commands;
using BotNet.Commands.AI.OpenAI;
using BotNet.Commands.AI.Stability;
using BotNet.Commands.BotUpdate.Message;
using BotNet.Commands.CommandPrioritization;
using BotNet.Services.MarkdownV2;
using BotNet.Services.OpenAI;
using BotNet.Services.OpenAI.Models;
using BotNet.Services.RateLimit;
using Microsoft.Extensions.Logging;
using Telegram.Bot;
using Telegram.Bot.Types;
using Telegram.Bot.Types.Enums;

namespace BotNet.CommandHandlers.AI.OpenAI {
  /// <summary>
  /// Handles <see cref="OpenAITextPrompt"/>: sends the prompt plus thread history
  /// to OpenAI chat completion. If the model responds with an image generation
  /// intent ("ImageGeneration:"), dispatches a follow-up image command according
  /// to the caller's priority; otherwise edits the placeholder with the reply.
  /// </summary>
  public sealed class OpenAITextPromptHandler(
    ITelegramBotClient telegramBotClient,
    ICommandQueue commandQueue,
    ITelegramMessageCache telegramMessageCache,
    OpenAIClient openAIClient,
    ILogger<OpenAITextPromptHandler> logger
  ) : ICommandHandler<OpenAITextPrompt> {
    // Shared with AskCommandHandler: 5 calls per user per chat per 15 minutes.
    internal static readonly RateLimiter CHAT_RATE_LIMITER = RateLimiter.PerUserPerChat(5, TimeSpan.FromMinutes(15));

    private readonly ITelegramBotClient _telegramBotClient = telegramBotClient;
    private readonly ICommandQueue _commandQueue = commandQueue;
    private readonly ITelegramMessageCache _telegramMessageCache = telegramMessageCache;
    private readonly OpenAIClient _openAIClient = openAIClient;
    private readonly ILogger<OpenAITextPromptHandler> _logger = logger;

    public Task Handle(OpenAITextPrompt command, CancellationToken cancellationToken) {
      // Enforce the shared per-user-per-chat rate limit before doing any work.
      try {
        CHAT_RATE_LIMITER.ValidateActionRate(
          chatId: command.ChatId,
          userId: command.SenderId
        );
      } catch (RateLimitExceededException exc) {
        return _telegramBotClient.SendTextMessageAsync(
          chatId: command.ChatId,
          text: $"<code>Anda terlalu banyak memanggil AI. Coba lagi {exc.Cooldown}.</code>",
          parseMode: ParseMode.Html,
          replyToMessageId: command.PromptMessageId,
          cancellationToken: cancellationToken
        );
      }

      // Fire and forget
      Task.Run(async () => {
        try {
          // Build the conversation in chronological order: system instruction,
          // then the thread history (oldest first), then the triggering prompt.
          // Previously the prompt was added before the history, which presented
          // the conversation to the model out of order.
          List<ChatMessage> messages = [
            ChatMessage.FromText("system", "The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly. When user asks for an image to be generated, the AI assistant should respond with \"ImageGeneration:\" followed by comma separated list of features to be expected from the generated image.")
          ];

          messages.AddRange(
            from message in command.Thread.Take(10).Reverse()
            select ChatMessage.FromText(
              role: message.SenderName switch {
                "AI" or "Bot" or "GPT" => "assistant",
                _ => "user"
              },
              text: message.Text
            )
          );

          messages.Add(ChatMessage.FromText("user", command.Prompt));

          // Placeholder message; edited in place once the AI responds.
          Message responseMessage = await _telegramBotClient.SendTextMessageAsync(
            chatId: command.ChatId,
            text: MarkdownV2Sanitizer.Sanitize("… ⏳"),
            parseMode: ParseMode.MarkdownV2,
            replyToMessageId: command.PromptMessageId,
            cancellationToken: cancellationToken
          );

          // VIP and home-group chats get the stronger model.
          string response = await _openAIClient.ChatAsync(
            model: command.CommandPriority switch {
              CommandPriority.VIPChat or CommandPriority.HomeGroupChat => "gpt-4-1106-preview",
              _ => "gpt-3.5-turbo"
            },
            messages: messages,
            maxTokens: 512,
            cancellationToken: cancellationToken
          );

          // Handle image generation intent: the image backend depends on the
          // caller's priority; other callers are told the feature is unavailable.
          if (response.StartsWith("ImageGeneration:", StringComparison.Ordinal)) {
            string imageGenerationPrompt = response.Substring(response.IndexOf(':') + 1).Trim();
            switch (command.CommandPriority) {
              case CommandPriority.VIPChat:
                await _commandQueue.DispatchAsync(
                  command: new OpenAIImageGenerationPrompt(
                    callSign: command.CallSign,
                    prompt: imageGenerationPrompt,
                    promptMessageId: command.PromptMessageId,
                    responseMessageId: responseMessage.MessageId,
                    chatId: command.ChatId,
                    senderId: command.SenderId,
                    commandPriority: command.CommandPriority
                  )
                );
                break;
              case CommandPriority.HomeGroupChat:
                await _commandQueue.DispatchAsync(
                  command: new StabilityTextToImagePrompt(
                    callSign: command.CallSign,
                    prompt: imageGenerationPrompt,
                    promptMessageId: command.PromptMessageId,
                    responseMessageId: responseMessage.MessageId,
                    chatId: command.ChatId,
                    senderId: command.SenderId,
                    commandPriority: command.CommandPriority
                  )
                );
                break;
              default:
                await _telegramBotClient.EditMessageTextAsync(
                  chatId: command.ChatId,
                  messageId: responseMessage.MessageId,
                  text: MarkdownV2Sanitizer.Sanitize("Image generation tidak bisa dipakai di sini."),
                  parseMode: ParseMode.MarkdownV2,
                  cancellationToken: cancellationToken
                );
                break;
            }
            return;
          }

          // Finalize message
          responseMessage = await _telegramBotClient.EditMessageTextAsync(
            chatId: command.ChatId,
            messageId: responseMessage.MessageId,
            text: MarkdownV2Sanitizer.Sanitize(response),
            parseMode: ParseMode.MarkdownV2,
            cancellationToken: cancellationToken
          );

          // Track thread
          _telegramMessageCache.Add(
            message: AIResponseMessage.FromMessage(
              responseMessage,
              command.CallSign
            )
          );
        } catch (OperationCanceledException) {
          // Terminate gracefully on shutdown/cancellation.
        } catch (Exception exc) {
          // The task is discarded, so any unhandled exception would be
          // unobserved; log it here instead of letting it vanish.
          _logger.LogError(exc, "Could not handle OpenAI text prompt");
        }
      });

      return Task.CompletedTask;
    }
  }
}
Loading

0 comments on commit 3a0da7f

Please sign in to comment.