diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000..187667c3
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,13 @@
+.git
+.env
+.DS_Store
+.github
+.vercel
+.wrangler
+dist
+doc
+node_modules
+plugins
+scripts
+wrangler.toml
+config.json
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index ad08e406..8729fca1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -152,3 +152,4 @@ out
/wrangler-test.toml
/dist/index.cjs
/dist/index.d.ts
+/dist/src
diff --git a/Dockerfile b/Dockerfile
index 6d976034..637e0efd 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,16 +1,15 @@
-FROM node:alpine as DEV
-
+FROM node:alpine AS build
WORKDIR /app
-COPY package.json vite.config.ts tsconfig.json ./
-COPY src ./src
-RUN npm install && npm run build:local
+COPY package.json tsconfig.json vite.config.ts ./
+RUN npm install
+COPY src src
+RUN npm run build:local
-FROM node:alpine as PROD
+FROM node:alpine AS production
WORKDIR /app
-COPY --from=DEV /app/dist/index.js /app/dist/index.js
-COPY --from=DEV /app/package.json /app/
-RUN npm install --only=production --omit=dev
-RUN apk add --no-cache sqlite
+COPY package.json ./
+RUN npm install --omit=dev
+COPY --from=build /app/dist ./dist
EXPOSE 8787
-CMD ["npm", "run", "start:dist"]
+CMD ["npm", "run", "start:dist"]
\ No newline at end of file
diff --git a/README.md b/README.md
index 309d463e..19530eb7 100644
--- a/README.md
+++ b/README.md
@@ -28,6 +28,7 @@ You can customize the system initialization information so that your debugged pe
- Serverless deployment
- Multi-platform deployment support (Cloudflare Workers, Vercel, Docker[...](doc/en/PLATFORM.md))
- Adaptation to multiple AI service providers (OpenAI, Azure OpenAI, Cloudflare AI, Cohere, Anthropic, Mistral...)
+- Model switching via inline keyboard
- Custom commands (can achieve quick switching of models, switching of robot presets)
- Support for multiple Telegram bots
- Streaming output
diff --git a/README_CN.md b/README_CN.md
index fefab568..29578034 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -25,6 +25,7 @@ ChatGPT-Telegram-Workers
- 无服务器部署
- 多平台部署支持(Cloudflare Workers, Vercel, Docker[...](doc/cn/PLATFORM.md))
- 适配多种AI服务商(OpenAI, Azure OpenAI, Cloudflare AI, Cohere, Anthropic, Mistral...)
+- 使用 InlineKeyboards 切换模型
- 自定义指令(可以实现快速切换模型,切换机器人预设)
- 支持多个Telegram机器人
- 流式输出
diff --git a/dist/buildinfo.json b/dist/buildinfo.json
index 7e303aa2..0b68ae4d 100644
--- a/dist/buildinfo.json
+++ b/dist/buildinfo.json
@@ -1 +1 @@
-{"sha":"74ab291","timestamp":1731380657}
\ No newline at end of file
+{"sha":"fe9ef45","timestamp":1731464647}
diff --git a/dist/index.js b/dist/index.js
index 309bd0a4..b3635375 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -1,10 +1,10 @@
-const en = { "env": { "system_init_message": "You are a helpful assistant" }, "command": { "help": { "summary": "The following commands are currently supported:\n", "help": "Get command help", "new": "Start a new conversation", "start": "Get your ID and start a new conversation", "img": "Generate an image, the complete command format is `/img image description`, for example `/img beach at moonlight`", "version": "Get the current version number to determine whether to update", "setenv": "Set user configuration, the complete command format is /setenv KEY=VALUE", "setenvs": 'Batch set user configurations, the full format of the command is /setenvs {"KEY1": "VALUE1", "KEY2": "VALUE2"}', "delenv": "Delete user configuration, the complete command format is /delenv KEY", "clearenv": "Clear all user configuration", "system": "View some system information", "redo": "Redo the last conversation, /redo with modified content or directly /redo", "echo": "Echo the message" }, "new": { "new_chat_start": "A new conversation has started" } } };
+const en = { "env": { "system_init_message": "You are a helpful assistant" }, "command": { "help": { "summary": "The following commands are currently supported:\n", "help": "Get command help", "new": "Start a new conversation", "start": "Get your ID and start a new conversation", "img": "Generate an image, the complete command format is `/img image description`, for example `/img beach at moonlight`", "version": "Get the current version number to determine whether to update", "setenv": "Set user configuration, the complete command format is /setenv KEY=VALUE", "setenvs": 'Batch set user configurations, the full format of the command is /setenvs {"KEY1": "VALUE1", "KEY2": "VALUE2"}', "delenv": "Delete user configuration, the complete command format is /delenv KEY", "clearenv": "Clear all user configuration", "system": "View some system information", "redo": "Redo the last conversation, /redo with modified content or directly /redo", "echo": "Echo the message", "models": "Switch the chat model" }, "new": { "new_chat_start": "A new conversation has started" } }, "callback_query": { "open_model_list": "Open model list", "select_provider": "Select a provider:", "select_model": "Select a model:", "change_model": "Change model to " } };
-const pt = { "env": { "system_init_message": "Você é um assistente útil" }, "command": { "help": { "summary": "Os seguintes comandos são suportados atualmente:\n", "help": "Obter ajuda sobre comandos", "new": "Iniciar uma nova conversa", "start": "Obter seu ID e iniciar uma nova conversa", "img": "Gerar uma imagem, o formato completo do comando é `/img descrição da imagem`, por exemplo `/img praia ao luar`", "version": "Obter o número da versão atual para determinar se é necessário atualizar", "setenv": "Definir configuração do usuário, o formato completo do comando é /setenv CHAVE=VALOR", "setenvs": 'Definir configurações do usuário em lote, o formato completo do comando é /setenvs {"CHAVE1": "VALOR1", "CHAVE2": "VALOR2"}', "delenv": "Excluir configuração do usuário, o formato completo do comando é /delenv CHAVE", "clearenv": "Limpar todas as configurações do usuário", "system": "Ver algumas informações do sistema", "redo": "Refazer a última conversa, /redo com conteúdo modificado ou diretamente /redo", "echo": "Repetir a mensagem" }, "new": { "new_chat_start": "Uma nova conversa foi iniciada" } } };
+const pt = { "env": { "system_init_message": "Você é um assistente útil" }, "command": { "help": { "summary": "Os seguintes comandos são suportados atualmente:\n", "help": "Obter ajuda sobre comandos", "new": "Iniciar uma nova conversa", "start": "Obter seu ID e iniciar uma nova conversa", "img": "Gerar uma imagem, o formato completo do comando é `/img descrição da imagem`, por exemplo `/img praia ao luar`", "version": "Obter o número da versão atual para determinar se é necessário atualizar", "setenv": "Definir configuração do usuário, o formato completo do comando é /setenv CHAVE=VALOR", "setenvs": 'Definir configurações do usuário em lote, o formato completo do comando é /setenvs {"CHAVE1": "VALOR1", "CHAVE2": "VALOR2"}', "delenv": "Excluir configuração do usuário, o formato completo do comando é /delenv CHAVE", "clearenv": "Limpar todas as configurações do usuário", "system": "Ver algumas informações do sistema", "redo": "Refazer a última conversa, /redo com conteúdo modificado ou diretamente /redo", "echo": "Repetir a mensagem", "models": "Mudar o modelo de diálogo" }, "new": { "new_chat_start": "Uma nova conversa foi iniciada" } }, "callback_query": { "open_model_list": "Abrir a lista de modelos", "select_provider": "Escolha um fornecedor de modelos:", "select_model": "Escolha um modelo:", "change_model": "O modelo de diálogo já foi modificado para " } };
-const zhHans = { "env": { "system_init_message": "你是一个得力的助手" }, "command": { "help": { "summary": "当前支持以下命令:\n", "help": "获取命令帮助", "new": "发起新的对话", "start": "获取你的ID, 并发起新的对话", "img": "生成一张图片, 命令完整格式为 `/img 图片描述`, 例如`/img 月光下的沙滩`", "version": "获取当前版本号, 判断是否需要更新", "setenv": "设置用户配置,命令完整格式为 /setenv KEY=VALUE", "setenvs": '批量设置用户配置, 命令完整格式为 /setenvs {"KEY1": "VALUE1", "KEY2": "VALUE2"}', "delenv": "删除用户配置,命令完整格式为 /delenv KEY", "clearenv": "清除所有用户配置", "system": "查看当前一些系统信息", "redo": "重做上一次的对话, /redo 加修改过的内容 或者 直接 /redo", "echo": "回显消息" }, "new": { "new_chat_start": "新的对话已经开始" } } };
+const zhHans = { "env": { "system_init_message": "你是一个得力的助手" }, "command": { "help": { "summary": "当前支持以下命令:\n", "help": "获取命令帮助", "new": "发起新的对话", "start": "获取你的ID, 并发起新的对话", "img": "生成一张图片, 命令完整格式为 `/img 图片描述`, 例如`/img 月光下的沙滩`", "version": "获取当前版本号, 判断是否需要更新", "setenv": "设置用户配置,命令完整格式为 /setenv KEY=VALUE", "setenvs": '批量设置用户配置, 命令完整格式为 /setenvs {"KEY1": "VALUE1", "KEY2": "VALUE2"}', "delenv": "删除用户配置,命令完整格式为 /delenv KEY", "clearenv": "清除所有用户配置", "system": "查看当前一些系统信息", "redo": "重做上一次的对话, /redo 加修改过的内容 或者 直接 /redo", "echo": "回显消息", "models": "切换对话模型" }, "new": { "new_chat_start": "新的对话已经开始" } }, "callback_query": { "open_model_list": "打开模型列表", "select_provider": "选择一个模型提供商:", "select_model": "选择一个模型:", "change_model": "对话模型已修改至" } };
-const zhHant = { "env": { "system_init_message": "你是一個得力的助手" }, "command": { "help": { "summary": "當前支持的命令如下:\n", "help": "獲取命令幫助", "new": "開始一個新對話", "start": "獲取您的ID並開始一個新對話", "img": "生成圖片,完整命令格式為`/img 圖片描述`,例如`/img 海灘月光`", "version": "獲取當前版本號確認是否需要更新", "setenv": "設置用戶配置,完整命令格式為/setenv KEY=VALUE", "setenvs": '批量設置用户配置, 命令完整格式為 /setenvs {"KEY1": "VALUE1", "KEY2": "VALUE2"}', "delenv": "刪除用戶配置,完整命令格式為/delenv KEY", "clearenv": "清除所有用戶配置", "system": "查看一些系統信息", "redo": "重做上一次的對話 /redo 加修改過的內容 或者 直接 /redo", "echo": "回显消息" }, "new": { "new_chat_start": "開始一個新對話" } } };
+const zhHant = { "env": { "system_init_message": "你是一個得力的助手" }, "command": { "help": { "summary": "當前支持的命令如下:\n", "help": "獲取命令幫助", "new": "開始一個新對話", "start": "獲取您的ID並開始一個新對話", "img": "生成圖片,完整命令格式為`/img 圖片描述`,例如`/img 海灘月光`", "version": "獲取當前版本號確認是否需要更新", "setenv": "設置用戶配置,完整命令格式為/setenv KEY=VALUE", "setenvs": '批量設置用戶配置, 命令完整格式為 /setenvs {"KEY1": "VALUE1", "KEY2": "VALUE2"}', "delenv": "刪除用戶配置,完整命令格式為/delenv KEY", "clearenv": "清除所有用戶配置", "system": "查看一些系統信息", "redo": "重做上一次的對話 /redo 加修改過的內容 或者 直接 /redo", "echo": "回顯消息", "models": "切換對話模型" }, "new": { "new_chat_start": "開始一個新對話" } }, "callback_query": { "open_model_list": "打開模型清單", "select_provider": "選擇一個模型供應商:", "select_model": "選擇一個模型:", "change_model": "對話模型已經修改至" } };
function loadI18n(lang) {
switch (lang?.toLowerCase()) {
@@ -38,6 +38,7 @@ class EnvironmentConfig {
TELEGRAM_MIN_STREAM_INTERVAL = 0;
TELEGRAM_PHOTO_SIZE_OFFSET = 1;
TELEGRAM_IMAGE_TRANSFER_MODE = "url";
+ MODEL_LIST_COLUMNS = 1;
I_AM_A_GENEROUS_PERSON = false;
CHAT_WHITE_LIST = [];
LOCK_USER_CONFIG_KEYS = [
@@ -69,50 +70,58 @@ class AgentShareConfig {
AI_PROVIDER = "auto";
AI_IMAGE_PROVIDER = "auto";
SYSTEM_INIT_MESSAGE = null;
- SYSTEM_INIT_MESSAGE_ROLE = "system";
}
class OpenAIConfig {
OPENAI_API_KEY = [];
OPENAI_CHAT_MODEL = "gpt-4o-mini";
OPENAI_API_BASE = "https://api.openai.com/v1";
OPENAI_API_EXTRA_PARAMS = {};
+ OPENAI_CHAT_MODELS_LIST = "";
}
-class DalleAIConfig {
- DALL_E_MODEL = "dall-e-2";
+class DallEConfig {
+ DALL_E_MODEL = "dall-e-3";
DALL_E_IMAGE_SIZE = "512x512";
DALL_E_IMAGE_QUALITY = "standard";
DALL_E_IMAGE_STYLE = "vivid";
}
class AzureConfig {
AZURE_API_KEY = null;
- AZURE_COMPLETIONS_API = null;
- AZURE_DALLE_API = null;
+ AZURE_RESOURCE_NAME = null;
+ AZURE_CHAT_MODEL = null;
+ AZURE_IMAGE_MODEL = null;
+ AZURE_API_VERSION = "2024-06-01";
+ AZURE_CHAT_MODELS_LIST = "[]";
}
class WorkersConfig {
CLOUDFLARE_ACCOUNT_ID = null;
CLOUDFLARE_TOKEN = null;
WORKERS_CHAT_MODEL = "@cf/mistral/mistral-7b-instruct-v0.1 ";
WORKERS_IMAGE_MODEL = "@cf/stabilityai/stable-diffusion-xl-base-1.0";
+ WORKERS_CHAT_MODELS_LIST = "";
}
class GeminiConfig {
GOOGLE_API_KEY = null;
- GOOGLE_COMPLETIONS_API = "https://generativelanguage.googleapis.com/v1beta/models/";
- GOOGLE_COMPLETIONS_MODEL = "gemini-pro";
+ GOOGLE_API_BASE = "https://generativelanguage.googleapis.com/v1beta";
+ GOOGLE_COMPLETIONS_MODEL = "gemini-1.5-flash";
+ GOOGLE_CHAT_MODELS_LIST = `["gemini-1.5-flash"]`;
}
class MistralConfig {
MISTRAL_API_KEY = null;
MISTRAL_API_BASE = "https://api.mistral.ai/v1";
MISTRAL_CHAT_MODEL = "mistral-tiny";
+ MISTRAL_CHAT_MODELS_LIST = "";
}
class CohereConfig {
COHERE_API_KEY = null;
COHERE_API_BASE = "https://api.cohere.com/v2";
COHERE_CHAT_MODEL = "command-r-plus";
+ COHERE_CHAT_MODELS_LIST = "";
}
class AnthropicConfig {
ANTHROPIC_API_KEY = null;
ANTHROPIC_API_BASE = "https://api.anthropic.com/v1";
- ANTHROPIC_CHAT_MODEL = "claude-3-haiku-20240307";
+ ANTHROPIC_CHAT_MODEL = "claude-3-5-haiku-latest";
+ ANTHROPIC_CHAT_MODELS_LIST = `["claude-3-5-sonnet-latest", "claude-3-5-haiku-latest"]`;
}
class DefineKeys {
DEFINE_KEYS = [];
@@ -190,13 +199,16 @@ class ConfigMerger {
}
}
+const BUILD_TIMESTAMP = 1731464647;
+const BUILD_VERSION = "fe9ef45";
+
function createAgentUserConfig() {
return Object.assign(
{},
new DefineKeys(),
new AgentShareConfig(),
new OpenAIConfig(),
- new DalleAIConfig(),
+ new DallEConfig(),
new AzureConfig(),
new WorkersConfig(),
new GeminiConfig(),
@@ -211,8 +223,8 @@ const ENV_KEY_MAPPER = {
WORKERS_AI_MODEL: "WORKERS_CHAT_MODEL"
};
class Environment extends EnvironmentConfig {
- BUILD_TIMESTAMP = 1731380657 ;
- BUILD_VERSION = "74ab291" ;
+ BUILD_TIMESTAMP = BUILD_TIMESTAMP;
+ BUILD_VERSION = BUILD_VERSION;
I18N = loadI18n();
PLUGINS_ENV = {};
USER_CONFIG = createAgentUserConfig();
@@ -298,6 +310,24 @@ class Environment extends EnvironmentConfig {
if (!this.USER_CONFIG.SYSTEM_INIT_MESSAGE) {
this.USER_CONFIG.SYSTEM_INIT_MESSAGE = this.I18N?.env?.system_init_message || "You are a helpful assistant";
}
+ if (source.GOOGLE_COMPLETIONS_API && !this.USER_CONFIG.GOOGLE_API_BASE) {
+ this.USER_CONFIG.GOOGLE_API_BASE = source.GOOGLE_COMPLETIONS_API.replace(/\/models\/?$/, "");
+ }
+ if (source.GOOGLE_COMPLETIONS_MODEL && !this.USER_CONFIG.GOOGLE_CHAT_MODEL) {
+ this.USER_CONFIG.GOOGLE_CHAT_MODEL = source.GOOGLE_COMPLETIONS_MODEL;
+ }
+ if (source.AZURE_COMPLETIONS_API && !this.USER_CONFIG.AZURE_CHAT_MODEL) {
+ const url = new URL(source.AZURE_COMPLETIONS_API);
+ this.USER_CONFIG.AZURE_RESOURCE_NAME = url.hostname.split(".").at(0) || null;
+ this.USER_CONFIG.AZURE_CHAT_MODEL = url.pathname.split("/").at(3) || null;
+ this.USER_CONFIG.AZURE_API_VERSION = url.searchParams.get("api-version") || "2024-06-01";
+ }
+ if (source.AZURE_DALLE_API && !this.USER_CONFIG.AZURE_IMAGE_MODEL) {
+ const url = new URL(source.AZURE_DALLE_API);
+ this.USER_CONFIG.AZURE_RESOURCE_NAME = url.hostname.split(".").at(0) || null;
+ this.USER_CONFIG.AZURE_IMAGE_MODEL = url.pathname.split("/").at(3) || null;
+ this.USER_CONFIG.AZURE_API_VERSION = url.searchParams.get("api-version") || "2024-06-01";
+ }
}
}
const ENV = new Environment();
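// A sketch of what the back-compat block above extracts from a legacy
// AZURE_COMPLETIONS_API value (the deployment URL here is hypothetical):
const legacyAzure = new URL("https://my-resource.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2024-06-01");
legacyAzure.hostname.split(".").at(0);       // "my-resource" -> AZURE_RESOURCE_NAME
legacyAzure.pathname.split("/").at(3);       // "gpt-4o"      -> AZURE_CHAT_MODEL
legacyAzure.searchParams.get("api-version"); // "2024-06-01"  -> AZURE_API_VERSION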
@@ -579,18 +609,36 @@ function handleEscape(text, type = "text") {
class MessageContext {
chat_id;
message_id = null;
- reply_to_message_id;
+ reply_to_message_id = null;
parse_mode = null;
allow_sending_without_reply = null;
disable_web_page_preview = null;
- constructor(message) {
- this.chat_id = message.chat.id;
+ constructor(chatID) {
+ this.chat_id = chatID;
+ }
+ static fromMessage(message) {
+ const ctx = new MessageContext(message.chat.id);
if (message.chat.type === "group" || message.chat.type === "supergroup") {
- this.reply_to_message_id = message.message_id;
- this.allow_sending_without_reply = true;
+ ctx.reply_to_message_id = message.message_id;
+ ctx.allow_sending_without_reply = true;
} else {
- this.reply_to_message_id = null;
+ ctx.reply_to_message_id = null;
}
+ return ctx;
+ }
+ static fromCallbackQuery(callbackQuery) {
+ const chat = callbackQuery.message?.chat;
+ if (!chat) {
+ throw new Error("Chat not found");
+ }
+ const ctx = new MessageContext(chat.id);
+ if (chat.type === "group" || chat.type === "supergroup") {
+ ctx.reply_to_message_id = callbackQuery.message.message_id;
+ ctx.allow_sending_without_reply = true;
+ } else {
+ ctx.reply_to_message_id = null;
+ }
+ return ctx;
}
}
class MessageSender {
@@ -603,12 +651,20 @@ class MessageSender {
this.sendPlainText = this.sendPlainText.bind(this);
this.sendPhoto = this.sendPhoto.bind(this);
}
- static from(token, message) {
- return new MessageSender(token, new MessageContext(message));
+ static fromMessage(token, message) {
+ return new MessageSender(token, MessageContext.fromMessage(message));
}
- with(message) {
- this.context = new MessageContext(message);
- return this;
+ static fromCallbackQuery(token, callbackQuery) {
+ return new MessageSender(token, MessageContext.fromCallbackQuery(callbackQuery));
+ }
+ static fromUpdate(token, update) {
+ if (update.callback_query) {
+ return MessageSender.fromCallbackQuery(token, update.callback_query);
+ }
+ if (update.message) {
+ return MessageSender.fromMessage(token, update.message);
+ }
+ throw new Error("Invalid update");
}
update(context) {
if (!this.context) {
@@ -687,6 +743,12 @@ class MessageSender {
}
return lastMessageResponse;
}
+ sendRawMessage(message) {
+ return this.api.sendMessage(message);
+ }
+ editRawMessage(message) {
+ return this.api.editMessageText(message);
+ }
sendRichText(message, parseMode = ENV.DEFAULT_PARSE_MODE) {
if (!this.context) {
throw new Error("Message context not set");
@@ -724,10 +786,8 @@ class MessageSender {
}
}
-async function loadChatRoleWithContext(message, context) {
+async function loadChatRoleWithContext(chatId, speakerId, context) {
const { groupAdminsKey } = context.SHARE_CONTEXT;
- const chatId = message.chat.id;
- const speakerId = message.from?.id || chatId;
if (!groupAdminsKey) {
return null;
}
@@ -810,10 +870,9 @@ async function fetchImage(url) {
});
}
async function urlToBase64String(url) {
- try {
- const { Buffer } = await import('node:buffer');
+ if (typeof Buffer !== "undefined") {
return fetchImage(url).then((blob) => blob.arrayBuffer()).then((buffer) => Buffer.from(buffer).toString("base64"));
- } catch {
+ } else {
return fetchImage(url).then((blob) => blob.arrayBuffer()).then((buffer) => btoa(String.fromCharCode.apply(null, new Uint8Array(buffer))));
}
}
@@ -840,9 +899,6 @@ async function imageToBase64String(url) {
format: `image/${format}`
};
}
-function renderBase64DataURI(params) {
- return `data:${params.format};base64,${params.data}`;
-}
class Stream {
response;
@@ -1049,10 +1105,10 @@ function fixOpenAICompatibleOptions(options) {
return new Stream(r, c);
};
options.contentExtractor = options.contentExtractor || function(d) {
- return d?.choices?.[0]?.delta?.content;
+ return d?.choices?.at(0)?.delta?.content;
};
options.fullContentExtractor = options.fullContentExtractor || function(d) {
- return d.choices?.[0]?.message.content;
+ return d.choices?.at(0)?.message.content;
};
options.errorExtractor = options.errorExtractor || function(d) {
return d.error?.message;
@@ -1072,11 +1128,43 @@ function isEventStreamResponse(resp) {
}
return false;
}
-async function requestChatCompletions(url, header, body, onStream, onResult = null, options = null) {
+async function streamHandler(stream, contentExtractor, onStream) {
+ let contentFull = "";
+ let lengthDelta = 0;
+ let updateStep = 50;
+ let lastUpdateTime = Date.now();
+ try {
+ for await (const part of stream) {
+ const textPart = contentExtractor(part);
+ if (!textPart) {
+ continue;
+ }
+ lengthDelta += textPart.length;
+ contentFull = contentFull + textPart;
+ if (lengthDelta > updateStep) {
+ if (ENV.TELEGRAM_MIN_STREAM_INTERVAL > 0) {
+ const delta = Date.now() - lastUpdateTime;
+ if (delta < ENV.TELEGRAM_MIN_STREAM_INTERVAL) {
+ continue;
+ }
+ lastUpdateTime = Date.now();
+ }
+ lengthDelta = 0;
+ updateStep += 20;
+ await onStream(`${contentFull}
+...`);
+ }
+ }
+ } catch (e) {
+ contentFull += `
+Error: ${e.message}`;
+ }
+ return contentFull;
+}
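// Usage sketch for the extracted streamHandler, fed by a fake chunk stream;
// the extractor mirrors the OpenAI-compatible default defined above.
async function* fakeStream() {
  yield { choices: [{ delta: { content: "Hello, " } }] };
  yield { choices: [{ delta: { content: "world" } }] };
}
const fullText = await streamHandler(
  fakeStream(),
  (part) => part?.choices?.at(0)?.delta?.content,
  async (draft) => console.log(draft) // receives the growing reply suffixed with "..."
);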
+async function requestChatCompletions(url, header, body, onStream, options = null) {
const controller = new AbortController();
const { signal } = controller;
let timeoutID = null;
- let lastUpdateTime = Date.now();
if (ENV.CHAT_COMPLETE_API_TIMEOUT > 0) {
timeoutID = setTimeout(() => controller.abort(), ENV.CHAT_COMPLETE_API_TIMEOUT);
}
@@ -1095,36 +1183,7 @@ async function requestChatCompletions(url, header, body, onStream, onResult = nu
if (!stream) {
throw new Error("Stream builder error");
}
- let contentFull = "";
- let lengthDelta = 0;
- let updateStep = 50;
- try {
- for await (const data of stream) {
- const c = options.contentExtractor?.(data) || "";
- if (c === "") {
- continue;
- }
- lengthDelta += c.length;
- contentFull = contentFull + c;
- if (lengthDelta > updateStep) {
- if (ENV.TELEGRAM_MIN_STREAM_INTERVAL > 0) {
- const delta = Date.now() - lastUpdateTime;
- if (delta < ENV.TELEGRAM_MIN_STREAM_INTERVAL) {
- continue;
- }
- lastUpdateTime = Date.now();
- }
- lengthDelta = 0;
- updateStep += 20;
- await onStream(`${contentFull}
-...`);
- }
- }
- } catch (e) {
- contentFull += `
-ERROR: ${e.message}`;
- }
- return contentFull;
+ return streamHandler(stream, options.contentExtractor, onStream);
}
if (!isJsonResponse(resp)) {
throw new Error(resp.statusText);
@@ -1136,13 +1195,65 @@ ERROR: ${e.message}`;
if (options.errorExtractor?.(result)) {
throw new Error(options.errorExtractor?.(result) || "Unknown error");
}
- try {
- await onResult?.(result);
- return options.fullContentExtractor?.(result) || "";
- } catch (e) {
- console.error(e);
- throw new Error(JSON.stringify(result));
+ return options.fullContentExtractor?.(result) || "";
+}
+
+function extractTextContent(history) {
+ if (typeof history.content === "string") {
+ return history.content;
+ }
+ if (Array.isArray(history.content)) {
+ return history.content.map((item) => {
+ if (item.type === "text") {
+ return item.text;
+ }
+ return "";
+ }).join("");
}
+ return "";
+}
+function extractImageContent(imageData) {
+ if (imageData instanceof URL) {
+ return { url: imageData.href };
+ }
+ if (typeof imageData === "string") {
+ if (imageData.startsWith("http")) {
+ return { url: imageData };
+ } else {
+ return { base64: imageData };
+ }
+ }
+ if (imageData instanceof Uint8Array) {
+ return { base64: Buffer.from(imageData).toString("base64") };
+ }
+ if (Buffer.isBuffer(imageData)) {
+ return { base64: Buffer.from(imageData).toString("base64") };
+ }
+ return {};
+}
+async function convertStringToResponseMessages(input) {
+ const text = await input;
+ return {
+ text,
+    responses: [{ role: "assistant", content: text }]
+ };
+}
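// Every agent's request() now resolves to the same { text, responses } shape,
// via the helper above (a minimal sketch):
const { text, responses } = await convertStringToResponseMessages(Promise.resolve("hi"));
// text === "hi"; responses deep-equals [{ role: "assistant", content: "hi" }]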
+async function loadModelsList(raw, remoteLoader) {
+ if (!raw) {
+ return [];
+ }
+ if (raw.startsWith("[") && raw.endsWith("]")) {
+ try {
+ return JSON.parse(raw);
+ } catch (e) {
+ console.error(e);
+ return [];
+ }
+ }
+ if (raw.startsWith("http") && remoteLoader) {
+ return await remoteLoader(raw);
+ }
+ return [];
}
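// The *_CHAT_MODELS_LIST values introduced above are interpreted three ways
// (sketch; the remote loader mirrors the per-agent loaders below, with auth
// headers omitted):
await loadModelsList(`["gpt-4o", "gpt-4o-mini"]`); // JSON array string -> parsed directly
await loadModelsList("https://api.openai.com/v1/models", async (url) => {
  const data = await fetch(url).then((res) => res.json());
  return data.data?.map((model) => model.id) || [];
}); // http(s) URL -> delegated to the remote loader
await loadModelsList(""); // empty -> []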
class Anthropic {
@@ -1156,16 +1267,30 @@ class Anthropic {
role: item.role,
content: item.content
};
- if (item.images && item.images.length > 0) {
- res.content = [];
- if (item.content) {
- res.content.push({ type: "text", text: item.content });
- }
- for (const image of item.images) {
- res.content.push(await imageToBase64String(image).then(({ format, data }) => {
- return { type: "image", source: { type: "base64", media_type: format, data } };
- }));
+ if (item.role === "system") {
+ return null;
+ }
+ if (Array.isArray(item.content)) {
+ const contents = [];
+ for (const content of item.content) {
+ switch (content.type) {
+ case "text":
+ contents.push({ type: "text", text: content.text });
+ break;
+ case "image": {
+ const data = extractImageContent(content.image);
+ if (data.url) {
+ contents.push(await imageToBase64String(data.url).then(({ format, data: data2 }) => {
+ return { type: "image", source: { type: "base64", media_type: format, data: data2 } };
+ }));
+ } else if (data.base64) {
+ contents.push({ type: "image", source: { type: "base64", media_type: "image/jpeg", data: data.base64 } });
+ }
+ break;
+ }
+ }
}
+ res.content = contents;
}
return res;
};
@@ -1192,21 +1317,20 @@ class Anthropic {
}
}
request = async (params, context, onStream) => {
- const { message, images, prompt, history } = params;
+ const { prompt, messages } = params;
const url = `${context.ANTHROPIC_API_BASE}/messages`;
const header = {
"x-api-key": context.ANTHROPIC_API_KEY || "",
"anthropic-version": "2023-06-01",
"content-type": "application/json"
};
- const messages = (history || []).concat({ role: "user", content: message, images });
if (messages.length > 0 && messages[0].role === "assistant") {
messages.shift();
}
const body = {
system: prompt,
model: context.ANTHROPIC_CHAT_MODEL,
- messages: await Promise.all(messages.map((item) => this.render(item))),
+ messages: (await Promise.all(messages.map((item) => this.render(item)))).filter((i) => i !== null),
stream: onStream != null,
max_tokens: ENV.MAX_TOKEN_LENGTH > 0 ? ENV.MAX_TOKEN_LENGTH : 2048
};
@@ -1221,41 +1345,56 @@ class Anthropic {
return data?.delta?.text;
};
options.fullContentExtractor = function(data) {
- return data?.content?.[0].text;
+      return data?.content?.at(0)?.text;
};
options.errorExtractor = function(data) {
return data?.error?.message;
};
- return requestChatCompletions(url, header, body, onStream, null, options);
+ return convertStringToResponseMessages(requestChatCompletions(url, header, body, onStream, options));
+ };
+ modelList = async (context) => {
+ return loadModelsList(context.ANTHROPIC_CHAT_MODELS_LIST);
};
}
-async function renderOpenAIMessage(item) {
+async function renderOpenAIMessage(item, supportImage) {
const res = {
role: item.role,
content: item.content
};
- if (item.images && item.images.length > 0) {
- res.content = [];
- if (item.content) {
- res.content.push({ type: "text", text: item.content });
- }
- for (const image of item.images) {
- switch (ENV.TELEGRAM_IMAGE_TRANSFER_MODE) {
- case "base64":
- res.content.push({ type: "image_url", image_url: {
- url: renderBase64DataURI(await imageToBase64String(image))
- } });
+ if (Array.isArray(item.content)) {
+ const contents = [];
+ for (const content of item.content) {
+ switch (content.type) {
+ case "text":
+ contents.push({ type: "text", text: content.text });
break;
- case "url":
- default:
- res.content.push({ type: "image_url", image_url: { url: image } });
+ case "image":
+ if (supportImage) {
+ const data = extractImageContent(content.image);
+ if (data.url) {
+ contents.push({ type: "image_url", image_url: { url: data.url } });
+ } else if (data.base64) {
+ contents.push({ type: "image_url", image_url: { url: data.base64 } });
+ }
+ }
break;
}
}
+ res.content = contents;
}
return res;
}
+async function renderOpenAIMessages(prompt, items, supportImage) {
+ const messages = await Promise.all(items.map((r) => renderOpenAIMessage(r, supportImage)));
+ if (prompt) {
+ if (messages.length > 0 && messages[0].role === "system") {
+ messages.shift();
+ }
+ messages.unshift({ role: "system", content: prompt });
+ }
+ return messages;
+}
class OpenAIBase {
name = "openai";
apikey = (context) => {
@@ -1275,23 +1414,30 @@ class OpenAI extends OpenAIBase {
return renderOpenAIMessage(item);
};
request = async (params, context, onStream) => {
- const { message, images, prompt, history } = params;
+ const { prompt, messages } = params;
const url = `${context.OPENAI_API_BASE}/chat/completions`;
const header = {
"Content-Type": "application/json",
"Authorization": `Bearer ${this.apikey(context)}`
};
- const messages = [...history || [], { role: "user", content: message, images }];
- if (prompt) {
- messages.unshift({ role: context.SYSTEM_INIT_MESSAGE_ROLE, content: prompt });
- }
const body = {
model: context.OPENAI_CHAT_MODEL,
...context.OPENAI_API_EXTRA_PARAMS,
- messages: await Promise.all(messages.map(this.render)),
+ messages: await renderOpenAIMessages(prompt, messages, true),
stream: onStream != null
};
- return requestChatCompletions(url, header, body, onStream);
+ return convertStringToResponseMessages(requestChatCompletions(url, header, body, onStream));
+ };
+ modelList = async (context) => {
+ if (context.OPENAI_CHAT_MODELS_LIST === "") {
+ context.OPENAI_CHAT_MODELS_LIST = `${context.OPENAI_API_BASE}/models`;
+ }
+ return loadModelsList(context.OPENAI_CHAT_MODELS_LIST, async (url) => {
+ const data = await fetch(url, {
+ headers: { Authorization: `Bearer ${this.apikey(context)}` }
+ }).then((res) => res.json());
+ return data.data?.map((model) => model.id) || [];
+ });
};
}
class Dalle extends OpenAIBase {
@@ -1326,7 +1472,7 @@ class Dalle extends OpenAIBase {
if (resp.error?.message) {
throw new Error(resp.error.message);
}
- return resp?.data?.[0]?.url;
+ return resp?.data?.at(0)?.url;
};
}
@@ -1345,33 +1491,29 @@ class AzureBase {
};
}
class AzureChatAI extends AzureBase {
- modelKey = "AZURE_COMPLETIONS_API";
+ modelKey = "AZURE_CHAT_MODEL";
enable = (context) => {
- return !!(context.AZURE_API_KEY && context.AZURE_COMPLETIONS_API);
+ return !!(context.AZURE_API_KEY && context.AZURE_RESOURCE_NAME);
};
model = (ctx) => {
- return this.modelFromURI(ctx.AZURE_COMPLETIONS_API);
+ return ctx.AZURE_CHAT_MODEL;
};
request = async (params, context, onStream) => {
- const { message, images, prompt, history } = params;
- const url = context.AZURE_COMPLETIONS_API;
- if (!url || !context.AZURE_API_KEY) {
- throw new Error("Azure Completions API is not set");
- }
+ const { prompt, messages } = params;
+ const url = `https://${context.AZURE_RESOURCE_NAME}.openai.azure.com/openai/deployments/${context.AZURE_CHAT_MODEL}/chat/completions?api-version=${context.AZURE_API_VERSION}`;
const header = {
"Content-Type": "application/json",
- "api-key": context.AZURE_API_KEY
+ "api-key": context.AZURE_API_KEY || ""
};
- const messages = [...history || [], { role: "user", content: message, images }];
- if (prompt) {
- messages.unshift({ role: context.SYSTEM_INIT_MESSAGE_ROLE, content: prompt });
- }
const body = {
...context.OPENAI_API_EXTRA_PARAMS,
- messages: await Promise.all(messages.map(renderOpenAIMessage)),
+ messages: await renderOpenAIMessages(prompt, messages, true),
stream: onStream != null
};
- return requestChatCompletions(url, header, body, onStream);
+ return convertStringToResponseMessages(requestChatCompletions(url, header, body, onStream));
+ };
+ modelList = async (context) => {
+ return loadModelsList(context.AZURE_CHAT_MODELS_LIST);
};
}
class AzureImageAI extends AzureBase {
@@ -1383,13 +1525,10 @@ class AzureImageAI extends AzureBase {
return this.modelFromURI(ctx.AZURE_DALLE_API);
};
request = async (prompt, context) => {
- const url = context.AZURE_DALLE_API;
- if (!url || !context.AZURE_API_KEY) {
- throw new Error("Azure DALL-E API is not set");
- }
+    const url = `https://${context.AZURE_RESOURCE_NAME}.openai.azure.com/openai/deployments/${context.AZURE_IMAGE_MODEL}/images/generations?api-version=${context.AZURE_API_VERSION}`;
const header = {
"Content-Type": "application/json",
- "api-key": context.AZURE_API_KEY
+ "api-key": context.AZURE_API_KEY || ""
};
const body = {
prompt,
@@ -1410,7 +1549,7 @@ class AzureImageAI extends AzureBase {
if (resp.error?.message) {
throw new Error(resp.error.message);
}
- return resp?.data?.[0]?.url;
+ return resp?.data?.at(0)?.url;
};
}
@@ -1424,19 +1563,15 @@ class Cohere {
return ctx.COHERE_CHAT_MODEL;
};
request = async (params, context, onStream) => {
- const { message, prompt, history } = params;
+ const { prompt, messages } = params;
const url = `${context.COHERE_API_BASE}/chat`;
const header = {
"Authorization": `Bearer ${context.COHERE_API_KEY}`,
"Content-Type": "application/json",
"Accept": onStream !== null ? "text/event-stream" : "application/json"
};
- const messages = [...history || [], { role: "user", content: message }];
- if (prompt) {
- messages.unshift({ role: "assistant", content: prompt });
- }
const body = {
- messages,
+ messages: await renderOpenAIMessages(prompt, messages),
model: context.COHERE_CHAT_MODEL,
stream: onStream != null
};
@@ -1445,77 +1580,53 @@ class Cohere {
return data?.delta?.message?.content?.text;
};
options.fullContentExtractor = function(data) {
- return data?.messages[0].content;
+ return data?.messages?.at(0)?.content;
};
options.errorExtractor = function(data) {
return data?.message;
};
- return requestChatCompletions(url, header, body, onStream, null, options);
+ return convertStringToResponseMessages(requestChatCompletions(url, header, body, onStream, options));
+ };
+ modelList = async (context) => {
+ if (context.COHERE_CHAT_MODELS_LIST === "") {
+ const { protocol, host } = new URL(context.COHERE_API_BASE);
+      context.COHERE_CHAT_MODELS_LIST = `${protocol}//${host}/v1/models`; // URL.protocol already includes the trailing ":"
+ }
+ return loadModelsList(context.COHERE_CHAT_MODELS_LIST, async (url) => {
+ const data = await fetch(url, {
+ headers: { Authorization: `Bearer ${context.COHERE_API_KEY}` }
+ }).then((res) => res.json());
+ return data.models?.filter((model) => model.endpoints?.includes("chat")).map((model) => model.name) || [];
+ });
};
}
class Gemini {
name = "gemini";
modelKey = "GOOGLE_COMPLETIONS_MODEL";
- static GEMINI_ROLE_MAP = {
- assistant: "model",
- system: "user",
- user: "user"
- };
enable = (context) => {
return !!context.GOOGLE_API_KEY;
};
model = (ctx) => {
return ctx.GOOGLE_COMPLETIONS_MODEL;
};
- render = (item) => {
- return {
- role: Gemini.GEMINI_ROLE_MAP[item.role],
- parts: [
- {
- text: item.content || ""
- }
- ]
+ request = async (params, context, onStream) => {
+ const { prompt, messages } = params;
+    const url = `${context.GOOGLE_API_BASE}/openai/chat/completions`;
+ const header = {
+ "Authorization": `Bearer ${context.GOOGLE_API_KEY}`,
+ "Content-Type": "application/json",
+ "Accept": onStream !== null ? "text/event-stream" : "application/json"
+ };
+ const body = {
+ messages: await renderOpenAIMessages(prompt, messages),
+ model: context.GOOGLE_COMPLETIONS_MODEL,
+ stream: onStream != null
};
+ return convertStringToResponseMessages(requestChatCompletions(url, header, body, onStream));
};
- request = async (params, context, onStream) => {
- const { message, prompt, history } = params;
- if (onStream !== null) {
- console.warn("Stream mode is not supported");
- }
- const mode = "generateContent";
- const url = `${context.GOOGLE_COMPLETIONS_API}${context.GOOGLE_COMPLETIONS_MODEL}:${mode}`;
- const contentsTemp = [...history || [], { role: "user", content: message }];
- if (prompt) {
- contentsTemp.unshift({ role: "assistant", content: prompt });
- }
- const contents = [];
- for (const msg of contentsTemp) {
- msg.role = Gemini.GEMINI_ROLE_MAP[msg.role];
- if (contents.length === 0 || contents[contents.length - 1].role !== msg.role) {
- contents.push(this.render(msg));
- } else {
- contents[contents.length - 1].parts[0].text += msg.content;
- }
- }
- const resp = await fetch(url, {
- method: "POST",
- headers: {
- "Content-Type": "application/json",
- "x-goog-api-key": context.GOOGLE_API_KEY
- },
- body: JSON.stringify({ contents })
- });
- const data = await resp.json();
- try {
- return data.candidates[0].content.parts[0].text;
- } catch (e) {
- console.error(e);
- if (!data) {
- throw new Error("Empty response");
- }
- throw new Error(data?.error?.message || JSON.stringify(data));
- }
+ modelList = async (context) => {
+ return loadModelsList(context.GOOGLE_CHAT_MODELS_LIST);
};
}
@@ -1528,29 +1639,30 @@ class Mistral {
model = (ctx) => {
return ctx.MISTRAL_CHAT_MODEL;
};
- render = (item) => {
- return {
- role: item.role,
- content: item.content
- };
- };
request = async (params, context, onStream) => {
- const { message, prompt, history } = params;
+ const { prompt, messages } = params;
const url = `${context.MISTRAL_API_BASE}/chat/completions`;
const header = {
"Content-Type": "application/json",
"Authorization": `Bearer ${context.MISTRAL_API_KEY}`
};
- const messages = [...history || [], { role: "user", content: message }];
- if (prompt) {
- messages.unshift({ role: context.SYSTEM_INIT_MESSAGE_ROLE, content: prompt });
- }
const body = {
model: context.MISTRAL_CHAT_MODEL,
- messages: messages.map(this.render),
+ messages: await renderOpenAIMessages(prompt, messages),
stream: onStream != null
};
- return requestChatCompletions(url, header, body, onStream);
+ return convertStringToResponseMessages(requestChatCompletions(url, header, body, onStream));
+ };
+ modelList = async (context) => {
+ if (context.MISTRAL_CHAT_MODELS_LIST === "") {
+ context.MISTRAL_CHAT_MODELS_LIST = `${context.MISTRAL_API_BASE}/models`;
+ }
+ return loadModelsList(context.MISTRAL_CHAT_MODELS_LIST, async (url) => {
+ const data = await fetch(url, {
+ headers: { Authorization: `Bearer ${context.MISTRAL_API_KEY}` }
+ }).then((res) => res.json());
+ return data.data?.map((model) => model.id) || [];
+ });
};
}
@@ -1582,7 +1694,7 @@ class WorkersChat extends WorkerBase {
};
};
request = async (params, context, onStream) => {
- const { message, prompt, history } = params;
+ const { prompt, messages } = params;
const id = context.CLOUDFLARE_ACCOUNT_ID;
const token = context.CLOUDFLARE_TOKEN;
const model = context.WORKERS_CHAT_MODEL;
@@ -1590,12 +1702,8 @@ class WorkersChat extends WorkerBase {
const header = {
Authorization: `Bearer ${token}`
};
- const messages = [...history || [], { role: "user", content: message }];
- if (prompt) {
- messages.unshift({ role: context.SYSTEM_INIT_MESSAGE_ROLE, content: prompt });
- }
const body = {
- messages: messages.map(this.render),
+ messages: await renderOpenAIMessages(prompt, messages),
stream: onStream !== null
};
const options = {};
@@ -1606,9 +1714,22 @@ class WorkersChat extends WorkerBase {
return data?.result?.response;
};
options.errorExtractor = function(data) {
- return data?.errors?.[0]?.message;
+ return data?.errors?.at(0)?.message;
};
- return requestChatCompletions(url, header, body, onStream, null, options);
+ return convertStringToResponseMessages(requestChatCompletions(url, header, body, onStream, options));
+ };
+ modelList = async (context) => {
+ if (context.WORKERS_CHAT_MODELS_LIST === "") {
+ const id = context.CLOUDFLARE_ACCOUNT_ID;
+ context.WORKERS_CHAT_MODELS_LIST = `https://api.cloudflare.com/client/v4/accounts/${id}/ai/models/search?task=Text%20Generation`;
+ }
+ return loadModelsList(context.WORKERS_CHAT_MODELS_LIST, async (url) => {
+ const header = {
+ Authorization: `Bearer ${context.CLOUDFLARE_TOKEN}`
+ };
+ const data = await fetch(url, { headers: header }).then((res) => res.json());
+ return data.result?.map((model) => model.name) || [];
+ });
};
}
class WorkersImage extends WorkerBase {
@@ -1635,24 +1756,23 @@ class WorkersImage extends WorkerBase {
};
}
async function base64StringToBlob(base64String) {
- try {
- const { Buffer } = await import('node:buffer');
+ if (typeof Buffer !== "undefined") {
const buffer = Buffer.from(base64String, "base64");
return new Blob([buffer], { type: "image/png" });
- } catch {
+ } else {
const uint8Array = Uint8Array.from(atob(base64String), (c) => c.charCodeAt(0));
return new Blob([uint8Array], { type: "image/png" });
}
}
const CHAT_AGENTS = [
+ new OpenAI(),
new Anthropic(),
new AzureChatAI(),
+ new WorkersChat(),
new Cohere(),
new Gemini(),
- new Mistral(),
- new OpenAI(),
- new WorkersChat()
+ new Mistral()
];
function loadChatLLM(context) {
for (const llm of CHAT_AGENTS) {
@@ -1686,6 +1806,51 @@ function loadImageGen(context) {
return null;
}
+function isTelegramChatTypeGroup(type) {
+ return type === "group" || type === "supergroup";
+}
+async function setUserConfig(values, context) {
+ for (const ent of Object.entries(values || {})) {
+ let [key, value] = ent;
+ key = ENV_KEY_MAPPER[key] || key;
+ if (ENV.LOCK_USER_CONFIG_KEYS.includes(key)) {
+ throw new Error(`Key ${key} is locked`);
+ }
+ const configKeys = Object.keys(context.USER_CONFIG || {}) || [];
+ if (!configKeys.includes(key)) {
+ throw new Error(`Key ${key} is not allowed`);
+ }
+ context.USER_CONFIG.DEFINE_KEYS.push(key);
+ ConfigMerger.merge(context.USER_CONFIG, {
+ [key]: value
+ });
+ console.log("Update user config: ", key, context.USER_CONFIG[key]);
+ }
+ context.USER_CONFIG.DEFINE_KEYS = Array.from(new Set(context.USER_CONFIG.DEFINE_KEYS));
+ await ENV.DATABASE.put(
+ context.SHARE_CONTEXT.configStoreKey,
+ JSON.stringify(ConfigMerger.trim(context.USER_CONFIG, ENV.LOCK_USER_CONFIG_KEYS))
+ );
+}
+
+const TELEGRAM_AUTH_CHECKER = {
+ default(chatType) {
+ if (isTelegramChatTypeGroup(chatType)) {
+ return ["administrator", "creator"];
+ }
+ return null;
+ },
+ shareModeGroup(chatType) {
+ if (isTelegramChatTypeGroup(chatType)) {
+ if (!ENV.GROUP_CHAT_BOT_SHARE_MODE) {
+ return null;
+ }
+ return ["administrator", "creator"];
+ }
+ return null;
+ }
+};
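// Role-gating sketch: handlers declare needAuth, and handleSystemCommand
// (further down) compares the returned list against the caller's chat role.
TELEGRAM_AUTH_CHECKER.default("supergroup");        // ["administrator", "creator"]
TELEGRAM_AUTH_CHECKER.default("private");           // null -> no role check
TELEGRAM_AUTH_CHECKER.shareModeGroup("supergroup"); // admin-only when GROUP_CHAT_BOT_SHARE_MODE is on, otherwise null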
+
function tokensCounter() {
return (text) => {
return text.length;
@@ -1712,7 +1877,7 @@ async function loadHistory(key) {
const historyItem = list[i];
let length = 0;
if (historyItem.content) {
- length = counter(historyItem.content);
+ length = counter(extractTextContent(historyItem));
} else {
historyItem.content = "";
}
@@ -1738,32 +1903,30 @@ async function requestCompletionsFromLLM(params, context, agent, modifier, onStr
}
let history = await loadHistory(historyKey);
if (modifier) {
- const modifierData = modifier(history, params.message || null);
+ const modifierData = modifier(history, params || null);
history = modifierData.history;
- params.message = modifierData.message;
+ params = modifierData.message;
+ }
+ if (!params) {
+ throw new Error("Message is empty");
}
+ history.push(params);
const llmParams = {
- ...params,
- history,
- prompt: context.USER_CONFIG.SYSTEM_INIT_MESSAGE
+ prompt: context.USER_CONFIG.SYSTEM_INIT_MESSAGE || void 0,
+ messages: history
};
- const answer = await agent.request(llmParams, context.USER_CONFIG, onStream);
+ const { text, responses } = await agent.request(llmParams, context.USER_CONFIG, onStream);
if (!historyDisable) {
- const userMessage = { role: "user", content: params.message || "", images: params.images };
- if (ENV.HISTORY_IMAGE_PLACEHOLDER && userMessage.images && userMessage.images.length > 0) {
- delete userMessage.images;
- userMessage.content = `${ENV.HISTORY_IMAGE_PLACEHOLDER}
-${userMessage.content}`;
- }
- history.push(userMessage);
- history.push({ role: "assistant", content: answer });
+    // HISTORY_IMAGE_PLACEHOLDER substitution is not implemented in this build
+    if (ENV.HISTORY_IMAGE_PLACEHOLDER) ;
+    // `params` was already appended to `history` before the request, so only
+    // the assistant responses are appended before persisting
+    history.push(...responses);
await ENV.DATABASE.put(historyKey, JSON.stringify(history)).catch(console.error);
}
- return answer;
+ return text;
}
async function chatWithLLM(message, params, context, modifier) {
- const sender = MessageSender.from(context.SHARE_CONTEXT.botToken, message);
+ const sender = MessageSender.fromMessage(context.SHARE_CONTEXT.botToken, message);
try {
try {
const msg = await sender.sendPlainText("...").then((r) => r.json());
@@ -1835,48 +1998,35 @@ function findPhotoFileID(photos, offset) {
}
class ChatHandler {
handle = async (message, context) => {
+ const text = message.text || message.caption || "";
const params = {
- message: message.text || message.caption || ""
+ role: "user",
+ content: text
};
if (message.photo && message.photo.length > 0) {
const id = findPhotoFileID(message.photo, ENV.TELEGRAM_PHOTO_SIZE_OFFSET);
const api = createTelegramBotAPI(context.SHARE_CONTEXT.botToken);
const file = await api.getFileWithReturns({ file_id: id });
- const url = file.result.file_path;
- if (url) {
- params.images = [`${ENV.TELEGRAM_API_DOMAIN}/file/bot${context.SHARE_CONTEXT.botToken}/${url}`];
+ const filePath = file.result.file_path;
+ if (filePath) {
+ const url = URL.parse(`${ENV.TELEGRAM_API_DOMAIN}/file/bot${context.SHARE_CONTEXT.botToken}/${filePath}`);
+ if (url) {
+ params.content = [
+ { type: "text", text },
+ { type: "image", image: url }
+ ];
+ }
}
}
return chatWithLLM(message, params, context, null);
};
}
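// The params built above follow the new content-parts union (sketch; the
// photo URL is hypothetical, and extractImageContent also accepts a base64
// string, Uint8Array, or Buffer):
const textOnly = { role: "user", content: "describe this photo" };
const withImage = {
  role: "user",
  content: [
    { type: "text", text: "describe this photo" },
    { type: "image", image: new URL("https://example.com/photo.jpg") }
  ]
};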
-function isTelegramChatTypeGroup(type) {
- return type === "group" || type === "supergroup";
-}
-
-const COMMAND_AUTH_CHECKER = {
- default(chatType) {
- if (isTelegramChatTypeGroup(chatType)) {
- return ["administrator", "creator"];
- }
- return null;
- },
- shareModeGroup(chatType) {
- if (isTelegramChatTypeGroup(chatType)) {
- if (!ENV.GROUP_CHAT_BOT_SHARE_MODE) {
- return null;
- }
- return ["administrator", "creator"];
- }
- return null;
- }
-};
class ImgCommandHandler {
command = "/img";
scopes = ["all_private_chats", "all_chat_administrators"];
handle = async (message, subcommand, context) => {
- const sender = MessageSender.from(context.SHARE_CONTEXT.botToken, message);
+ const sender = MessageSender.fromMessage(context.SHARE_CONTEXT.botToken, message);
if (subcommand === "") {
return sender.sendPlainText(ENV.I18N.command.help.img);
}
@@ -1905,7 +2055,7 @@ class HelpCommandHandler {
command = "/help";
scopes = ["all_private_chats", "all_chat_administrators"];
handle = async (message, subcommand, context) => {
- const sender = MessageSender.from(context.SHARE_CONTEXT.botToken, message);
+ const sender = MessageSender.fromMessage(context.SHARE_CONTEXT.botToken, message);
let helpMsg = `${ENV.I18N.command.help.summary}
`;
for (const [k, v] of Object.entries(ENV.I18N.command.help)) {
@@ -1969,33 +2119,17 @@ class StartCommandHandler extends BaseNewCommandHandler {
}
class SetEnvCommandHandler {
command = "/setenv";
- needAuth = COMMAND_AUTH_CHECKER.shareModeGroup;
+ needAuth = TELEGRAM_AUTH_CHECKER.shareModeGroup;
handle = async (message, subcommand, context) => {
- const sender = MessageSender.from(context.SHARE_CONTEXT.botToken, message);
+ const sender = MessageSender.fromMessage(context.SHARE_CONTEXT.botToken, message);
const kv = subcommand.indexOf("=");
if (kv === -1) {
return sender.sendPlainText(ENV.I18N.command.help.setenv);
}
- let key = subcommand.slice(0, kv);
+ const key = subcommand.slice(0, kv);
const value = subcommand.slice(kv + 1);
- key = ENV_KEY_MAPPER[key] || key;
- if (ENV.LOCK_USER_CONFIG_KEYS.includes(key)) {
- return sender.sendPlainText(`Key ${key} is locked`);
- }
- if (!Object.keys(context.USER_CONFIG).includes(key)) {
- return sender.sendPlainText(`Key ${key} not found`);
- }
try {
- context.USER_CONFIG.DEFINE_KEYS.push(key);
- context.USER_CONFIG.DEFINE_KEYS = Array.from(new Set(context.USER_CONFIG.DEFINE_KEYS));
- ConfigMerger.merge(context.USER_CONFIG, {
- [key]: value
- });
- console.log("Update user config: ", key, context.USER_CONFIG[key]);
- await ENV.DATABASE.put(
- context.SHARE_CONTEXT.configStoreKey,
- JSON.stringify(ConfigMerger.trim(context.USER_CONFIG, ENV.LOCK_USER_CONFIG_KEYS))
- );
+ await setUserConfig({ [key]: value }, context);
return sender.sendPlainText("Update user config success");
} catch (e) {
return sender.sendPlainText(`ERROR: ${e.message}`);
@@ -2004,32 +2138,12 @@ class SetEnvCommandHandler {
}
class SetEnvsCommandHandler {
command = "/setenvs";
- needAuth = COMMAND_AUTH_CHECKER.shareModeGroup;
+ needAuth = TELEGRAM_AUTH_CHECKER.shareModeGroup;
handle = async (message, subcommand, context) => {
- const sender = MessageSender.from(context.SHARE_CONTEXT.botToken, message);
+ const sender = MessageSender.fromMessage(context.SHARE_CONTEXT.botToken, message);
try {
const values = JSON.parse(subcommand);
- const configKeys = Object.keys(context.USER_CONFIG);
- for (const ent of Object.entries(values)) {
- let [key, value] = ent;
- key = ENV_KEY_MAPPER[key] || key;
- if (ENV.LOCK_USER_CONFIG_KEYS.includes(key)) {
- return sender.sendPlainText(`Key ${key} is locked`);
- }
- if (!configKeys.includes(key)) {
- return sender.sendPlainText(`Key ${key} not found`);
- }
- context.USER_CONFIG.DEFINE_KEYS.push(key);
- ConfigMerger.merge(context.USER_CONFIG, {
- [key]: value
- });
- console.log("Update user config: ", key, context.USER_CONFIG[key]);
- }
- context.USER_CONFIG.DEFINE_KEYS = Array.from(new Set(context.USER_CONFIG.DEFINE_KEYS));
- await ENV.DATABASE.put(
- context.SHARE_CONTEXT.configStoreKey,
- JSON.stringify(ConfigMerger.trim(context.USER_CONFIG, ENV.LOCK_USER_CONFIG_KEYS))
- );
+ await setUserConfig(values, context);
return sender.sendPlainText("Update user config success");
} catch (e) {
return sender.sendPlainText(`ERROR: ${e.message}`);
@@ -2038,9 +2152,9 @@ class SetEnvsCommandHandler {
}
class DelEnvCommandHandler {
command = "/delenv";
- needAuth = COMMAND_AUTH_CHECKER.shareModeGroup;
+ needAuth = TELEGRAM_AUTH_CHECKER.shareModeGroup;
handle = async (message, subcommand, context) => {
- const sender = MessageSender.from(context.SHARE_CONTEXT.botToken, message);
+ const sender = MessageSender.fromMessage(context.SHARE_CONTEXT.botToken, message);
if (ENV.LOCK_USER_CONFIG_KEYS.includes(subcommand)) {
const msg = `Key ${subcommand} is locked`;
return sender.sendPlainText(msg);
@@ -2060,9 +2174,9 @@ class DelEnvCommandHandler {
}
class ClearEnvCommandHandler {
command = "/clearenv";
- needAuth = COMMAND_AUTH_CHECKER.shareModeGroup;
+ needAuth = TELEGRAM_AUTH_CHECKER.shareModeGroup;
handle = async (message, subcommand, context) => {
- const sender = MessageSender.from(context.SHARE_CONTEXT.botToken, message);
+ const sender = MessageSender.fromMessage(context.SHARE_CONTEXT.botToken, message);
try {
await ENV.DATABASE.put(
context.SHARE_CONTEXT.configStoreKey,
@@ -2078,7 +2192,7 @@ class VersionCommandHandler {
command = "/version";
scopes = ["all_private_chats", "all_chat_administrators"];
handle = async (message, subcommand, context) => {
- const sender = MessageSender.from(context.SHARE_CONTEXT.botToken, message);
+ const sender = MessageSender.fromMessage(context.SHARE_CONTEXT.botToken, message);
const current = {
ts: ENV.BUILD_TIMESTAMP,
sha: ENV.BUILD_VERSION
@@ -2106,7 +2220,7 @@ class SystemCommandHandler {
command = "/system";
scopes = ["all_private_chats", "all_chat_administrators"];
handle = async (message, subcommand, context) => {
- const sender = MessageSender.from(context.SHARE_CONTEXT.botToken, message);
+ const sender = MessageSender.fromMessage(context.SHARE_CONTEXT.botToken, message);
const chatAgent = loadChatLLM(context.USER_CONFIG);
const imageAgent = loadImageGen(context.USER_CONFIG);
const agent = {
@@ -2148,8 +2262,8 @@ class RedoCommandHandler {
command = "/redo";
scopes = ["all_private_chats", "all_group_chats", "all_chat_administrators"];
handle = async (message, subcommand, context) => {
- const mf = (history, text) => {
- let nextText = text;
+ const mf = (history, message2) => {
+ let nextMessage = message2;
if (!(history && Array.isArray(history) && history.length > 0)) {
throw new Error("History not found");
}
@@ -2159,18 +2273,43 @@ class RedoCommandHandler {
if (data === void 0 || data === null) {
break;
} else if (data.role === "user") {
- if (text === "" || text === void 0 || text === null) {
- nextText = data.content || null;
- }
+ nextMessage = data;
break;
}
}
if (subcommand) {
- nextText = subcommand;
+ nextMessage = {
+ role: "user",
+ content: subcommand
+ };
}
- return { history: historyCopy, message: nextText };
+ if (nextMessage === null) {
+ throw new Error("Redo message not found");
+ }
+ return { history: historyCopy, message: nextMessage };
};
- return chatWithLLM(message, { message: null }, context, mf);
+ return chatWithLLM(message, null, context, mf);
+ };
+}
+class ModelsCommandHandler {
+ command = "/models";
+ scopes = ["all_private_chats", "all_group_chats", "all_chat_administrators"];
+ handle = async (message, subcommand, context) => {
+ const sender = MessageSender.fromMessage(context.SHARE_CONTEXT.botToken, message);
+ const chatAgent = loadChatLLM(context.USER_CONFIG);
+ const params = {
+ chat_id: message.chat.id,
+      text: `${chatAgent?.name || "None"} | ${chatAgent?.model(context.USER_CONFIG) || "None"}`,
+ reply_markup: {
+ inline_keyboard: [[
+ {
+ text: ENV.I18N.callback_query.open_model_list,
+ callback_data: "al:"
+ }
+ ]]
+ }
+ };
+ return sender.sendRawMessage(params);
};
}
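// The /models flow above chains three callback_data prefixes (sketch with
// illustrative agent/model values):
const openList = "al:";                                           // button above -> provider list
const listModels = `ca:${JSON.stringify(["openai", 0])}`;         // an agent's models, page 0
const changeModel = `cm:${JSON.stringify(["openai", "gpt-4o"])}`; // switch to the chosen model
// Telegram caps callback_data at 64 bytes, so very long model names can make
// the cm: payload unsendable.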
class EchoCommandHandler {
@@ -2179,7 +2318,7 @@ class EchoCommandHandler {
let msg = "
";
msg += JSON.stringify({ message }, null, 2);
msg += "
";
- return MessageSender.from(context.SHARE_CONTEXT.botToken, message).sendRichText(msg, "HTML");
+ return MessageSender.fromMessage(context.SHARE_CONTEXT.botToken, message).sendRichText(msg, "HTML");
};
}
@@ -2194,15 +2333,19 @@ const SYSTEM_COMMANDS = [
new ClearEnvCommandHandler(),
new VersionCommandHandler(),
new SystemCommandHandler(),
+ new ModelsCommandHandler(),
new HelpCommandHandler()
];
async function handleSystemCommand(message, raw, command, context) {
- const sender = MessageSender.from(context.SHARE_CONTEXT.botToken, message);
+ const sender = MessageSender.fromMessage(context.SHARE_CONTEXT.botToken, message);
try {
+ const chatId = message.chat.id;
+ const speakerId = message.from?.id || chatId;
+ const chatType = message.chat.type;
if (command.needAuth) {
- const roleList = command.needAuth(message.chat.type);
+ const roleList = command.needAuth(chatType);
if (roleList) {
- const chatRole = await loadChatRoleWithContext(message, context);
+ const chatRole = await loadChatRoleWithContext(chatId, speakerId, context);
if (chatRole === null) {
return sender.sendPlainText("ERROR: Get chat role failed");
}
@@ -2222,7 +2365,7 @@ async function handleSystemCommand(message, raw, command, context) {
}
}
async function handlePluginCommand(message, command, raw, template, context) {
- const sender = MessageSender.from(context.SHARE_CONTEXT.botToken, message);
+ const sender = MessageSender.fromMessage(context.SHARE_CONTEXT.botToken, message);
try {
const subcommand = raw.substring(command.length).trim();
if (template.input?.required && !subcommand) {
@@ -2290,10 +2433,13 @@ function commandsBindScope() {
if (!scopeCommandMap[scope]) {
scopeCommandMap[scope] = [];
}
- scopeCommandMap[scope].push({
- command: cmd.command,
- description: ENV.I18N.command.help[cmd.command.substring(1)] || ""
- });
+ const desc = ENV.I18N.command.help[cmd.command.substring(1)] || "";
+ if (desc) {
+ scopeCommandMap[scope].push({
+ command: cmd.command,
+ description: desc
+ });
+ }
}
}
}
@@ -2340,7 +2486,7 @@ class ShareContext {
lastMessageKey;
configStoreKey;
groupAdminsKey;
- constructor(token, message) {
+ constructor(token, update) {
const botId = Number.parseInt(token.split(":")[0]);
const telegramIndex = ENV.TELEGRAM_AVAILABLE_TOKENS.indexOf(token);
if (telegramIndex === -1) {
@@ -2351,7 +2497,7 @@ class ShareContext {
}
this.botToken = token;
this.botId = botId;
- const id = message?.chat?.id;
+ const id = update.chatID;
if (id === void 0 || id === null) {
throw new Error("Chat id not found");
}
@@ -2361,20 +2507,20 @@ class ShareContext {
historyKey += `:${botId}`;
configStoreKey += `:${botId}`;
}
- switch (message.chat.type) {
+ switch (update.chatType) {
case "group":
case "supergroup":
- if (!ENV.GROUP_CHAT_BOT_SHARE_MODE && message.from?.id) {
- historyKey += `:${message.from.id}`;
- configStoreKey += `:${message.from.id}`;
+ if (!ENV.GROUP_CHAT_BOT_SHARE_MODE && update.fromUserID) {
+ historyKey += `:${update.fromUserID}`;
+ configStoreKey += `:${update.fromUserID}`;
}
this.groupAdminsKey = `group_admin:${id}`;
break;
}
- if (message?.chat.is_forum && message?.is_topic_message) {
- if (message?.message_thread_id) {
- historyKey += `:${message.message_thread_id}`;
- configStoreKey += `:${message.message_thread_id}`;
+ if (update.isForum && update.isTopicMessage) {
+ if (update.messageThreadID) {
+ historyKey += `:${update.messageThreadID}`;
+ configStoreKey += `:${update.messageThreadID}`;
}
}
this.chatHistoryKey = historyKey;
@@ -2389,8 +2535,9 @@ class WorkerContext {
this.USER_CONFIG = USER_CONFIG;
this.SHARE_CONTEXT = SHARE_CONTEXT;
}
- static async from(token, message) {
- const SHARE_CONTEXT = new ShareContext(token, message);
+ static async from(token, update) {
+ const context = new UpdateContext(update);
+ const SHARE_CONTEXT = new ShareContext(token, context);
const USER_CONFIG = Object.assign({}, ENV.USER_CONFIG);
try {
const userConfig = JSON.parse(await ENV.DATABASE.get(SHARE_CONTEXT.configStoreKey));
@@ -2401,6 +2548,31 @@ class WorkerContext {
return new WorkerContext(USER_CONFIG, SHARE_CONTEXT);
}
}
+class UpdateContext {
+ fromUserID;
+ chatID;
+ chatType;
+ isForum;
+ isTopicMessage;
+ messageThreadID;
+ constructor(update) {
+ if (update.message) {
+ this.fromUserID = update.message.from?.id;
+ this.chatID = update.message.chat.id;
+ this.chatType = update.message.chat.type;
+ this.isForum = update.message.chat.is_forum;
+ this.isTopicMessage = update.message.is_topic_message;
+ this.messageThreadID = update.message.message_thread_id;
+ } else if (update.callback_query) {
+ this.fromUserID = update.callback_query.from.id;
+ this.chatID = update.callback_query.message?.chat.id;
+ this.chatType = update.callback_query.message?.chat.type;
+ this.isForum = update.callback_query.message?.chat.is_forum;
+ } else {
+ console.error("Unknown update type");
+ }
+ }
+}
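// Dispatch sketch: a single Telegram update now feeds both entry points,
// whether it carries a message or a callback_query.
async function dispatch(token, update) {
  const sender = MessageSender.fromUpdate(token, update);  // message or callback_query context
  const context = await WorkerContext.from(token, update); // storage keys derived via UpdateContext
  return { sender, context };
}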
function checkMention(content, entities, botName, botId) {
let isMention = false;
@@ -2475,74 +2647,309 @@ ${message.text}`;
};
}
-class SaveLastMessage {
- handle = async (message, context) => {
- if (!ENV.DEBUG_MODE) {
- return null;
+class AgentListCallbackQueryHandler {
+ prefix = "al:";
+ needAuth = TELEGRAM_AUTH_CHECKER.shareModeGroup;
+ handle = async (query, data, context) => {
+ if (!query.message) {
+ throw new Error("no message");
+ }
+ const names = CHAT_AGENTS.filter((agent) => agent.enable(ENV.USER_CONFIG)).map((agent) => agent.name);
+ const sender = MessageSender.fromCallbackQuery(context.SHARE_CONTEXT.botToken, query);
+ const keyboards = [];
+ for (let i = 0; i < names.length; i += 2) {
+ const row = [];
+ for (let j = 0; j < 2; j++) {
+ const index = i + j;
+ if (index >= names.length) {
+ break;
+ }
+ row.push({
+ text: names[index],
+ callback_data: `ca:${JSON.stringify([names[index], 0])}`
+ });
+ }
+ keyboards.push(row);
}
- const lastMessageKey = `last_message:${context.SHARE_CONTEXT.chatHistoryKey}`;
- await ENV.DATABASE.put(lastMessageKey, JSON.stringify(message), { expirationTtl: 3600 });
- return null;
+ const params = {
+ chat_id: query.message.chat.id,
+ message_id: query.message.message_id,
+ text: ENV.I18N.callback_query.select_provider,
+ reply_markup: {
+ inline_keyboard: keyboards
+ }
+ };
+ return sender.editRawMessage(params);
};
}
-class OldMessageFilter {
- handle = async (message, context) => {
- if (!ENV.SAFE_MODE) {
- return null;
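+// Handles "ca:" callbacks: shows a paginated model list for the selected agent.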
+class ModelListCallbackQueryHandler {
+ prefix = "ca:";
+ needAuth = TELEGRAM_AUTH_CHECKER.shareModeGroup;
+ async handle(query, data, context) {
+ if (!query.message) {
+ throw new Error("no message");
}
- let idList = [];
- try {
- idList = JSON.parse(await ENV.DATABASE.get(context.SHARE_CONTEXT.lastMessageKey).catch(() => "[]")) || [];
- } catch (e) {
- console.error(e);
+ const sender = MessageSender.fromCallbackQuery(context.SHARE_CONTEXT.botToken, query);
+ const [agent, page] = JSON.parse(data.substring(this.prefix.length));
+ const conf = {
+ ...ENV.USER_CONFIG,
+ AI_PROVIDER: agent
+ };
+ const chatAgent = loadChatLLM(conf);
+ if (!chatAgent) {
+ throw new Error(`agent not found: ${agent}`);
+ }
+ const models = await chatAgent.modelList(conf);
+ const keyboard = [];
+ const maxRow = 10;
+ const maxCol = Math.max(1, Math.min(5, ENV.MODEL_LIST_COLUMNS));
+ const maxPage = Math.ceil(models.length / maxRow / maxCol);
+ let currentRow = [];
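+ // Fill a grid of up to maxRow rows and maxCol columns with the models on the requested page.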
+ for (let i = page * maxRow * maxCol; i < models.length; i++) {
+ if (i % maxCol === 0) {
+ keyboard.push(currentRow);
+ currentRow = [];
+ }
+ if (keyboard.length >= maxRow) {
+ break;
+ }
+ currentRow.push({
+ text: models[i],
+ callback_data: `cm:${JSON.stringify([agent, models[i]])}`
+ });
}
- if (idList.includes(message.message_id)) {
- throw new Error("Ignore old message");
- } else {
- idList.push(message.message_id);
- if (idList.length > 100) {
- idList.shift();
+ if (currentRow.length > 0) {
+ keyboard.push(currentRow);
+ currentRow = [];
+ }
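+ // Pagination row: previous page, "current/total", next page, and "⇤" back to the agent list.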
+ keyboard.push([
+ {
+ text: "<",
+ callback_data: `ca:${JSON.stringify([agent, Math.max(page - 1, 0)])}`
+ },
+ {
+ text: `${page + 1}/${maxPage}`,
+ callback_data: `ca:${JSON.stringify([agent, page])}`
+ },
+ {
+ text: ">",
+ callback_data: `ca:${JSON.stringify([agent, Math.min(page + 1, maxPage - 1)])}`
+ },
+ {
+ text: "⇤",
+ callback_data: `al:`
}
- await ENV.DATABASE.put(context.SHARE_CONTEXT.lastMessageKey, JSON.stringify(idList));
+ ]);
- return null;
+ const message = {
+ chat_id: query.message.chat.id,
+ message_id: query.message.message_id,
+ text: `${agent} ${ENV.I18N.callback_query.select_model}`,
+ reply_markup: {
+ inline_keyboard: keyboard
+ }
+ };
+ return sender.editRawMessage(message);
+ }
+}
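+// Handles "cm:" callbacks: persists the selected provider and model to the user config.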
+class ModelChangeCallbackQueryHandler {
+ prefix = "cm:";
+ needAuth = TELEGRAM_AUTH_CHECKER.shareModeGroup;
+ async handle(query, data, context) {
+ if (!query.message) {
+ throw new Error("no message");
+ }
+ const sender = MessageSender.fromCallbackQuery(context.SHARE_CONTEXT.botToken, query);
+ const [agent, model] = JSON.parse(data.substring(this.prefix.length));
+ const conf = {
+ ...ENV.USER_CONFIG,
+ AI_PROVIDER: agent
+ };
+ const chatAgent = loadChatLLM(conf);
+ if (!chatAgent) {
+ throw new Error(`agent not found: ${agent}`);
+ }
+ if (!chatAgent?.modelKey) {
+ throw new Error(`modelKey not found: ${agent}`);
+ }
+ await setUserConfig({
+ AI_PROVIDER: agent,
+ [chatAgent.modelKey]: model
+ }, context);
+ const message = {
+ chat_id: query.message.chat.id,
+ message_id: query.message.message_id,
+ text: `${ENV.I18N.callback_query.change_model} ${agent} > ${model}`
+ };
+ return sender.editRawMessage(message);
+ }
+}
+
+const QUERY_HANDLERS = [
+ new AgentListCallbackQueryHandler(),
+ new ModelListCallbackQueryHandler(),
+ new ModelChangeCallbackQueryHandler()
+];
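+// Dispatches a callback query to the first handler whose prefix matches, enforcing per-handler auth first.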
+async function handleCallbackQuery(callbackQuery, context) {
+ const sender = MessageSender.fromCallbackQuery(context.SHARE_CONTEXT.botToken, callbackQuery);
+ const answerCallbackQuery = (msg) => {
+ return sender.api.answerCallbackQuery({
+ callback_query_id: callbackQuery.id,
+ text: msg
+ });
};
+ try {
+ if (!callbackQuery.message) {
+ return null;
+ }
+ const chatId = callbackQuery.message.chat.id;
+ const speakerId = callbackQuery.from?.id || chatId;
+ const chatType = callbackQuery.message.chat.type;
+ for (const handler of QUERY_HANDLERS) {
+ if (handler.needAuth) {
+ const roleList = handler.needAuth(chatType);
+ if (roleList) {
+ const chatRole = await loadChatRoleWithContext(chatId, speakerId, context);
+ if (chatRole === null) {
+ return answerCallbackQuery("ERROR: Get chat role failed");
+ }
+ if (!roleList.includes(chatRole)) {
+ return answerCallbackQuery(`ERROR: Permission denied, need ${roleList.join(" or ")}`);
+ }
+ }
+ }
+ if (callbackQuery.data) {
+ if (callbackQuery.data.startsWith(handler.prefix)) {
+ return handler.handle(callbackQuery, callbackQuery.data, context);
+ }
+ }
+ }
+ } catch (e) {
+ return answerCallbackQuery(`ERROR: ${e.message}`);
+ }
+ return null;
}
+
class EnvChecker {
- handle = async (message, context) => {
+ handle = async (update, context) => {
if (!ENV.DATABASE) {
- return MessageSender.from(context.SHARE_CONTEXT.botToken, message).sendPlainText("DATABASE Not Set");
+ return MessageSender.fromUpdate(context.SHARE_CONTEXT.botToken, update).sendPlainText("DATABASE Not Set");
}
return null;
};
}
class WhiteListFilter {
- handle = async (message, context) => {
+ handle = async (update, context) => {
if (ENV.I_AM_A_GENEROUS_PERSON) {
return null;
}
- const sender = MessageSender.from(context.SHARE_CONTEXT.botToken, message);
- const text = `You are not in the white list, please contact the administrator to add you to the white list. Your chat_id: ${message.chat.id}`;
- if (message.chat.type === "private") {
- if (!ENV.CHAT_WHITE_LIST.includes(`${message.chat.id}`)) {
+ const sender = MessageSender.fromUpdate(context.SHARE_CONTEXT.botToken, update);
+ let chatType = "";
+ let chatID = 0;
+ if (update.message) {
+ chatType = update.message.chat.type;
+ chatID = update.message.chat.id;
+ } else if (update.callback_query?.message) {
+ chatType = update.callback_query.message.chat.type;
+ chatID = update.callback_query.message.chat.id;
+ }
+ if (!chatType || !chatID) {
+ throw new Error("Invalid chat type or chat id");
+ }
+ const text = `You are not in the white list, please contact the administrator to add you to the white list. Your chat_id: ${chatID}`;
+ if (chatType === "private") {
+ if (!ENV.CHAT_WHITE_LIST.includes(`${chatID}`)) {
return sender.sendPlainText(text);
}
return null;
}
- if (isTelegramChatTypeGroup(message.chat.type)) {
+ if (isTelegramChatTypeGroup(chatType)) {
if (!ENV.GROUP_CHAT_BOT_ENABLE) {
throw new Error("Not support");
}
- if (!ENV.CHAT_GROUP_WHITE_LIST.includes(`${message.chat.id}`)) {
+ if (!ENV.CHAT_GROUP_WHITE_LIST.includes(`${chatID}`)) {
return sender.sendPlainText(text);
}
return null;
}
return sender.sendPlainText(
- `Not support chat type: ${message.chat.type}`
+ `Not support chat type: ${chatType}`
);
};
}
+class Update2MessageHandler {
+ messageHandlers;
+ constructor(messageHandlers) {
+ this.messageHandlers = messageHandlers;
+ }
+ loadMessage(body) {
+ if (body.edited_message) {
+ throw new Error("Ignore edited message");
+ }
+ if (body.message) {
+ return body.message;
+ } else {
+ throw new Error("Invalid message");
+ }
+ }
+ handle = async (update, context) => {
+ const message = this.loadMessage(update);
+ if (!message) {
+ return null;
+ }
+ for (const handler of this.messageHandlers) {
+ const result = await handler.handle(message, context);
+ if (result) {
+ return result;
+ }
+ }
+ return null;
+ };
+}
+class CallbackQueryHandler {
+ handle = async (update, context) => {
+ if (update.callback_query) {
+ return handleCallbackQuery(update.callback_query, context);
+ }
+ return null;
+ };
+}
+class SaveLastMessage {
+ handle = async (message, context) => {
+ if (!ENV.DEBUG_MODE) {
+ return null;
+ }
+ const lastMessageKey = `last_message:${context.SHARE_CONTEXT.chatHistoryKey}`;
+ await ENV.DATABASE.put(lastMessageKey, JSON.stringify(message), { expirationTtl: 3600 });
+ return null;
+ };
+}
+class OldMessageFilter {
+ handle = async (message, context) => {
+ if (!ENV.SAFE_MODE) {
+ return null;
+ }
+ let idList = [];
+ try {
+ idList = JSON.parse(await ENV.DATABASE.get(context.SHARE_CONTEXT.lastMessageKey).catch(() => "[]")) || [];
+ } catch (e) {
+ console.error(e);
+ }
+ if (idList.includes(message.message_id)) {
+ throw new Error("Ignore old message");
+ } else {
+ idList.push(message.message_id);
+ if (idList.length > 100) {
+ idList.shift();
+ }
+ await ENV.DATABASE.put(context.SHARE_CONTEXT.lastMessageKey, JSON.stringify(idList));
+ }
+ return null;
+ };
+}
class MessageFilter {
handle = async (message, context) => {
if (message.text) {
@@ -2566,32 +2973,24 @@ class CommandHandler {
};
}
-function loadMessage(body) {
- if (body.edited_message) {
- throw new Error("Ignore edited message");
- }
- if (body.message) {
- return body?.message;
- } else {
- throw new Error("Invalid message");
- }
-}
const SHARE_HANDLER = [
new EnvChecker(),
new WhiteListFilter(),
- new MessageFilter(),
- new GroupMention(),
- new OldMessageFilter(),
- new SaveLastMessage(),
- new CommandHandler(),
- new ChatHandler()
+ new CallbackQueryHandler(),
+ new Update2MessageHandler([
+ new MessageFilter(),
+ new GroupMention(),
+ new OldMessageFilter(),
+ new SaveLastMessage(),
+ new CommandHandler(),
+ new ChatHandler()
+ ])
];
async function handleUpdate(token, update) {
- const message = loadMessage(update);
- const context = await WorkerContext.from(token, message);
+ const context = await WorkerContext.from(token, update);
for (const handler of SHARE_HANDLER) {
try {
- const result = await handler.handle(message, context);
+ const result = await handler.handle(update, context);
if (result) {
return result;
}
diff --git a/doc/cn/CHANGELOG.md b/doc/cn/CHANGELOG.md
index 321332ff..4a9626c2 100644
--- a/doc/cn/CHANGELOG.md
+++ b/doc/cn/CHANGELOG.md
@@ -1,5 +1,8 @@
# 更新日志
+- v1.10.0
+ - 使用 InlineKeyboards 切换模型
+
- v1.9.0
- 添加插件系统
diff --git a/doc/cn/CONFIG.md b/doc/cn/CONFIG.md
index 28cfe207..1cfdc27a 100644
--- a/doc/cn/CONFIG.md
+++ b/doc/cn/CONFIG.md
@@ -80,12 +80,12 @@ OPENAI_API_BASE,GOOGLE_COMPLETIONS_API,MISTRAL_API_BASE,COHERE_API_BASE,ANTHROPI
### 通用配置
-| KEY | 名称 | 默认值 | 描述 |
-|--------------------------|-------------|-------------|------------------------------------------------------------------------|
-| AI_PROVIDER | AI提供商 | `auto` | 可选值 `auto, openai, azure, workers, gemini, mistral, cohere, anthropic` |
-| AI_IMAGE_PROVIDER | AI图片提供商 | `auto` | 可选值 `auto, openai, azure, workers` |
-| SYSTEM_INIT_MESSAGE | 全局默认初始化消息 | `你是一个得力的助手` | 根据绑定的语言自动选择默认值 |
-| SYSTEM_INIT_MESSAGE_ROLE | 全局默认初始化消息角色 | `system` | |
+| KEY | 名称 | 默认值 | 描述 |
+|------------------------------|-----------------|-------------|------------------------------------------------------------------------|
+| AI_PROVIDER | AI提供商 | `auto` | 可选值 `auto, openai, azure, workers, gemini, mistral, cohere, anthropic` |
+| AI_IMAGE_PROVIDER | AI图片提供商 | `auto` | 可选值 `auto, openai, azure, workers` |
+| SYSTEM_INIT_MESSAGE | 全局默认初始化消息 | `你是一个得力的助手` | 根据绑定的语言自动选择默认值 |
+| ~~SYSTEM_INIT_MESSAGE_ROLE~~ | ~~全局默认初始化消息角色~~ | `system` | 废弃 |
### OpenAI
@@ -95,7 +95,7 @@ OPENAI_API_BASE,GOOGLE_COMPLETIONS_API,MISTRAL_API_BASE,COHERE_API_BASE,ANTHROPI
| OPENAI_CHAT_MODEL | OpenAI的模型名称 | `gpt-4o-mini` |
| OPENAI_API_BASE | OpenAI API BASE | `https://api.openai.com/v1` |
| OPENAI_API_EXTRA_PARAMS | OpenAI API Extra Params | `{}` |
-| DALL_E_MODEL | DALL-E的模型名称 | `dall-e-2` |
+| DALL_E_MODEL | DALL-E的模型名称 | `dall-e-3` |
| DALL_E_IMAGE_SIZE | DALL-E图片尺寸 | `512x512` |
| DALL_E_IMAGE_QUALITY | DALL-E图片质量 | `standard` |
| DALL_E_IMAGE_STYLE | DALL-E图片风格 | `vivid` |
@@ -106,11 +106,15 @@ OPENAI_API_BASE,GOOGLE_COMPLETIONS_API,MISTRAL_API_BASE,COHERE_API_BASE,ANTHROPI
> AZURE_DALLE_API `https://RESOURCE_NAME.openai.azure.com/openai/deployments/MODEL_NAME/images/generations?api-version=VERSION_NAME`
-| KEY | 名称 | 默认值 |
-|--------------------------|-------------------------|------------------------------------------------------------|
-| AZURE_API_KEY | Azure API Key | `null` |
-| AZURE_COMPLETIONS_API | Azure Completions API | `null` |
-| AZURE_DALLE_API | Azure DallE API | `null` |
+| KEY | 名称 | 默认值 |
+|---------------------------|---------------------------|--------------|
+| AZURE_API_KEY | Azure API Key | `null` |
+| ~~AZURE_COMPLETIONS_API~~ | ~~Azure Completions API~~ | `null` |
+| ~~AZURE_DALLE_API~~ | ~~Azure DallE API~~ | `null` |
+| AZURE_RESOURCE_NAME | Azure 资源名称 | `null` |
+| AZURE_CHAT_MODEL | Azure 对话模型 | `null` |
+| AZURE_IMAGE_MODEL | Azure 图片模型 | `null` |
+| AZURE_API_VERSION | Azure API 版本号 | `2024-06-01` |
### Workers
@@ -124,13 +128,14 @@ OPENAI_API_BASE,GOOGLE_COMPLETIONS_API,MISTRAL_API_BASE,COHERE_API_BASE,ANTHROPI
### Gemini
-cloudflare workers 暂时不支持访问
+> cloudflare workers 暂时不支持访问
-| KEY | 名称 | 默认值 |
-|--------------------------|-------------------------|------------------------------------------------------------|
-| GOOGLE_API_KEY | Google Gemini API Key | `null` |
-| GOOGLE_COMPLETIONS_API | Google Gemini API | `https://generativelanguage.googleapis.com/v1beta/models/` |
-| GOOGLE_COMPLETIONS_MODEL | Google Gemini Model | `gemini-pro` |
+| KEY | 名称 | 默认值 |
+|----------------------------|----------------------------------|------------------------------------------------------------|
+| GOOGLE_API_KEY | Google Gemini API Key | `null` |
+| ~~GOOGLE_COMPLETIONS_API~~ | ~~Google Gemini API~~ | `https://generativelanguage.googleapis.com/v1beta/models/` |
+| GOOGLE_COMPLETIONS_MODEL | Google Gemini Model | `gemini-pro` |
+| GOOGLE_API_BASE | 支持Openai API 格式的 Gemini API Base | `https://generativelanguage.googleapis.com/v1beta` |
### Mistral
@@ -158,19 +163,20 @@ cloudflare workers 暂时不支持访问
## 支持命令
-| 命令 | 说明 | 示例 |
-|:-----------|:--------------------------|:------------------------------------------------|
-| `/help` | 获取命令帮助 | `/help` |
-| `/new` | 发起新的对话 | `/new` |
-| `/start` | 获取你的ID,并发起新的对话 | `/start` |
-| `/img` | 生成一张图片 | `/img 图片描述` |
-| `/version` | 获取当前版本号,判断是否需要更新 | `/version` |
-| `/setenv` | 设置用户配置, 详情见`用户配置` | `/setenv KEY=VALUE` |
-| `/setenvs` | 批量设置用户配置, 详情见`用户配置` | `/setenvs {"KEY1": "VALUE1", "KEY2": "VALUE2"}` |
-| `/delenv` | 删除用户配置 | `/delenv KEY` |
-| `/system` | 查看当前一些系统信息 | `/system` |
-| `/redo` | 修改上一个提问或者换一个回答 | `/redo 修改过的内容` 或者 `/redo` |
-| `/echo` | 回显消息,仅开发模式可用 | `/echo` |
+| 命令 | 说明 | 示例 |
+|:-----------|:--------------------|:------------------------------------------------|
+| `/help` | 获取命令帮助 | `/help` |
+| `/new` | 发起新的对话 | `/new` |
+| `/start` | 获取你的ID,并发起新的对话 | `/start` |
+| `/img` | 生成一张图片 | `/img 图片描述` |
+| `/version` | 获取当前版本号,判断是否需要更新 | `/version` |
+| `/setenv` | 设置用户配置, 详情见`用户配置` | `/setenv KEY=VALUE` |
+| `/setenvs` | 批量设置用户配置, 详情见`用户配置` | `/setenvs {"KEY1": "VALUE1", "KEY2": "VALUE2"}` |
+| `/delenv` | 删除用户配置 | `/delenv KEY` |
+| `/system` | 查看当前一些系统信息 | `/system` |
+| `/redo` | 修改上一个提问或者换一个回答 | `/redo 修改过的内容` 或者 `/redo` |
+| `/models` | 切换对话模型 | `/models` 后通过内置菜单选择模型 |
+| `/echo` | 回显消息,仅开发模式可用 | `/echo` |
## 自定义命令
@@ -223,3 +229,21 @@ COMMAND_DESCRIPTION_cn2en = '将对话内容翻译成英文'
```
如果你想将自定义命令绑定到telegram的菜单中,你可以添加如下环境变量`COMMAND_SCOPE_azure = "all_private_chats,all_group_chats,all_chat_administrators"`,这样插件就会在所有的私聊,群聊和群组中生效。
+
+
+## 模型列表
+
+支持使用 `/models` 命令获取支持的模型列表,并且通过菜单选择切换。
+模型列表支持的配置项的类型为 URL 或者 json 数组。 如果是 URL,会自动请求获取模型列表,如果是 json 数组,会直接使用该数组。
+当前支持从URL获取模型列表的AI提供商有 `openai, workers, mistral, cohere`。只支持 json 数组的AI提供商有 `azure, gemini, anthropic`。
+当支持从URL获取模型列表的AI提供商的模型列表配置项为空时候,会默认根据其 base api 自动拼接获取模型列表的URL。
+
+| AI提供商 | 模型列表配置项 | 默认值 | 自动拼接生成的值 |
+|:----------|--------------------------------|-----------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|
+| openai | OPENAI_CHAT_MODELS_LIST | `` | `${OPENAI_API_BASE}/models` |
+| workers | WORKERS_CHAT_MODELS_LIST | `` | `https://api.cloudflare.com/client/v4/accounts/${CLOUDFLARE_ACCOUNT_ID}/ai/models/search?task=Text%20Generation` |
+| mistral | MISTRAL_CHAT_MODELS_LIST | `` | `${MISTRAL_API_BASE}/models` |
+| cohere | COHERE_CHAT_MODELS_LIST | `` | `https://api.cohere.com/v1/models` |
+| azure | AZURE_CHAT_MODELS_LIST | `[]` | |
+| gemini | GOOGLE_COMPLETIONS_MODELS_LIST | `["gemini-1.5-flash"]` | |
+| anthropic | ANTHROPIC_CHAT_MODELS_LIST | `["claude-3-5-sonnet-latest", "claude-3-5-haiku-latest"]` | |
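+
+例如,可按上表覆盖默认值(KEY 来自上表,取值仅为示例):
+
+```
+OPENAI_CHAT_MODELS_LIST = 'https://api.openai.com/v1/models'
+ANTHROPIC_CHAT_MODELS_LIST = '["claude-3-5-sonnet-latest", "claude-3-5-haiku-latest"]'
+```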
diff --git a/doc/cn/VERCEL.md b/doc/cn/VERCEL.md
index a5d2c4ee..49cba6f1 100644
--- a/doc/cn/VERCEL.md
+++ b/doc/cn/VERCEL.md
@@ -1,6 +1,6 @@
# 使用Vercel部署 (实验性)
-`/src/adapter/vercel`中提供了示例代码,可以完成Vercel部署,和基础的功能测试。但是无法保证所有功能都能正常工作。
+`/src/entry/vercel`中提供了示例代码,可以完成Vercel部署,和基础的功能测试。但是无法保证所有功能都能正常工作。
### 自动部署
diff --git a/doc/en/CHANGELOG.md b/doc/en/CHANGELOG.md
index d3d72e98..cce3c21c 100644
--- a/doc/en/CHANGELOG.md
+++ b/doc/en/CHANGELOG.md
@@ -1,5 +1,8 @@
# Changelog
+- v1.10.0
+ - Switching Models with InlineKeyboards
+
- v1.9.0
- Add plugin system
diff --git a/doc/en/CONFIG.md b/doc/en/CONFIG.md
index a9254c1a..cfd42a48 100644
--- a/doc/en/CONFIG.md
+++ b/doc/en/CONFIG.md
@@ -80,12 +80,12 @@ Each user's custom configuration can only be modified by sending a message throu
### General configuration
-| KEY | Name | Default | Description |
-|--------------------------|--------------------------------------|-------------------------------|----------------------------------------------------------------------------|
-| AI_PROVIDER | AI provider | `auto` | Options `auto, openai, azure, workers, gemini, mistral, cohere, anthropic` |
-| AI_IMAGE_PROVIDER | AI image provider | `auto` | Options `auto, openai, azure, workers` |
-| SYSTEM_INIT_MESSAGE | Default initialization message. | `You are a helpful assistant` | Automatically select default values based on the bound language. |
-| SYSTEM_INIT_MESSAGE_ROLE | Default initialization message role. | `system` | |
+| KEY | Name | Default | Description |
+|------------------------------|------------------------------------------|-------------------------------|----------------------------------------------------------------------------|
+| AI_PROVIDER | AI provider | `auto` | Options `auto, openai, azure, workers, gemini, mistral, cohere, anthropic` |
+| AI_IMAGE_PROVIDER | AI image provider | `auto` | Options `auto, openai, azure, workers` |
+| SYSTEM_INIT_MESSAGE | Default initialization message. | `You are a helpful assistant` | Automatically select default values based on the bound language. |
+| ~~SYSTEM_INIT_MESSAGE_ROLE~~ | ~~Default initialization message role.~~ | `system` | Deprecated |
### OpenAI
@@ -95,7 +95,7 @@ Each user's custom configuration can only be modified by sending a message throu
| OPENAI_CHAT_MODEL | OpenAI Model | `gpt-4o-mini` |
| OPENAI_API_BASE | OpenAI API BASE | `https://api.openai.com/v1` |
| OPENAI_API_EXTRA_PARAMS | OpenAI API Extra Params | `{}` |
-| DALL_E_MODEL | DALL-E model name. | `dall-e-2` |
+| DALL_E_MODEL | DALL-E model name. | `dall-e-3` |
| DALL_E_IMAGE_SIZE | DALL-E Image size | `512x512` |
| DALL_E_IMAGE_QUALITY | DALL-E Image quality | `standard` |
| DALL_E_IMAGE_STYLE | DALL-E Image style | `vivid` |
@@ -106,11 +106,16 @@ Each user's custom configuration can only be modified by sending a message throu
> AZURE_DALLE_API `https://RESOURCE_NAME.openai.azure.com/openai/deployments/MODEL_NAME/images/generations?api-version=VERSION_NAME`
-| KEY | Name | Default |
-|-----------------------|-----------------------|---------|
-| AZURE_API_KEY | Azure API Key | `null` |
-| AZURE_COMPLETIONS_API | Azure Completions API | `null` |
-| AZURE_DALLE_API | Azure DallE API | `null` |
+| KEY | Name | Default |
+|---------------------------|---------------------------|--------------|
+| AZURE_API_KEY | Azure API Key | `null` |
+| ~~AZURE_COMPLETIONS_API~~ | ~~Azure Completions API~~ | `null` |
+| ~~AZURE_DALLE_API~~ | ~~Azure DallE API~~ | `null` |
+| AZURE_RESOURCE_NAME | Azure Resource Name | `null` |
+| AZURE_CHAT_MODEL | Azure Chat Model | `null` |
+| AZURE_IMAGE_MODEL | Azure Image Model | `null` |
+| AZURE_API_VERSION | Azure API version number | `2024-06-01` |
+
### Workers
@@ -123,14 +128,17 @@ Each user's custom configuration can only be modified by sending a message throu
### Gemini
-| KEY | Name | Default |
-|--------------------------|-----------------------|------------------------------------------------------------|
-| GOOGLE_API_KEY | Google Gemini API Key | `null` |
-| GOOGLE_COMPLETIONS_API | Google Gemini API | `https://generativelanguage.googleapis.com/v1beta/models/` |
-| GOOGLE_COMPLETIONS_MODEL | Google Gemini Model | `gemini-pro` |
-
> Cloudflare Workers currently do not support accessing Gemini.
+| KEY | Name | Default |
+|----------------------------|-----------------------------------------------|------------------------------------------------------------|
+| GOOGLE_API_KEY | Google Gemini API Key | `null` |
+| ~~GOOGLE_COMPLETIONS_API~~ | ~~Google Gemini API~~ | `https://generativelanguage.googleapis.com/v1beta/models/` |
+| GOOGLE_COMPLETIONS_MODEL | Google Gemini Model | `gemini-pro` |
+| GOOGLE_API_BASE | Supports Gemini API Base in OpenAI API format | `https://generativelanguage.googleapis.com/v1beta` |
+
+
+
### Mistral
| KEY | Name | Default |
@@ -157,19 +165,20 @@ Each user's custom configuration can only be modified by sending a message throu
## Command
-| Command | Description | Example |
-|:-----------|:------------------------------------------------------------------------|:------------------------------------------------|
-| `/help` | Get command help. | `/help` |
-| `/new` | Initiate a new conversation. | `/new` |
-| `/start` | Get your ID and start a new conversation. | `/start` |
-| `/img` | Generate an image. | `/img Image Description` |
-| `/version` | Get the current version number and determine if an update is needed. | `/version` |
-| `/setenv` | Set user configuration, see `User Configuration` for details. | `/setenv KEY=VALUE` |
-| `/setenvs` | Batch setting user configuration, see "User Configuration" for details. | `/setenvs {"KEY1": "VALUE1", "KEY2": "VALUE2"}` |
-| `/delenv` | Delete user configuration. | `/delenv KEY` |
-| `/system` | View some current system information. | `/system` |
-| `/redo` | Edit the previous question or provide a different answer. | `/redo Modified content.` or `/redo` |
-| `/echo` | Echo message, only available in development mode. | `/echo` |
+| Command | Description | Example |
+|:-----------|:------------------------------------------------------------------------|:------------------------------------------------------------------|
+| `/help` | Get command help. | `/help` |
+| `/new` | Initiate a new conversation. | `/new` |
+| `/start` | Get your ID and start a new conversation. | `/start` |
+| `/img` | Generate an image. | `/img Image Description` |
+| `/version` | Get the current version number and determine if an update is needed. | `/version` |
+| `/setenv` | Set user configuration, see `User Configuration` for details. | `/setenv KEY=VALUE` |
+| `/setenvs` | Batch setting user configuration, see "User Configuration" for details. | `/setenvs {"KEY1": "VALUE1", "KEY2": "VALUE2"}` |
+| `/delenv` | Delete user configuration. | `/delenv KEY` |
+| `/system` | View some current system information. | `/system` |
+| `/redo` | Edit the previous question or provide a different answer. | `/redo Modified content.` or `/redo` |
+| `/models` | Switch the chat model. | `/models`, then pick a model from the inline keyboard menu. |
+| `/echo` | Echo message, only available in development mode. | `/echo` |
## Custom command
@@ -221,4 +230,22 @@ COMMAND_DESCRIPTION_gpt4 = 'Switch AI provider to OpenAI GPT-4.'
COMMAND_DESCRIPTION_cn2en = 'Translate the conversation content into English.'
```
-If you want to bind custom commands to the menu of Telegram, you can add the following environment variable `COMMAND_SCOPE_azure = "all_private_chats,all_group_chats,all_chat_administrators"`, so that the plugin will take effect in all private chats, group chats and groups.
\ No newline at end of file
+If you want to bind custom commands to the menu of Telegram, you can add the following environment variable `COMMAND_SCOPE_azure = "all_private_chats,all_group_chats,all_chat_administrators"`, so that the plugin will take effect in all private chats, group chats and groups.
+
+
+## Model List
+
+The `/models` command lists the supported models and lets you switch between them via an inline menu.
+Each model list configuration item accepts either a URL or a JSON array. A URL is fetched automatically to obtain the model list; a JSON array is used directly.
+Providers that can fetch the model list from a URL: `openai, workers, mistral, cohere`. Providers that only accept a JSON array: `azure, gemini, anthropic`.
+For a URL-capable provider, leaving its model list configuration empty derives the fetch URL from the provider's API base, as shown in the table below.
+
+| AI provider | Model List Configuration Key | Default value | Automatically generated value |
+|:------------|--------------------------------|-----------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|
+| openai | OPENAI_CHAT_MODELS_LIST | `` | `${OPENAI_API_BASE}/models` |
+| workers | WORKERS_CHAT_MODELS_LIST | `` | `https://api.cloudflare.com/client/v4/accounts/${CLOUDFLARE_ACCOUNT_ID}/ai/models/search?task=Text%20Generation` |
+| mistral | MISTRAL_CHAT_MODELS_LIST | `` | `${MISTRAL_API_BASE}/models` |
+| cohere | COHERE_CHAT_MODELS_LIST | `` | `https://api.cohere.com/v1/models` |
+| azure | AZURE_CHAT_MODELS_LIST | `[]` | |
+| gemini | GOOGLE_COMPLETIONS_MODELS_LIST | `["gemini-1.5-flash"]` | |
+| anthropic | ANTHROPIC_CHAT_MODELS_LIST | `["claude-3-5-sonnet-latest", "claude-3-5-haiku-latest"]` | |
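+
+For example, a minimal override of the defaults above (the keys come from the table; the values are only illustrative):
+
+```
+OPENAI_CHAT_MODELS_LIST = 'https://api.openai.com/v1/models'
+ANTHROPIC_CHAT_MODELS_LIST = '["claude-3-5-sonnet-latest", "claude-3-5-haiku-latest"]'
+```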
diff --git a/doc/en/VERCEL.md b/doc/en/VERCEL.md
index cf4fda4c..1994ac8b 100644
--- a/doc/en/VERCEL.md
+++ b/doc/en/VERCEL.md
@@ -1,6 +1,6 @@
# Deploy using Vercel (experimental)
-The `/src/adapter/vercel` provides sample code that can complete Vercel deployment and basic functional testing. However, it cannot guarantee that all functions will work properly.
+The `/src/entry/vercel` provides sample code that can complete Vercel deployment and basic functional testing. However, it cannot guarantee that all functions will work properly.
### Automatic deployment
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 24201fbe..932f99f6 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -6,4 +6,6 @@ services:
- "8787:8787"
volumes:
- ./config.json:/app/config.json:ro # change `./config.json` to your local path
- - ./wrangler.toml:/app/config.toml:ro # change `./wrangler.toml` to your local path
+ - ./wrangler.toml:/app/wrangler.toml:ro # change `./wrangler.toml` to your local path
+ network_mode: "host" # use host networking if the bot must reach a proxy listening on the host
+ entrypoint: ["npm", "run", "start:dist"]
\ No newline at end of file
diff --git a/package.json b/package.json
index 012f4cd0..65af9e31 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
{
"name": "chatgpt-telegram-workers",
"type": "module",
- "version": "1.9.4",
+ "version": "1.10.0",
"description": "The easiest and quickest way to deploy your own ChatGPT Telegram bot is to use a single file and simply copy and paste it. There is no need for any dependencies, local development environment configuration, domain names, or servers.",
"author": "tbxark ",
"license": "MIT",
@@ -21,41 +21,53 @@
],
"scripts": {
"lint": "eslint --fix *.js *.ts src plugins scripts",
+ "version": "tsx src/vite/version/main.ts",
"build": "vite build",
- "build:local": "BUILD_MODE=local vite build",
- "build:docker": "npm run build:local && cd dist && docker build -t chatgpt-telegram-workers:latest .",
- "build:dockerx": "npm run build:local && cd dist && docker build --platform linux/amd64,linux/arm64 -t tbxark/chatgpt-telegram-workers:latest --push .",
- "build:vercel": "BUILD_MODE=vercel vite build",
- "build:pack": "BUILD_MODE=pack vite build",
+ "build:local": "ENTRY=src/entry/local/index.ts vite build",
+ "build:docker": "docker build -t chatgpt-telegram-workers:latest .",
+ "build:dockerx": "docker build --platform linux/amd64,linux/arm64 -t tbxark/chatgpt-telegram-workers:latest --push .",
+ "build:vercel": "ENTRY=src/entry/vercel/index.ts vite build",
+ "build:pack": "TYPES=true FORMATS=es,cjs vite build",
"deploy:dist": "wrangler deploy",
"deploy:build": "npm run build && wrangler deploy",
"deploy:vercel": "vercel deploy --prod",
- "deploy:plugin": "BUILD_MODE=plugins-page vite build && wrangler pages deploy plugins --project-name=interpolate-test --branch=main",
- "start:dist": "node dist/index.js",
- "start:local": "CONFIG_PATH=./config.json TOML_PATH=./wrangler.toml tsx src/adapter/local/index.ts",
+ "deploy:plugin": "vite build -c plugins/vite.config.ts && wrangler pages deploy plugins --project-name=interpolate-test --branch=main",
+ "start:dist": "CONFIG_PATH=./config.json TOML_PATH=./wrangler.toml node dist/index.js",
+ "start:local": "CONFIG_PATH=./config.json TOML_PATH=./wrangler.toml tsx src/entry/local/index.ts",
"start:debug": "wrangler dev --local",
- "prepare:vercel": "tsx ./scripts/plugins/vercel/setenv.ts",
+ "prepare:vercel": "tsx src/vite/vercel/setenv.ts",
"wrangler": "wrangler",
"test": "tsx ./src/agent/index.test.ts"
},
"dependencies": {
+ "@ai-sdk/anthropic": "^0.0.56",
+ "@ai-sdk/azure": "^0.0.52",
+ "@ai-sdk/cohere": "^0.0.28",
+ "@ai-sdk/google": "^0.0.55",
+ "@ai-sdk/mistral": "^0.0.46",
+ "@ai-sdk/openai": "^0.0.72",
+ "ai": "^3.4.33",
"cloudflare-worker-adapter": "^1.3.3"
},
"devDependencies": {
- "@antfu/eslint-config": "^3.6.2",
+ "@antfu/eslint-config": "^3.8.0",
"@rollup/plugin-node-resolve": "^15.2.3",
- "@types/node": "^22.5.5",
- "@vercel/node": "^3.2.14",
- "eslint": "^9.10.0",
+ "@types/node": "^22.9.0",
+ "@types/react": "^18.3.11",
+ "@vercel/node": "^3.2.24",
+ "eslint": "^9.14.0",
"eslint-plugin-format": "^0.1.2",
+ "openai": "^4.68.1",
+ "react-dom": "^18.3.1",
"rollup-plugin-cleanup": "^3.2.1",
"rollup-plugin-node-externals": "^7.1.3",
- "telegram-bot-api-types": "^7.9.12",
- "tsx": "^4.19.1",
- "typescript": "^5.6.2",
- "vite": "^5.4.3",
+ "stylelint": "^16.10.0",
+ "telegram-bot-api-types": "^7.11.0",
+ "tsx": "^4.19.2",
+ "typescript": "^5.6.3",
+ "vite": "^5.4.11",
"vite-plugin-checker": "^0.8.0",
- "vite-plugin-dts": "^4.2.1",
- "wrangler": "^3.78.5"
+ "vite-plugin-dts": "^4.3.0",
+ "wrangler": "^3.86.1"
}
}
diff --git a/plugins/vite.config.ts b/plugins/vite.config.ts
new file mode 100644
index 00000000..aa918f8d
--- /dev/null
+++ b/plugins/vite.config.ts
@@ -0,0 +1,32 @@
+import * as path from 'node:path';
+import { nodeResolve } from '@rollup/plugin-node-resolve';
+import cleanup from 'rollup-plugin-cleanup';
+import nodeExternals from 'rollup-plugin-node-externals';
+import { defineConfig } from 'vite';
+import checker from 'vite-plugin-checker';
+
+export default defineConfig({
+ plugins: [
+ nodeResolve({
+ preferBuiltins: true,
+ }),
+ cleanup({
+ comments: 'none',
+ extensions: ['js', 'ts'],
+ }),
+ checker({
+ typescript: true,
+ }),
+ nodeExternals(),
+ ],
+ build: {
+ target: 'esnext',
+ lib: {
+ entry: path.resolve(__dirname, '../src/plugins/interpolate.ts'),
+ fileName: 'interpolate',
+ formats: ['es'],
+ },
+ minify: false,
+ outDir: path.resolve(__dirname, 'dist'),
+ },
+});
diff --git a/scripts/plugins/docker/index.ts b/scripts/plugins/docker/index.ts
deleted file mode 100644
index 4dae04b7..00000000
--- a/scripts/plugins/docker/index.ts
+++ /dev/null
@@ -1,41 +0,0 @@
-import * as fs from 'node:fs/promises';
-import path from 'node:path';
-
-const dockerfile = `
-FROM node:alpine as PROD
-
-WORKDIR /app
-COPY index.js package.json /app/
-RUN npm install --only=production --omit=dev
-RUN apk add --no-cache sqlite
-EXPOSE 8787
-CMD ["npm", "run", "start"]
-`;
-
-const packageJson = `
-{
- "name": "chatgpt-telegram-workers",
- "type": "module",
- "version": "1.8.0",
- "author": "TBXark",
- "license": "MIT",
- "module": "index.js",
- "scripts": {
- "start": "node index.js"
- },
- "dependencies": {
- "cloudflare-worker-adapter": "^1.2.3"
- },
- "devDependencies": {}
-}
-`;
-
-export function createDockerPlugin(targetDir: string) {
- return {
- name: 'docker',
- async closeBundle() {
- await fs.writeFile(path.resolve(targetDir, 'Dockerfile'), dockerfile.trim());
- await fs.writeFile(path.resolve(targetDir, 'package.json'), packageJson.trim());
- },
- };
-}
diff --git a/scripts/plugins/version/index.ts b/scripts/plugins/version/index.ts
deleted file mode 100644
index 7656caef..00000000
--- a/scripts/plugins/version/index.ts
+++ /dev/null
@@ -1,30 +0,0 @@
-import { execSync } from 'node:child_process';
-import * as fs from 'node:fs/promises';
-import path from 'node:path';
-
-let COMMIT_HASH = 'unknown';
-const TIMESTAMP = Math.floor(Date.now() / 1000);
-
-try {
- COMMIT_HASH = execSync('git rev-parse --short HEAD').toString().trim();
-} catch (e) {
- console.warn(e);
-}
-
-export function createVersionPlugin(targetDir: string) {
- return {
- name: 'buildInfo',
- async closeBundle() {
- await fs.writeFile(path.resolve(targetDir, 'timestamp'), TIMESTAMP.toString());
- await fs.writeFile(path.resolve(targetDir, 'buildinfo.json'), JSON.stringify({
- sha: COMMIT_HASH,
- timestamp: TIMESTAMP,
- }));
- },
- };
-}
-
-export const versionDefine = {
- __BUILD_VERSION__: JSON.stringify(COMMIT_HASH),
- __BUILD_TIMESTAMP__: TIMESTAMP.toString(),
-};
diff --git a/src/agent/anthropic.ts b/src/agent/anthropic.ts
index 2c2c397e..b498c8dc 100644
--- a/src/agent/anthropic.ts
+++ b/src/agent/anthropic.ts
@@ -1,11 +1,18 @@
import type { AgentUserConfig } from '../config/env';
import type { SseChatCompatibleOptions } from './request';
import type { SSEMessage, SSEParserResult } from './stream';
-import type { ChatAgent, ChatStreamTextHandler, HistoryItem, LLMChatParams } from './types';
+import type {
+ ChatAgent,
+ ChatAgentResponse,
+ ChatStreamTextHandler,
+ HistoryItem,
+ LLMChatParams,
+} from './types';
import { ENV } from '../config/env';
import { imageToBase64String } from '../utils/image';
import { requestChatCompletions } from './request';
import { Stream } from './stream';
+import { convertStringToResponseMessages, extractImageContent, loadModelsList } from './utils';
export class Anthropic implements ChatAgent {
readonly name = 'anthropic';
@@ -20,22 +27,37 @@ export class Anthropic implements ChatAgent {
role: item.role,
content: item.content,
};
-
- if (item.images && item.images.length > 0) {
- res.content = [];
- if (item.content) {
- res.content.push({ type: 'text', text: item.content });
- }
- for (const image of item.images) {
- res.content.push(await imageToBase64String(image).then(({ format, data }) => {
- return { type: 'image', source: { type: 'base64', media_type: format, data } };
- }));
+ if (item.role === 'system') {
+ return null;
+ }
+ if (Array.isArray(item.content)) {
+ const contents = [];
+ for (const content of item.content) {
+ switch (content.type) {
+ case 'text':
+ contents.push({ type: 'text', text: content.text });
+ break;
+ case 'image': {
+ const data = extractImageContent(content.image);
+ if (data.url) {
+ contents.push(await imageToBase64String(data.url).then(({ format, data }) => {
+ return { type: 'image', source: { type: 'base64', media_type: format, data } };
+ }));
+ } else if (data.base64) {
+ contents.push({ type: 'image', source: { type: 'base64', media_type: 'image/jpeg', data: data.base64 } });
+ }
+ break;
+ }
+ default:
+ break;
+ }
}
+ res.content = contents;
}
return res;
};
- readonly model = (ctx: AgentUserConfig): string => {
+ readonly model = (ctx: AgentUserConfig): string | null => {
return ctx.ANTHROPIC_CHAT_MODEL;
};
@@ -64,8 +86,8 @@ export class Anthropic implements ChatAgent {
}
}
- readonly request = async (params: LLMChatParams, context: AgentUserConfig, onStream: ChatStreamTextHandler | null): Promise => {
- const { message, images, prompt, history } = params;
+ readonly request = async (params: LLMChatParams, context: AgentUserConfig, onStream: ChatStreamTextHandler | null): Promise => {
+ const { prompt, messages } = params;
const url = `${context.ANTHROPIC_API_BASE}/messages`;
const header = {
'x-api-key': context.ANTHROPIC_API_KEY || '',
@@ -73,8 +95,6 @@ export class Anthropic implements ChatAgent {
'content-type': 'application/json',
};
- const messages: HistoryItem[] = (history || []).concat({ role: 'user', content: message, images });
-
if (messages.length > 0 && messages[0].role === 'assistant') {
messages.shift();
}
@@ -82,14 +102,13 @@ export class Anthropic implements ChatAgent {
const body = {
system: prompt,
model: context.ANTHROPIC_CHAT_MODEL,
- messages: await Promise.all(messages.map(item => this.render(item))),
+ messages: (await Promise.all(messages.map(item => this.render(item)))).filter(i => i !== null),
stream: onStream != null,
max_tokens: ENV.MAX_TOKEN_LENGTH > 0 ? ENV.MAX_TOKEN_LENGTH : 2048,
};
if (!body.system) {
delete body.system;
}
-
const options: SseChatCompatibleOptions = {};
options.streamBuilder = function (r, c) {
return new Stream(r, c, Anthropic.parser);
@@ -98,11 +117,15 @@ export class Anthropic implements ChatAgent {
return data?.delta?.text;
};
options.fullContentExtractor = function (data: any) {
- return data?.content?.[0].text;
+ return data?.content?.at(0)?.text;
};
options.errorExtractor = function (data: any) {
return data?.error?.message;
};
- return requestChatCompletions(url, header, body, onStream, null, options);
+ return convertStringToResponseMessages(requestChatCompletions(url, header, body, onStream, options));
+ };
+
+ readonly modelList = async (context: AgentUserConfig): Promise => {
+ return loadModelsList(context.ANTHROPIC_CHAT_MODELS_LIST);
};
}
diff --git a/src/agent/azure.ts b/src/agent/azure.ts
index d53fd781..e88b41b3 100644
--- a/src/agent/azure.ts
+++ b/src/agent/azure.ts
@@ -1,7 +1,14 @@
import type { AgentUserConfig } from '../config/env';
-import type { ChatAgent, ChatStreamTextHandler, ImageAgent, LLMChatParams } from './types';
-import { renderOpenAIMessage } from './openai';
+import type {
+ ChatAgent,
+ ChatAgentResponse,
+ ChatStreamTextHandler,
+ ImageAgent,
+ LLMChatParams,
+} from './types';
+import { renderOpenAIMessages } from './openai';
import { requestChatCompletions } from './request';
+import { convertStringToResponseMessages, loadModelsList } from './utils';
class AzureBase {
readonly name = 'azure';
@@ -19,39 +26,33 @@ class AzureBase {
}
export class AzureChatAI extends AzureBase implements ChatAgent {
- readonly modelKey = 'AZURE_COMPLETIONS_API';
+ readonly modelKey = 'AZURE_CHAT_MODEL';
readonly enable = (context: AgentUserConfig): boolean => {
- return !!(context.AZURE_API_KEY && context.AZURE_COMPLETIONS_API);
+ return !!(context.AZURE_API_KEY && context.AZURE_RESOURCE_NAME);
};
- readonly model = (ctx: AgentUserConfig) => {
- return this.modelFromURI(ctx.AZURE_COMPLETIONS_API);
+ readonly model = (ctx: AgentUserConfig): string | null => {
+ return ctx.AZURE_CHAT_MODEL;
};
- readonly request = async (params: LLMChatParams, context: AgentUserConfig, onStream: ChatStreamTextHandler | null): Promise => {
- const { message, images, prompt, history } = params;
- const url = context.AZURE_COMPLETIONS_API;
- if (!url || !context.AZURE_API_KEY) {
- throw new Error('Azure Completions API is not set');
- }
+ readonly request = async (params: LLMChatParams, context: AgentUserConfig, onStream: ChatStreamTextHandler | null): Promise => {
+ const { prompt, messages } = params;
+ const url = `https://${context.AZURE_RESOURCE_NAME}.openai.azure.com/openai/deployments/${context.AZURE_CHAT_MODEL}/chat/completions?api-version=${context.AZURE_API_VERSION}`;
const header = {
'Content-Type': 'application/json',
- 'api-key': context.AZURE_API_KEY,
+ 'api-key': context.AZURE_API_KEY || '',
};
-
- const messages = [...(history || []), { role: 'user', content: message, images }];
- if (prompt) {
- messages.unshift({ role: context.SYSTEM_INIT_MESSAGE_ROLE, content: prompt });
- }
-
const body = {
...context.OPENAI_API_EXTRA_PARAMS,
- messages: await Promise.all(messages.map(renderOpenAIMessage)),
+ messages: await renderOpenAIMessages(prompt, messages, true),
stream: onStream != null,
};
+ return convertStringToResponseMessages(requestChatCompletions(url, header, body, onStream));
+ };
- return requestChatCompletions(url, header, body, onStream);
+ readonly modelList = async (context: AgentUserConfig): Promise => {
+ return loadModelsList(context.AZURE_CHAT_MODELS_LIST);
};
}
@@ -67,13 +68,10 @@ export class AzureImageAI extends AzureBase implements ImageAgent {
};
readonly request = async (prompt: string, context: AgentUserConfig): Promise => {
- const url = context.AZURE_DALLE_API;
- if (!url || !context.AZURE_API_KEY) {
- throw new Error('Azure DALL-E API is not set');
- }
+ const url = `https://${context.AZURE_RESOURCE_NAME}.openai.azure.com/openai/deployments/${context.AZURE_IMAGE_MODEL}/images/generations?api-version=${context.AZURE_API_VERSION}`;
const header = {
'Content-Type': 'application/json',
- 'api-key': context.AZURE_API_KEY,
+ 'api-key': context.AZURE_API_KEY || '',
};
const body = {
prompt,
@@ -95,6 +93,6 @@ export class AzureImageAI extends AzureBase implements ImageAgent {
if (resp.error?.message) {
throw new Error(resp.error.message);
}
- return resp?.data?.[0]?.url;
+ return resp?.data?.at(0)?.url;
};
}
diff --git a/src/agent/chat.ts b/src/agent/chat.ts
index df7b4210..d165bbc2 100644
--- a/src/agent/chat.ts
+++ b/src/agent/chat.ts
@@ -1,6 +1,7 @@
import type { WorkerContext } from '../config/context';
-import type { ChatAgent, HistoryItem, HistoryModifier, LLMChatRequestParams } from './types';
+import type { ChatAgent, HistoryItem, HistoryModifier, LLMChatParams, UserMessageItem } from './types';
import { ENV } from '../config/env';
+import { extractTextContent } from './utils';
/**
* @returns {(function(string): number)}
@@ -37,7 +38,7 @@ async function loadHistory(key: string): Promise {
const historyItem = list[i];
let length = 0;
if (historyItem.content) {
- length = counter(historyItem.content);
+ length = counter(extractTextContent(historyItem));
} else {
historyItem.content = '';
}
@@ -62,7 +63,7 @@ async function loadHistory(key: string): Promise {
export type StreamResultHandler = (text: string) => Promise;
-export async function requestCompletionsFromLLM(params: LLMChatRequestParams, context: WorkerContext, agent: ChatAgent, modifier: HistoryModifier | null, onStream: StreamResultHandler | null): Promise {
+export async function requestCompletionsFromLLM(params: UserMessageItem | null, context: WorkerContext, agent: ChatAgent, modifier: HistoryModifier | null, onStream: StreamResultHandler | null): Promise {
const historyDisable = ENV.AUTO_TRIM_HISTORY && ENV.MAX_HISTORY_LENGTH <= 0;
const historyKey = context.SHARE_CONTEXT.chatHistoryKey;
if (!historyKey) {
@@ -70,25 +71,26 @@ export async function requestCompletionsFromLLM(params: LLMChatRequestParams, co
}
let history = await loadHistory(historyKey);
if (modifier) {
- const modifierData = modifier(history, params.message || null);
+ const modifierData = modifier(history, params || null);
history = modifierData.history;
- params.message = modifierData.message;
+ params = modifierData.message;
}
- const llmParams = {
- ...params,
- history,
- prompt: context.USER_CONFIG.SYSTEM_INIT_MESSAGE,
+ if (!params) {
+ throw new Error('Message is empty');
+ }
+ history.push(params);
+ const llmParams: LLMChatParams = {
+ prompt: context.USER_CONFIG.SYSTEM_INIT_MESSAGE || undefined,
+ messages: history,
};
- const answer = await agent.request(llmParams, context.USER_CONFIG, onStream);
+ const { text, responses } = await agent.request(llmParams, context.USER_CONFIG, onStream);
if (!historyDisable) {
- const userMessage = { role: 'user', content: params.message || '', images: params.images };
- if (ENV.HISTORY_IMAGE_PLACEHOLDER && userMessage.images && userMessage.images.length > 0) {
- delete userMessage.images;
- userMessage.content = `${ENV.HISTORY_IMAGE_PLACEHOLDER}\n${userMessage.content}`;
+ if (ENV.HISTORY_IMAGE_PLACEHOLDER) {
+ // TODO: Add image placeholder
}
- history.push(userMessage);
- history.push({ role: 'assistant', content: answer });
+ history.push(params);
+ history.push(...responses);
await ENV.DATABASE.put(historyKey, JSON.stringify(history)).catch(console.error);
}
- return answer;
+ return text;
}
diff --git a/src/agent/cohere.ts b/src/agent/cohere.ts
index 3085c971..6e6d2eec 100644
--- a/src/agent/cohere.ts
+++ b/src/agent/cohere.ts
@@ -1,7 +1,9 @@
import type { AgentUserConfig } from '../config/env';
import type { SseChatCompatibleOptions } from './request';
-import type { ChatAgent, ChatStreamTextHandler, LLMChatParams } from './types';
+import type { ChatAgent, ChatAgentResponse, ChatStreamTextHandler, LLMChatParams } from './types';
+import { renderOpenAIMessages } from './openai';
import { requestChatCompletions } from './request';
+import { convertStringToResponseMessages, loadModelsList } from './utils';
export class Cohere implements ChatAgent {
readonly name = 'cohere';
@@ -11,26 +13,20 @@ export class Cohere implements ChatAgent {
return !!(context.COHERE_API_KEY);
};
- readonly model = (ctx: AgentUserConfig): string => {
+ readonly model = (ctx: AgentUserConfig): string | null => {
return ctx.COHERE_CHAT_MODEL;
};
- readonly request = async (params: LLMChatParams, context: AgentUserConfig, onStream: ChatStreamTextHandler | null): Promise => {
- const { message, prompt, history } = params;
+ readonly request = async (params: LLMChatParams, context: AgentUserConfig, onStream: ChatStreamTextHandler | null): Promise => {
+ const { prompt, messages } = params;
const url = `${context.COHERE_API_BASE}/chat`;
const header = {
'Authorization': `Bearer ${context.COHERE_API_KEY}`,
'Content-Type': 'application/json',
'Accept': onStream !== null ? 'text/event-stream' : 'application/json',
};
-
- const messages = [...history || [], { role: 'user', content: message }];
- if (prompt) {
- messages.unshift({ role: 'assistant', content: prompt });
- }
-
const body = {
- messages,
+ messages: await renderOpenAIMessages(prompt, messages),
model: context.COHERE_CHAT_MODEL,
stream: onStream != null,
};
@@ -40,11 +36,24 @@ export class Cohere implements ChatAgent {
return data?.delta?.message?.content?.text;
};
options.fullContentExtractor = function (data: any) {
- return data?.messages[0].content;
+ return data?.messages?.at(0)?.content;
};
options.errorExtractor = function (data: any) {
return data?.message;
};
- return requestChatCompletions(url, header, body, onStream, null, options);
+ return convertStringToResponseMessages(requestChatCompletions(url, header, body, onStream, options));
+ };
+
+ readonly modelList = async (context: AgentUserConfig): Promise => {
+ if (context.COHERE_CHAT_MODELS_LIST === '') {
+ const { protocol, host } = new URL(context.COHERE_API_BASE);
+ context.COHERE_CHAT_MODELS_LIST = `${protocol}//${host}/v1/models`;
+ }
+ return loadModelsList(context.COHERE_CHAT_MODELS_LIST, async (url): Promise => {
+ const data = await fetch(url, {
+ headers: { Authorization: `Bearer ${context.COHERE_API_KEY}` },
+ }).then(res => res.json());
+ return data.models?.filter((model: any) => model.endpoints?.includes('chat')).map((model: any) => model.name) || [];
+ });
};
}
diff --git a/src/agent/gemini.ts b/src/agent/gemini.ts
index 0452e025..022c8c46 100644
--- a/src/agent/gemini.ts
+++ b/src/agent/gemini.ts
@@ -1,16 +1,13 @@
import type { AgentUserConfig } from '../config/env';
-import type { ChatAgent, ChatStreamTextHandler, HistoryItem, LLMChatParams } from './types';
+import type { ChatAgent, ChatAgentResponse, ChatStreamTextHandler, LLMChatParams } from './types';
+import { renderOpenAIMessages } from './openai';
+import { requestChatCompletions } from './request';
+import { convertStringToResponseMessages, loadModelsList } from './utils';
export class Gemini implements ChatAgent {
readonly name = 'gemini';
readonly modelKey = 'GOOGLE_COMPLETIONS_MODEL';
- static GEMINI_ROLE_MAP: Record = {
- assistant: 'model',
- system: 'user',
- user: 'user',
- };
-
readonly enable = (context: AgentUserConfig): boolean => {
return !!(context.GOOGLE_API_KEY);
};
@@ -19,59 +16,23 @@ export class Gemini implements ChatAgent {
return ctx.GOOGLE_COMPLETIONS_MODEL;
};
- private render = (item: HistoryItem): object => {
- return {
- role: Gemini.GEMINI_ROLE_MAP[item.role],
- parts: [
- {
- text: item.content || '',
- },
- ],
+ readonly request = async (params: LLMChatParams, context: AgentUserConfig, onStream: ChatStreamTextHandler | null): Promise => {
+ const { prompt, messages } = params;
+ const url = `${context.GOOGLE_API_BASE}/openai/chat/completions`;
+ const header = {
+ 'Authorization': `Bearer ${context.GOOGLE_API_KEY}`,
+ 'Content-Type': 'application/json',
+ 'Accept': onStream !== null ? 'text/event-stream' : 'application/json',
+ };
+ const body = {
+ messages: await renderOpenAIMessages(prompt, messages),
+ model: context.GOOGLE_COMPLETIONS_MODEL,
+ stream: onStream != null,
};
+ return convertStringToResponseMessages(requestChatCompletions(url, header, body, onStream));
};
- readonly request = async (params: LLMChatParams, context: AgentUserConfig, onStream: ChatStreamTextHandler | null): Promise => {
- const { message, prompt, history } = params;
- if (onStream !== null) {
- console.warn('Stream mode is not supported');
- }
- const mode = 'generateContent'; // onStream ? 'streamGenerateContent' : 'generateContent'
- const url = `${context.GOOGLE_COMPLETIONS_API}${context.GOOGLE_COMPLETIONS_MODEL}:${mode}`;
-
- const contentsTemp = [...history || [], { role: 'user', content: message }];
- if (prompt) {
- contentsTemp.unshift({ role: 'assistant', content: prompt });
- }
- const contents: any[] = [];
- // role必须是 model,user 而且不能连续两个一样
- for (const msg of contentsTemp) {
- msg.role = Gemini.GEMINI_ROLE_MAP[msg.role];
- // 如果存在最后一个元素或role不一样则插入
- if (contents.length === 0 || contents[contents.length - 1].role !== msg.role) {
- contents.push(this.render(msg));
- } else {
- // 否则合并
- contents[contents.length - 1].parts[0].text += msg.content;
- }
- }
-
- const resp = await fetch(url, {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json',
- 'x-goog-api-key': context.GOOGLE_API_KEY,
- } as Record,
- body: JSON.stringify({ contents }),
- });
- const data = await resp.json() as any;
- try {
- return data.candidates[0].content.parts[0].text;
- } catch (e) {
- console.error(e);
- if (!data) {
- throw new Error('Empty response');
- }
- throw new Error(data?.error?.message || JSON.stringify(data));
- }
+ readonly modelList = async (context: AgentUserConfig): Promise => {
+ return loadModelsList(context.GOOGLE_COMPLETIONS_MODELS_LIST);
};
}
diff --git a/src/agent/index.test.ts b/src/agent/index.test.ts
index ffe7724f..1c82a067 100644
--- a/src/agent/index.test.ts
+++ b/src/agent/index.test.ts
@@ -10,8 +10,12 @@ import '../config/env.test';
});
const params: LLMChatParams = {
prompt: 'You are a useful assistant.',
- message: 'What is your name?',
- history: [],
+ messages: [
+ {
+ role: 'user',
+ content: 'What is your name?',
+ },
+ ],
};
console.log(agent?.name, agent?.model(ENV.USER_CONFIG));
agent?.request(params, ENV.USER_CONFIG, async (text) => {
diff --git a/src/agent/index.ts b/src/agent/index.ts
index ad0f1018..f7abaf4e 100644
--- a/src/agent/index.ts
+++ b/src/agent/index.ts
@@ -8,14 +8,14 @@ import { Mistral } from './mistralai';
import { Dalle, OpenAI } from './openai';
import { WorkersChat, WorkersImage } from './workersai';
-const CHAT_AGENTS: ChatAgent[] = [
+export const CHAT_AGENTS: ChatAgent[] = [
+ new OpenAI(),
new Anthropic(),
new AzureChatAI(),
+ new WorkersChat(),
new Cohere(),
new Gemini(),
new Mistral(),
- new OpenAI(),
- new WorkersChat(),
];
export function loadChatLLM(context: AgentUserConfig): ChatAgent | null {
@@ -33,7 +33,7 @@ export function loadChatLLM(context: AgentUserConfig): ChatAgent | null {
return null;
}
-const IMAGE_AGENTS: ImageAgent[] = [
+export const IMAGE_AGENTS: ImageAgent[] = [
new AzureImageAI(),
new Dalle(),
new WorkersImage(),
@@ -52,4 +52,4 @@ export function loadImageGen(context: AgentUserConfig): ImageAgent | null {
}
}
return null;
-}
+};
diff --git a/src/agent/mistralai.ts b/src/agent/mistralai.ts
index 9e5dc8ea..5c3d69c3 100644
--- a/src/agent/mistralai.ts
+++ b/src/agent/mistralai.ts
@@ -1,6 +1,8 @@
import type { AgentUserConfig } from '../config/env';
-import type { ChatAgent, ChatStreamTextHandler, HistoryItem, LLMChatParams } from './types';
+import type { ChatAgent, ChatAgentResponse, ChatStreamTextHandler, LLMChatParams } from './types';
+import { renderOpenAIMessages } from './openai';
import { requestChatCompletions } from './request';
+import { convertStringToResponseMessages, loadModelsList } from './utils';
export class Mistral implements ChatAgent {
readonly name = 'mistral';
@@ -10,36 +12,36 @@ export class Mistral implements ChatAgent {
return !!(context.MISTRAL_API_KEY);
};
- readonly model = (ctx: AgentUserConfig): string => {
+ readonly model = (ctx: AgentUserConfig): string | null => {
return ctx.MISTRAL_CHAT_MODEL;
};
- private render = (item: HistoryItem): any => {
- return {
- role: item.role,
- content: item.content,
- };
- };
-
- readonly request = async (params: LLMChatParams, context: AgentUserConfig, onStream: ChatStreamTextHandler | null): Promise => {
- const { message, prompt, history } = params;
+ readonly request = async (params: LLMChatParams, context: AgentUserConfig, onStream: ChatStreamTextHandler | null): Promise => {
+ const { prompt, messages } = params;
const url = `${context.MISTRAL_API_BASE}/chat/completions`;
const header = {
'Content-Type': 'application/json',
'Authorization': `Bearer ${context.MISTRAL_API_KEY}`,
};
- const messages = [...(history || []), { role: 'user', content: message }];
- if (prompt) {
- messages.unshift({ role: context.SYSTEM_INIT_MESSAGE_ROLE, content: prompt });
- }
-
const body = {
model: context.MISTRAL_CHAT_MODEL,
- messages: messages.map(this.render),
+ messages: await renderOpenAIMessages(prompt, messages),
stream: onStream != null,
};
- return requestChatCompletions(url, header, body, onStream);
+ return convertStringToResponseMessages(requestChatCompletions(url, header, body, onStream));
+ };
+
+ readonly modelList = async (context: AgentUserConfig): Promise => {
+ if (context.MISTRAL_CHAT_MODELS_LIST === '') {
+ context.MISTRAL_CHAT_MODELS_LIST = `${context.MISTRAL_API_BASE}/models`;
+ }
+ return loadModelsList(context.MISTRAL_CHAT_MODELS_LIST, async (url): Promise => {
+ const data = await fetch(url, {
+ headers: { Authorization: `Bearer ${context.MISTRAL_API_KEY}` },
+ }).then(res => res.json());
+ return data.data?.map((model: any) => model.id) || [];
+ });
};
}
diff --git a/src/agent/next/next.ts b/src/agent/next/next.ts
new file mode 100644
index 00000000..08498ff8
--- /dev/null
+++ b/src/agent/next/next.ts
@@ -0,0 +1,130 @@
+import type { ProviderV1 } from '@ai-sdk/provider';
+import type { LanguageModelV1 } from 'ai';
+import type { AgentUserConfig } from '../../config/env';
+import type { ChatAgent, ChatAgentResponse, ChatStreamTextHandler, HistoryItem, LLMChatParams } from '../types';
+import { createAnthropic } from '@ai-sdk/anthropic';
+import { createAzure } from '@ai-sdk/azure';
+import { createCohere } from '@ai-sdk/cohere';
+import { createGoogleGenerativeAI } from '@ai-sdk/google';
+import { createMistral } from '@ai-sdk/mistral';
+import { createOpenAI } from '@ai-sdk/openai';
+import { generateText, streamText } from 'ai';
+import { streamHandler } from '../request';
+
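+// Bridges the Vercel AI SDK's streamText/generateText to the bot's ChatAgentResponse shape.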
+async function requestChatCompletionsV2(params: { model: LanguageModelV1; prompt?: string; messages: HistoryItem[] }, onStream: ChatStreamTextHandler | null): Promise {
+ if (onStream !== null) {
+ const stream = await streamText({
+ model: params.model,
+ prompt: params.prompt,
+ messages: params.messages,
+ });
+ await streamHandler(stream.textStream, t => t, onStream);
+ return {
+ text: await stream.text,
+ responses: (await stream.response).messages,
+ };
+ } else {
+ const result = await generateText({
+ model: params.model,
+ prompt: params.prompt,
+ messages: params.messages,
+ });
+ return {
+ text: result.text,
+ responses: result.response.messages,
+ };
+ }
+}
+
+type ProviderCreator = (context: AgentUserConfig) => ProviderV1;
+
+class NextChatAgent implements ChatAgent {
+ readonly name: string;
+ readonly modelKey = 'NEXT_CHAT_MODEL';
+ readonly adapter: ChatAgent;
+ readonly providerCreator: ProviderCreator;
+
+ constructor(adapter: ChatAgent, providerCreator: ProviderCreator) {
+ this.name = adapter.name;
+ this.adapter = adapter;
+ this.providerCreator = providerCreator;
+ }
+
+ static from(agent: ChatAgent): NextChatAgent | null {
+ const provider = this.newProviderCreator(agent.name);
+ if (!provider) {
+ return null;
+ }
+ return new NextChatAgent(agent, provider);
+ }
+
+ readonly enable = (context: AgentUserConfig): boolean => {
+ return this.adapter.enable(context);
+ };
+
+ readonly model = (ctx: AgentUserConfig): string | null => {
+ return this.adapter.model(ctx);
+ };
+
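+ // Maps a built-in agent name to the matching @ai-sdk provider factory; names
+ // without an AI SDK counterpart yield null, so NextChatAgent.from() leaves
+ // those agents on their original request path.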
+ static newProviderCreator = (provider: string): ProviderCreator | null => {
+ switch (provider) {
+ case 'anthropic':
+ return (context: AgentUserConfig) => createAnthropic({
+ baseURL: context.ANTHROPIC_API_BASE,
+ apiKey: context.ANTHROPIC_API_KEY || undefined,
+ });
+ case 'azure':
+ return (context: AgentUserConfig) => createAzure({
+ resourceName: context.AZURE_RESOURCE_NAME || undefined,
+ apiKey: context.AZURE_API_KEY || undefined,
+ });
+ case 'cohere':
+ return (context: AgentUserConfig) => createCohere({
+ baseURL: context.COHERE_API_BASE,
+ apiKey: context.COHERE_API_KEY || undefined,
+ });
+ case 'gemini':
+ return (context: AgentUserConfig) => createGoogleGenerativeAI({
+ baseURL: context.GOOGLE_API_BASE,
+ apiKey: context.GOOGLE_API_KEY || undefined,
+ });
+ case 'mistral':
+ return (context: AgentUserConfig) => createMistral({
+ baseURL: context.MISTRAL_API_BASE,
+ apiKey: context.MISTRAL_API_KEY || undefined,
+ });
+ case 'openai':
+ return (context: AgentUserConfig) => createOpenAI({
+ baseURL: context.OPENAI_API_BASE,
+ apiKey: context.OPENAI_API_KEY.at(0) || undefined,
+ });
+ default:
+ return null;
+ }
+ };
+
+ readonly request = async (params: LLMChatParams, context: AgentUserConfig, onStream: ChatStreamTextHandler | null): Promise<ChatAgentResponse> => {
+ const model = this.model(context);
+ if (!model) {
+ throw new Error('Model not found');
+ }
+ return requestChatCompletionsV2({
+ model: this.providerCreator(context).languageModel(model),
+ prompt: params.prompt,
+ messages: params.messages,
+ }, onStream);
+ };
+
+ readonly modelList = async (context: AgentUserConfig): Promise<string[]> => {
+ return this.adapter.modelList(context);
+ };
+}
+
+export function injectNextChatAgent(agents: ChatAgent[]) {
+ for (let i = 0; i < agents.length; i++) {
+ const next = NextChatAgent.from(agents[i]);
+ if (next) {
+ agents[i] = next;
+ }
+ }
+}
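
The injector above rewrites the agent list in place. A minimal usage sketch, assuming an agent registry array like the one kept elsewhere in src/agent — the registry name and import paths here are illustrative, not verified against the rest of the codebase:

```ts
import type { ChatAgent } from '../types';
import { injectNextChatAgent } from './next';
import { Mistral } from '../mistralai';
import { OpenAI } from '../openai';

// Hypothetical registry; the real list lives wherever agents are registered.
const CHAT_AGENTS: ChatAgent[] = [new OpenAI(), new Mistral()];

// Agents with an @ai-sdk provider (openai, mistral, ...) get wrapped by
// NextChatAgent; anything NextChatAgent.from() returns null for stays as-is.
injectNextChatAgent(CHAT_AGENTS);
```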
diff --git a/src/agent/openai.ts b/src/agent/openai.ts
index 844d997e..b6e56475 100644
--- a/src/agent/openai.ts
+++ b/src/agent/openai.ts
@@ -1,36 +1,57 @@
import type { AgentUserConfig } from '../config/env';
-import type { ChatAgent, ChatStreamTextHandler, HistoryItem, ImageAgent, LLMChatParams } from './types';
-import { ENV } from '../config/env';
-import { imageToBase64String, renderBase64DataURI } from '../utils/image';
+import type {
+ ChatAgent,
+ ChatAgentResponse,
+ ChatStreamTextHandler,
+ HistoryItem,
+ ImageAgent,
+ LLMChatParams,
+} from './types';
import { requestChatCompletions } from './request';
+import { convertStringToResponseMessages, extractImageContent, loadModelsList } from './utils';
-export async function renderOpenAIMessage(item: HistoryItem): Promise<any> {
+async function renderOpenAIMessage(item: HistoryItem, supportImage?: boolean): Promise<any> {
const res: any = {
role: item.role,
content: item.content,
};
- if (item.images && item.images.length > 0) {
- res.content = [];
- if (item.content) {
- res.content.push({ type: 'text', text: item.content });
- }
- for (const image of item.images) {
- switch (ENV.TELEGRAM_IMAGE_TRANSFER_MODE) {
- case 'base64':
- res.content.push({ type: 'image_url', image_url: {
- url: renderBase64DataURI(await imageToBase64String(image)),
- } });
+ if (Array.isArray(item.content)) {
+ const contents = [];
+ for (const content of item.content) {
+ switch (content.type) {
+ case 'text':
+ contents.push({ type: 'text', text: content.text });
+ break;
+ case 'image':
+ if (supportImage) {
+ const data = extractImageContent(content.image);
+ if (data.url) {
+ contents.push({ type: 'image_url', image_url: { url: data.url } });
+ } else if (data.base64) {
+ contents.push({ type: 'image_url', image_url: { url: data.base64 } });
+ }
+ }
break;
- case 'url':
default:
- res.content.push({ type: 'image_url', image_url: { url: image } });
break;
}
}
+ res.content = contents;
}
return res;
}
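+
+// Renders each history item via renderOpenAIMessage, then pins the prompt as the
+// single system message, replacing any system message already at the head of the
+// history so it is not duplicated.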
+export async function renderOpenAIMessages(prompt: string | undefined, items: HistoryItem[], supportImage?: boolean): Promise<any[]> {
+ const messages = await Promise.all(items.map(r => renderOpenAIMessage(r, supportImage)));
+ if (prompt) {
+ if (messages.length > 0 && messages[0].role === 'system') {
+ messages.shift();
+ }
+ messages.unshift({ role: 'system', content: prompt });
+ }
+ return messages;
+}
+
class OpenAIBase {
readonly name = 'openai';
apikey = (context: AgentUserConfig): string => {
@@ -46,7 +67,7 @@ export class OpenAI extends OpenAIBase implements ChatAgent {
return context.OPENAI_API_KEY.length > 0;
};
- readonly model = (ctx: AgentUserConfig): string => {
+ readonly model = (ctx: AgentUserConfig): string | null => {
return ctx.OPENAI_CHAT_MODEL;
};
@@ -54,27 +75,33 @@ export class OpenAI extends OpenAIBase implements ChatAgent {
return renderOpenAIMessage(item);
};
- readonly request = async (params: LLMChatParams, context: AgentUserConfig, onStream: ChatStreamTextHandler | null): Promise<string> => {
- const { message, images, prompt, history } = params;
+ readonly request = async (params: LLMChatParams, context: AgentUserConfig, onStream: ChatStreamTextHandler | null): Promise<ChatAgentResponse> => {
+ const { prompt, messages } = params;
const url = `${context.OPENAI_API_BASE}/chat/completions`;
const header = {
'Content-Type': 'application/json',
'Authorization': `Bearer ${this.apikey(context)}`,
};
-
- const messages = [...(history || []), { role: 'user', content: message, images }];
- if (prompt) {
- messages.unshift({ role: context.SYSTEM_INIT_MESSAGE_ROLE, content: prompt });
- }
-
const body = {
model: context.OPENAI_CHAT_MODEL,
...context.OPENAI_API_EXTRA_PARAMS,
- messages: await Promise.all(messages.map(this.render)),
+ messages: await renderOpenAIMessages(prompt, messages, true),
stream: onStream != null,
};
- return requestChatCompletions(url, header, body, onStream);
+ return convertStringToResponseMessages(requestChatCompletions(url, header, body, onStream));
+ };
+
+ readonly modelList = async (context: AgentUserConfig): Promise<string[]> => {
+ if (context.OPENAI_CHAT_MODELS_LIST === '') {
+ context.OPENAI_CHAT_MODELS_LIST = `${context.OPENAI_API_BASE}/models`;
+ }
+ return loadModelsList(context.OPENAI_CHAT_MODELS_LIST, async (url): Promise<string[]> => {
+ const data = await fetch(url, {
+ headers: { Authorization: `Bearer ${this.apikey(context)}` },
+ }).then(res => res.json());
+ return data.data?.map((model: any) => model.id) || [];
+ });
};
}
@@ -114,6 +141,6 @@ export class Dalle extends OpenAIBase implements ImageAgent {
if (resp.error?.message) {
throw new Error(resp.error.message);
}
- return resp?.data?.[0]?.url;
+ return resp?.data?.at(0)?.url;
};
}
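
For reference, a sketch of what renderOpenAIMessages is expected to produce for a mixed text-and-image turn. The HistoryItem content-part shape matches the switch above; the pass-through of a plain URL by extractImageContent is an assumption:

```ts
import type { HistoryItem } from './types';
import { renderOpenAIMessages } from './openai';

async function example(): Promise<void> {
    const history: HistoryItem[] = [{
        role: 'user',
        content: [
            { type: 'text', text: 'What is in this picture?' },
            { type: 'image', image: 'https://example.com/cat.png' }, // URL form assumed
        ],
    }];
    const rendered = await renderOpenAIMessages('You are a helpful assistant', history, true);
    // Expected shape (illustrative):
    // [{ role: 'system', content: 'You are a helpful assistant' },
    //  { role: 'user', content: [
    //      { type: 'text', text: 'What is in this picture?' },
    //      { type: 'image_url', image_url: { url: 'https://example.com/cat.png' } }] }]
    console.log(JSON.stringify(rendered, null, 2));
}
```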
diff --git a/src/agent/request.ts b/src/agent/request.ts
index 42687443..9c2ec0d0 100644
--- a/src/agent/request.ts
+++ b/src/agent/request.ts
@@ -15,10 +15,10 @@ function fixOpenAICompatibleOptions(options: SseChatCompatibleOptions | null): SseChatCompatibleOptions {
return new Stream(r, c);
};
options.contentExtractor = options.contentExtractor || function (d: any) {
- return d?.choices?.[0]?.delta?.content;
+ return d?.choices?.at(0)?.delta?.content;
};
options.fullContentExtractor = options.fullContentExtractor || function (d: any) {
- return d.choices?.[0]?.message.content;
+ return d.choices?.at(0)?.message.content;
};
options.errorExtractor = options.errorExtractor || function (d: any) {
return d.error?.message;
@@ -41,12 +41,43 @@ export function isEventStreamResponse(resp: Response): boolean {
return false;
}
-export async function requestChatCompletions(url: string, header: Record<string, string>, body: any, onStream: ChatStreamTextHandler | null, onResult: ChatStreamTextHandler | null = null, options: SseChatCompatibleOptions | null = null): Promise<string> {
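+// Incremental-flush loop shared by the SSE path below and the AI SDK path in
+// src/agent/next/next.ts: extracted chunks accumulate and are pushed to onStream
+// once enough new text arrives, throttled by TELEGRAM_MIN_STREAM_INTERVAL when set.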
+export async function streamHandler<T>(stream: AsyncIterable<T>, contentExtractor: (data: T) => string | null, onStream: (text: string) => Promise<any>): Promise<string> {
+ let contentFull = '';
+ let lengthDelta = 0;
+ let updateStep = 50;
+ let lastUpdateTime = Date.now();
+ try {
+ for await (const part of stream) {
+ const textPart = contentExtractor(part);
+ if (!textPart) {
+ continue;
+ }
+ lengthDelta += textPart.length;
+ contentFull = contentFull + textPart;
+ if (lengthDelta > updateStep) {
+ if (ENV.TELEGRAM_MIN_STREAM_INTERVAL > 0) {
+ const delta = Date.now() - lastUpdateTime;
+ if (delta < ENV.TELEGRAM_MIN_STREAM_INTERVAL) {
+ continue;
+ }
+ lastUpdateTime = Date.now();
+ }
+ lengthDelta = 0;
+ updateStep += 20;
+ await onStream(`${contentFull}\n...`);
+ }
+ }
+ } catch (e) {
+ contentFull += `\nError: ${(e as Error).message}`;
+ }
+ return contentFull;
+}
+
+export async function requestChatCompletions(url: string, header: Record<string, string>, body: any, onStream: ChatStreamTextHandler | null, options: SseChatCompatibleOptions | null = null): Promise<string> {
const controller = new AbortController();
const { signal } = controller;
let timeoutID = null;
- let lastUpdateTime = Date.now();
if (ENV.CHAT_COMPLETE_API_TIMEOUT > 0) {
timeoutID = setTimeout(() => controller.abort(), ENV.CHAT_COMPLETE_API_TIMEOUT);
}
@@ -57,67 +88,29 @@ export async function requestChatCompletions(url: string, header: Record<string, string>, body: any, onStream: ChatStreamTextHandler | null, onResult: ChatStreamTextHandler | null = null, options: SseChatCompatibleOptions | null = null): Promise<string> {
- if (lengthDelta > updateStep) {
- if (ENV.TELEGRAM_MIN_STREAM_INTERVAL > 0) {
- const delta = Date.now() - lastUpdateTime;
- if (delta < ENV.TELEGRAM_MIN_STREAM_INTERVAL) {
- continue;
- }
- lastUpdateTime = Date.now();
- }
- lengthDelta = 0;
- updateStep += 20;
- await onStream(`${contentFull}\n...`);
- }
- }
- } catch (e) {
- contentFull += `\nERROR: ${(e as Error).message}`;
- }
- return contentFull;
+ return streamHandler