# config.toml (forked from juzeon/poe-openai-proxy)
# The port number for the proxy service. The proxied OpenAI API endpoint will be: http://localhost:3700/v1/chat/completions
port = 3700
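# For example, once the proxy is running, you should be able to send it a standard OpenAI-style
# request like this (illustrative only; "gpt-3.5-turbo" must be mapped in the [bot] table below):
#   curl http://localhost:3700/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello!"}]}'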
# A list of Poe tokens. You can get them from the cookies on poe.com; they look like this: p-b=fdasac5a1dfa6%3D%3D
tokens = []
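# For example, with two tokens in the format shown above (placeholder values, not real cookies):
#   tokens = ["p-b=aaaaaaaaaaaa%3D%3D", "p-b=bbbbbbbbbbbb%3D%3D"]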
# The proxy that will be used to connect to poe.com. Leave it blank if you do not use a proxy
proxy = ""
# The gateway URL for the Python backend of poe-api. Don't change this unless you modify external/api.py
# Note that if you use Docker, this value should be changed to: http://external:5000
gateway = "http://pyopen.internal:5000"
# If enabled, leading prompts are used to indicate roles. Disable this if you are using tools like https://github.com/TheR1D/shell_gpt
# 0: disable, 1: enable, 2: auto detect
# Example:
# ||>User:
# Hello!
# ||Assistant:
# Hello! How can I assist you today?
simulate-roles = 2
# Rate limit. Defaults to 10 API calls per token per minute
rate-limit = 10
# Cool-down in seconds. The same token cannot be used more than once within n seconds
cool-down = 3
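# With the defaults above, a single token serves at most 10 calls per minute and consecutive
# calls reusing the same token are spaced at least 3 seconds apart; since both limits are
# per token, adding more tokens should raise the overall throughput.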
# Timeout in seconds per response chunk
# The timeout resets every time a chunk is received,
# so there is no need for a very large value (such as 300) to compensate for a very long reply
timeout = 20
# Timeout in seconds for poe-api; it should usually be smaller than timeout
api-timeout = 12
[bot]
"gpt-3.5-turbo" = "chinchilla" # ChatGPT 3.5
"gpt-4" = "beaver" # ChatGPT 4
"gpt-3.5-turbo-16k" = "agouti" # chatgpt-16k
"gpt-4-32k" = "vizcacha" # Claude-instant-100k
"gpt-4-0314" = "vizcacha" # Claude+
"gpt-Sage" = "capybara" # Add `gpt-` prefix for ChatGPT Raycast plugin to pick these up
"gpt-ChatGPT" = "chinchilla"
"gpt-GPT-4" = "beaver"
"gpt-Claude-instant" = "a2"
"gpt-Claude-instant-100k" = "a2_100k"
"gpt-Claude+" = "a2_2"
"gpt-claude-2-100k" = "a2_2"
"gpt-google"="acouchy"
"Sage" = "capybara"
"ChatGPT" = "chinchilla"
"GPT-4" = "beaver"
"Claude-instant" = "a2"
"Claude-instant-100k" = "a2_100k"
"Claude+" = "a2_2"
"gpt-3.5-turbo-instruct"="chinchilla_instruct"
"gpt-3.5-i"="chinchilla_instruct"
"gpt-web"="web-search"
"stablediffusionxl"="stablediffusionxl"
"gpt-perplexity"="gpt-perplexity"
"perplexity-web"="perplexity-web"
"perplexity-scholar"="perplexity-scholar"
"perplexity-wolfram"="perplexity-wolfram"
"perplexity-youtube"="perplexity-youtube"
"perplexity-reddit"="perplexity-reddit"