
Commit

chore(version): 0.5.0
kangfenmao committed Aug 7, 2024
1 parent beb40f5 commit f7ef895
Showing 9 changed files with 33 additions and 36 deletions.
2 changes: 1 addition & 1 deletion package.json
@@ -1,6 +1,6 @@
 {
   "name": "cherry-studio",
-  "version": "0.4.9",
+  "version": "0.5.0",
   "description": "A powerful AI assistant for producer.",
   "main": "./out/main/index.js",
   "author": "[email protected]",
2 changes: 1 addition & 1 deletion src/main/index.ts
@@ -26,7 +26,7 @@ function createWindow() {
     width: mainWindowState.width,
     height: mainWindowState.height,
     minWidth: 1080,
-    minHeight: 500,
+    minHeight: 600,
     show: true,
     autoHideMenuBar: true,
     transparent: process.platform === 'darwin',
10 changes: 3 additions & 7 deletions src/renderer/src/pages/home/components/sidebar/SettingsTab.tsx
@@ -33,20 +33,16 @@ const SettingsTab: FC<Props> = (props) => {
     debounce(
       (settings: Partial<AssistantSettings>) => {
         updateAssistantSettings({
           ...assistant.settings,
           temperature: settings.temperature ?? temperature,
           contextCount: settings.contextCount ?? contextCount,
           enableMaxTokens: settings.enableMaxTokens ?? enableMaxTokens,
           maxTokens: settings.maxTokens ?? maxTokens
         })
       },
       1000,
-      {
-        leading: false,
-        trailing: true
-      }
+      { leading: true, trailing: false }
     ),
-    []
+    [temperature, contextCount, enableMaxTokens, maxTokens]
   )
 
   const onTemperatureChange = (value) => {
@@ -255,7 +251,7 @@ const InputNumberic = styled(InputNumber)`
 const Label = styled.p`
   margin: 0;
   font-size: 12px;
-  font-weight: bold;
+  font-weight: 600;
   margin-right: 8px;
 `

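Note on the debounce change above: the `useCallback` dependency array moves from `[]` to the four settings values, so the memoized debounced callback is rebuilt whenever they change instead of closing over the values captured on first render; the same dependency fix lands in AssistantSettings.tsx below. A minimal sketch of the stale-closure problem this addresses, with illustrative names that are not taken from the repository:

```ts
import { debounce } from 'lodash'
import { useCallback, useState } from 'react'

function useDebouncedSettings(save: (temperature: number) => void) {
  const [temperature, setTemperature] = useState(0.7)

  // With deps [], this closure would keep the temperature from the first
  // render forever. Listing `temperature` rebuilds the debounced function
  // whenever it changes. { leading: true, trailing: false } fires on the
  // first call of a burst and swallows the rest, rather than waiting for
  // a quiet second before saving.
  const debouncedSave = useCallback(
    debounce((override?: number) => save(override ?? temperature), 1000, {
      leading: true,
      trailing: false
    }),
    [temperature]
  )

  return { temperature, setTemperature, debouncedSave }
}
```

One tradeoff worth noting: rebuilding the debounced function on a dependency change also resets its internal timer, which is usually acceptable for a settings panel.
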
2 changes: 1 addition & 1 deletion src/renderer/src/pages/settings/AssistantSettings.tsx
@@ -38,7 +38,7 @@ const AssistantSettings: FC = () => {
       1000,
       { leading: false, trailing: true }
     ),
-    []
+    [temperature, contextCount, enableMaxTokens, maxTokens]
   )
 
   const onTemperatureChange = (value) => {
10 changes: 5 additions & 5 deletions src/renderer/src/services/ProviderSDK.ts
@@ -3,12 +3,12 @@ import { MessageCreateParamsNonStreaming, MessageParam } from '@anthropic-ai/sdk'
 import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
 import { getOllamaKeepAliveTime } from '@renderer/hooks/useOllama'
 import { Assistant, Message, Provider, Suggestion } from '@renderer/types'
-import { getAssistantSettings, removeQuotes } from '@renderer/utils'
+import { removeQuotes } from '@renderer/utils'
 import { sum, takeRight } from 'lodash'
 import OpenAI from 'openai'
 import { ChatCompletionCreateParamsNonStreaming, ChatCompletionMessageParam } from 'openai/resources'
 
-import { getAssistantMaxTokens, getDefaultModel, getTopNamingModel } from './assistant'
+import { getAssistantSettings, getDefaultModel, getTopNamingModel } from './assistant'
 import { EVENT_NAMES } from './event'
 
 export default class ProviderSDK {
@@ -39,7 +39,7 @@ export default class ProviderSDK {
   ) {
     const defaultModel = getDefaultModel()
     const model = assistant.model || defaultModel
-    const { contextCount } = getAssistantSettings(assistant)
+    const { contextCount, maxTokens } = getAssistantSettings(assistant)
 
     const systemMessage = assistant.prompt ? { role: 'system', content: assistant.prompt } : undefined
 
@@ -53,7 +53,7 @@
         .stream({
           model: model.id,
           messages: [systemMessage, ...userMessages].filter(Boolean) as MessageParam[],
-          max_tokens: getAssistantMaxTokens(assistant) || DEFAULT_MAX_TOKENS,
+          max_tokens: maxTokens || DEFAULT_MAX_TOKENS,
           temperature: assistant?.settings?.temperature
         })
         .on('text', (text) => onChunk({ text: text || '' }))
@@ -73,7 +73,7 @@
         messages: [systemMessage, ...userMessages].filter(Boolean) as ChatCompletionMessageParam[],
         stream: true,
         temperature: assistant?.settings?.temperature,
-        max_tokens: getAssistantMaxTokens(assistant),
+        max_tokens: maxTokens,
         keep_alive: this.keepAliveTime
       })
       for await (const chunk of stream) {
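Both `max_tokens` call sites now read from the single destructured `maxTokens` instead of calling the removed `getAssistantMaxTokens` helper. The diff preserves an asymmetry worth noting: Anthropic's Messages API requires `max_tokens`, so it falls back to `DEFAULT_MAX_TOKENS`, while OpenAI's parameter is optional and `undefined` simply leaves the cap unset. A standalone sketch of that asymmetry; the client setup, default value, and model names are illustrative assumptions, not the repository's code:

```ts
import Anthropic from '@anthropic-ai/sdk'
import OpenAI from 'openai'

const DEFAULT_MAX_TOKENS = 4096 // assumed value for illustration

async function complete(provider: 'anthropic' | 'openai', maxTokens?: number) {
  if (provider === 'anthropic') {
    // Anthropic requires max_tokens on every request, hence the fallback.
    const anthropic = new Anthropic() // reads ANTHROPIC_API_KEY from the env
    return anthropic.messages.create({
      model: 'claude-3-haiku-20240307',
      max_tokens: maxTokens || DEFAULT_MAX_TOKENS,
      messages: [{ role: 'user', content: 'Hello' }]
    })
  }

  // OpenAI treats max_tokens as optional; undefined omits the cap entirely.
  const openai = new OpenAI() // reads OPENAI_API_KEY from the env
  return openai.chat.completions.create({
    model: 'gpt-4o-mini',
    max_tokens: maxTokens,
    messages: [{ role: 'user', content: 'Hello' }]
  })
}
```
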
27 changes: 18 additions & 9 deletions src/renderer/src/services/assistant.ts
@@ -1,9 +1,9 @@
-import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
+import { DEFAULT_CONEXTCOUNT, DEFAULT_MAX_TOKENS, DEFAULT_TEMPERATURE } from '@renderer/config/constant'
 import i18n from '@renderer/i18n'
 import store from '@renderer/store'
 import { updateAgent } from '@renderer/store/agents'
 import { updateAssistant } from '@renderer/store/assistants'
-import { Agent, Assistant, Model, Provider, Topic } from '@renderer/types'
+import { Agent, Assistant, AssistantSettings, Model, Provider, Topic } from '@renderer/types'
 import { getLeadingEmoji, removeLeadingEmoji, uuid } from '@renderer/utils'
 
 export function getDefaultAssistant(): Assistant {
export function getDefaultAssistant(): Assistant {
@@ -57,16 +57,25 @@ export function getProviderByModelId(modelId?: string) {
   return providers.find((p) => p.models.find((m) => m.id === _modelId)) as Provider
 }
 
-export function getAssistantMaxTokens(assistant: Assistant) {
-  if (assistant.settings?.enableMaxTokens) {
-    const maxTokens = assistant.settings.maxTokens
-    if (typeof maxTokens === 'number') {
-      return maxTokens > 100 ? maxTokens : DEFAULT_MAX_TOKENS
+export const getAssistantSettings = (assistant: Assistant): AssistantSettings => {
+  const contextCount = assistant?.settings?.contextCount ?? DEFAULT_CONEXTCOUNT
+  const getAssistantMaxTokens = () => {
+    if (assistant.settings?.enableMaxTokens) {
+      const maxTokens = assistant.settings.maxTokens
+      if (typeof maxTokens === 'number') {
+        return maxTokens > 100 ? maxTokens : DEFAULT_MAX_TOKENS
+      }
+      return DEFAULT_MAX_TOKENS
     }
-    return DEFAULT_MAX_TOKENS
+    return undefined
   }
 
-  return undefined
+  return {
+    contextCount: contextCount === 20 ? 100000 : contextCount,
+    temperature: assistant?.settings?.temperature ?? DEFAULT_TEMPERATURE,
+    enableMaxTokens: assistant?.settings?.enableMaxTokens ?? false,
+    maxTokens: getAssistantMaxTokens()
+  }
 }
 
 export function covertAgentToAssistant(agent: Agent): Assistant {
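This refactor makes `getAssistantSettings` own all default resolution, folding the old `getAssistantMaxTokens` helper in as a local closure. A self-contained sketch of the resolution rules it implements; the three constant values are assumptions for illustration (only their names appear in the diff):

```ts
const DEFAULT_CONEXTCOUNT = 20 // assumed
const DEFAULT_TEMPERATURE = 0.7 // assumed
const DEFAULT_MAX_TOKENS = 4096 // assumed

type AssistantSettings = {
  contextCount: number
  temperature: number
  maxTokens: number | undefined
  enableMaxTokens: boolean
}

type Assistant = { settings?: Partial<AssistantSettings> }

const getAssistantSettings = (assistant: Assistant): AssistantSettings => {
  const contextCount = assistant?.settings?.contextCount ?? DEFAULT_CONEXTCOUNT

  const getMaxTokens = () => {
    if (assistant.settings?.enableMaxTokens) {
      const maxTokens = assistant.settings.maxTokens
      if (typeof maxTokens === 'number') {
        // Values of 100 or below are treated as misconfigured and replaced.
        return maxTokens > 100 ? maxTokens : DEFAULT_MAX_TOKENS
      }
      return DEFAULT_MAX_TOKENS
    }
    return undefined // no cap unless the user enabled one
  }

  return {
    // A contextCount of exactly 20 acts as a sentinel for "unlimited".
    contextCount: contextCount === 20 ? 100000 : contextCount,
    temperature: assistant?.settings?.temperature ?? DEFAULT_TEMPERATURE,
    enableMaxTokens: assistant?.settings?.enableMaxTokens ?? false,
    maxTokens: getMaxTokens()
  }
}

// With the assumed defaults, an assistant with no explicit settings resolves to:
console.log(getAssistantSettings({}))
// { contextCount: 100000, temperature: 0.7, enableMaxTokens: false, maxTokens: undefined }
console.log(getAssistantSettings({ settings: { enableMaxTokens: true, maxTokens: 50 } }).maxTokens)
// 4096 (the sub-100 value is bumped to the default)
```
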
3 changes: 2 additions & 1 deletion src/renderer/src/services/messages.ts
@@ -1,8 +1,9 @@
 import { Assistant, Message } from '@renderer/types'
-import { getAssistantSettings } from '@renderer/utils'
 import { GPTTokens } from 'gpt-tokens'
 import { takeRight } from 'lodash'
 
+import { getAssistantSettings } from './assistant'
+
 export const filterAtMessages = (messages: Message[]) => {
   return messages.filter((message) => message.type !== '@')
 }
2 changes: 1 addition & 1 deletion src/renderer/src/types/index.ts
@@ -14,7 +14,7 @@ export type Assistant = {
 export type AssistantSettings = {
   contextCount: number
   temperature: number
-  maxTokens: number
+  maxTokens: number | undefined
   enableMaxTokens: boolean
 }

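Widening `maxTokens` to `number | undefined`, rather than making it optional as `maxTokens?: number`, means every `AssistantSettings` object must still spell the key out while forcing callers to handle the missing-cap case. A short sketch of the distinction:

```ts
type WithOptionalKey = { maxTokens?: number } // key may be omitted entirely
type WithUndefinedValue = { maxTokens: number | undefined } // key is required

const a: WithOptionalKey = {} // OK: the key can be left out
// const b: WithUndefinedValue = {} // error: property 'maxTokens' is missing
const b: WithUndefinedValue = { maxTokens: undefined } // must be explicit
```

This matches `getAssistantSettings` above, which always returns the field even when no cap is enabled.
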
11 changes: 1 addition & 10 deletions src/renderer/src/utils/index.ts
@@ -1,5 +1,4 @@
-import { DEFAULT_CONEXTCOUNT, DEFAULT_TEMPERATURE } from '@renderer/config/constant'
-import { Assistant, AssistantSettings, Model } from '@renderer/types'
+import { Model } from '@renderer/types'
 import imageCompression from 'browser-image-compression'
 import { v4 as uuidv4 } from 'uuid'
 
@@ -177,14 +176,6 @@ export function getFirstCharacter(str) {
   }
 }
 
-export const getAssistantSettings = (assistant: Assistant): AssistantSettings => {
-  const contextCount = assistant?.settings?.contextCount ?? DEFAULT_CONEXTCOUNT
-  return {
-    contextCount: contextCount === 20 ? 100000 : contextCount,
-    temperature: assistant?.settings?.temperature ?? DEFAULT_TEMPERATURE
-  }
-}
-
 /**
  * is valid proxy url
  * @param url proxy url
