model: add Llama 3.2 1B and 3B models
Neet-Nestor committed Sep 25, 2024
1 parent 6fd3134 commit 1cdb99c
Showing 4 changed files with 104 additions and 7 deletions.
app/constant.ts (97 additions, 0 deletions)
@@ -65,6 +65,103 @@ Latex block format: $$e=mc^2$$
 `;
 
 export const DEFAULT_MODELS: ModelRecord[] = [
+  // Llama-3.2 1B
+  {
+    name: "Llama-3.2-1B-Instruct-q4f32_1-MLC",
+    display_name: "Llama",
+    provider: "Meta",
+    size: "1B",
+    quantization: "q4f32",
+    family: "Llama 3.2",
+    vram_required_MB: 1128.82,
+    low_resource_required: true,
+    recommended_config: {
+      temperature: 0.6,
+      presence_penalty: 0,
+      frequency_penalty: 0,
+      top_p: 0.9,
+    },
+  },
+  {
+    name: "Llama-3.2-1B-Instruct-q4f16_1-MLC",
+    display_name: "Llama",
+    provider: "Meta",
+    size: "1B",
+    quantization: "q4f16",
+    family: "Llama 3.2",
+    vram_required_MB: 879.04,
+    low_resource_required: true,
+    recommended_config: {
+      temperature: 0.6,
+      presence_penalty: 0,
+      frequency_penalty: 0,
+      top_p: 0.9,
+    },
+  },
+  {
+    name: "Llama-3.2-1B-Instruct-q0f32-MLC",
+    display_name: "Llama",
+    provider: "Meta",
+    size: "1B",
+    quantization: "q0f32",
+    family: "Llama 3.2",
+    vram_required_MB: 5106.26,
+    low_resource_required: true,
+    recommended_config: {
+      temperature: 0.6,
+      presence_penalty: 0,
+      frequency_penalty: 0,
+      top_p: 0.9,
+    },
+  },
+  {
+    name: "Llama-3.2-1B-Instruct-q0f16-MLC",
+    display_name: "Llama",
+    provider: "Meta",
+    size: "1B",
+    quantization: "q0f16",
+    family: "Llama 3.2",
+    vram_required_MB: 2573.13,
+    low_resource_required: true,
+    recommended_config: {
+      temperature: 0.6,
+      presence_penalty: 0,
+      frequency_penalty: 0,
+      top_p: 0.9,
+    },
+  },
+  {
+    name: "Llama-3.2-3B-Instruct-q4f32_1-MLC",
+    display_name: "Llama",
+    provider: "Meta",
+    size: "3B",
+    quantization: "q4f32",
+    family: "Llama 3.2",
+    vram_required_MB: 2951.51,
+    low_resource_required: true,
+    recommended_config: {
+      temperature: 0.6,
+      presence_penalty: 0,
+      frequency_penalty: 0,
+      top_p: 0.9,
+    },
+  },
+  {
+    name: "Llama-3.2-3B-Instruct-q4f16_1-MLC",
+    display_name: "Llama",
+    provider: "Meta",
+    size: "3B",
+    quantization: "q4f16",
+    family: "Llama 3.2",
+    vram_required_MB: 2263.69,
+    low_resource_required: true,
+    recommended_config: {
+      temperature: 0.6,
+      presence_penalty: 0,
+      frequency_penalty: 0,
+      top_p: 0.9,
+    },
+  },
   // Llama-3.1 8B
   {
     name: "Llama-3.1-8B-Instruct-q4f32_1-MLC-1k",
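For context, each new ModelRecord name corresponds to a prebuilt WebLLM model id, presumably made available by the `@mlc-ai/web-llm` bump further down in this commit. A minimal sketch of loading one of the new 1B models and issuing a chat completion through the library's public API, using the recommended sampling values from the entries above (the specific id and prompt are just illustrative):

```ts
import { CreateMLCEngine } from "@mlc-ai/web-llm";

async function demo() {
  // Download and initialize the model in the browser; the id matches the
  // ModelRecord name added in app/constant.ts above.
  const engine = await CreateMLCEngine("Llama-3.2-1B-Instruct-q4f16_1-MLC", {
    initProgressCallback: (report) => console.log(report.text),
  });

  // OpenAI-style chat completion with the recommended_config sampling settings.
  const reply = await engine.chat.completions.create({
    messages: [{ role: "user", content: "Hello!" }],
    temperature: 0.6,
    top_p: 0.9,
  });
  console.log(reply.choices[0].message.content);
}

demo();
```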
app/store/config.ts (2 additions, 2 deletions)
@@ -208,9 +208,9 @@ export const useAppConfig = createPersistStore(
   }),
   {
     name: StoreKey.Config,
-    version: 0.52,
+    version: 0.53,
     migrate: (persistedState, version) => {
-      if (version < 0.52) {
+      if (version < 0.53) {
         return {
           ...DEFAULT_CONFIG,
           ...(persistedState as any),
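The version bump is what makes the new models reach existing users: the persisted config is versioned, so raising it to 0.53 forces clients that stored state under 0.52 to run the migrate step, which rebases their saved fields on top of the current defaults. A rough sketch of that pattern with zustand's persist middleware, with the store shape simplified and DEFAULT_CONFIG standing in for the repo's real default config:

```ts
// Sketch only: assumes zustand persist semantics; the store shape is simplified.
import { create } from "zustand";
import { persist } from "zustand/middleware";

const DEFAULT_CONFIG = {
  modelConfig: { model: "Llama-3.2-1B-Instruct-q4f16_1-MLC" }, // illustrative
};
type AppConfig = typeof DEFAULT_CONFIG;

export const useAppConfig = create<AppConfig>()(
  persist(() => ({ ...DEFAULT_CONFIG }), {
    name: "app-config",
    version: 0.53, // bumped from 0.52 so previously persisted state is migrated
    migrate: (persistedState, version) => {
      if (version < 0.53) {
        // Merge the old persisted fields over fresh defaults so newly
        // introduced defaults become available to existing users.
        return { ...DEFAULT_CONFIG, ...(persistedState as Partial<AppConfig>) };
      }
      return persistedState as AppConfig;
    },
  }),
);
```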
package.json (1 addition, 1 deletion)
@@ -17,7 +17,7 @@
"dependencies": {
"@fortaine/fetch-event-source": "^3.0.6",
"@hello-pangea/dnd": "^16.5.0",
"@mlc-ai/web-llm": "^0.2.63",
"@mlc-ai/web-llm": "^0.2.67",
"@serwist/next": "^9.0.2",
"@svgr/webpack": "^6.5.1",
"emoji-picker-react": "^4.9.2",
yarn.lock (4 additions, 4 deletions)
@@ -1180,10 +1180,10 @@
"@jridgewell/resolve-uri" "^3.1.0"
"@jridgewell/sourcemap-codec" "^1.4.14"

"@mlc-ai/web-llm@^0.2.63":
version "0.2.63"
resolved "https://registry.yarnpkg.com/@mlc-ai/web-llm/-/web-llm-0.2.63.tgz#73d2871081fbcf088ce70ec29f08351b9889981e"
integrity sha512-X4xy3cS9Xh/UR//FU07eqcLdeGyMLTwLNF8pbIgJok8IJvzeigLqFXCGktGHf6HLlFRWE7A6+jVCjq1Iw4cGeA==
"@mlc-ai/web-llm@^0.2.67":
version "0.2.67"
resolved "https://registry.yarnpkg.com/@mlc-ai/web-llm/-/web-llm-0.2.67.tgz#ab6ea18fed10d129f1a9d234606ae909e0966a5e"
integrity sha512-aS2uMKSvgj2qNc2441W4TQBUOOj5GdYHivq77XwU1V8YggibQDbPu6Fqwwj410RIVLPFXBfEOIJH0Bsf4Q7klQ==
dependencies:
loglevel "^1.9.1"
