Skip to content

Commit

Permalink
fix: crash at first start
Browse files (browse the repository at this point in the history)
Branch information:
mikbry authored Feb 15, 2024
2 parents c32b231 + a6bc24d commit 941faaa
Show file tree
Hide file tree
Showing 5 changed files with 15 additions and 28 deletions.
4 changes: 1 addition & 3 deletions webapp/hooks/useCollectionStorage.ts
Original file line number Diff line number Diff line change
Expand Up @@ -31,9 +31,7 @@ export default function useCollectionStorage<T>(
let v = collection[key];
if (!v) {
v = (await dataStorage().getItem(collectionId, defaultValue, key)) as T;
if (v) {
setCollection({ ...collection, [key]: v });
}
setCollection({ ...collection, [key]: v });
}
return v;
};
Expand Down
23 changes: 4 additions & 19 deletions webapp/native/assets/opla_default_config.json
Original file line number Diff line number Diff line change
Expand Up @@ -6,24 +6,8 @@
},
"models": {
"path": "dev/ai/models",
"active_model": "OpenHermes 2.5 - Mistral 7B",
"items": [
{
"id": "f9a8b7c6-d5e4-4f3e-2d1c-0a9b8c7d6e5f",
"name": "OpenHermes-2.5-Mistral-7B.q4_k.gguf",
"title": "OpenHermes 2.5 - Mistral 7B",
"path": "openhermes-7b-v2.5",
"file_name": "ggml-model-q4_k.gguf",
"description": "OpenHermes 2.5 Mistral 7B is a state of the art Mistral Fine-tune, a continuation of OpenHermes 2 model, which trained on additional code datasets.",
"version": "2.5.0",
"license": "Apache-2.0",
"author": "Teknium",
"quantization": "Q4_K",
"bits": 4,
"repository": "https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B"
}
],
"downloads": []
"active_model": null,
"items": []
},
"server": {
"name": "llama.cpp",
Expand All @@ -36,5 +20,6 @@
"threads": 6,
"n_gpu_layers": 0
}
}
},
"downloads": []
}
2 changes: 1 addition & 1 deletion webapp/utils/backend/Backend.ts
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ class Backend {
}

this.start = async (parameters: LlamaCppArguments, model = activeModel) => {
logger.info('start server', parameters);
logger.info('start server', model, parameters);
return startLLamaCppServer(model, parameters);
};

Expand Down
8 changes: 4 additions & 4 deletions webapp/utils/dataStorage.ts
Original file line number Diff line number Diff line change
Expand Up @@ -98,14 +98,14 @@ const MockStorage: DataStorage = {
const FileStorage: DataStorage = {
async getItem<T>(key: string, defaultValue?: T, path = '') {
logger.warn('FileStorage.getItem() called', path);
let value = await readFromLocalStorage<T>(key, path);
if (!value || (Array.isArray(value) && value.length === 0)) {
const value = await readFromLocalStorage<T>(key, path);
/* if (!value || (Array.isArray(value) && value.length === 0)) {
value = await LocalStorage.getItem<T>(key, defaultValue);
if (value || defaultValue) {
await writeToLocalStorage(key, value || defaultValue, path);
}
}
return value;
} */
return value || (defaultValue as T);
},
async setItem<T>(key: string, value: T, path = '') {
if (value === undefined) {
Expand Down
6 changes: 5 additions & 1 deletion webapp/utils/providers/llama.cpp/schema.ts
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,11 @@ const LlamaCppArgumentsSchema = z.object({
.describe(
'use cuBLAS instead of custom mul_mat_q CUDA kernels. Not recommended since this is both slower and uses more VRAM.',
),
model: z.string().optional().describe('model path (default: models/7B/ggml-model-f16.gguf)'),
model: z
.string()
.nullable()
.optional()
.describe('model path (default: models/7B/ggml-model-f16.gguf)'),
alias: z
.string()
.optional()
Expand Down

0 comments on commit 941faaa

Please sign in to comment.