diff --git a/all.json b/all.json
index d540263..28b3ace 100644
--- a/all.json
+++ b/all.json
@@ -1,8 +1,10 @@
 {
-  "created_at": "2023-12-20T10:16:08Z",
-  "updated_at": "2023-12-20T10:16:08Z",
+  "_version": "1",
+  "created_at": "2023-12-20 14:52:51.076391+00:00",
+  "updated_at": "2023-12-20 14:52:51.076755+00:00",
   "models": [
     {
+      "_version": "1.0.0",
       "name": "llama2-7b-chat-hf",
       "creator": "meta-llama",
       "title": "Llama 2 7B Chat",
@@ -18,7 +20,9 @@
         "name": "Meta Research License Agreement v1.0"
       },
       "tags": "llama llama2 facebook meta english",
-      "task_type": "conversational",
+      "task_type": [
+        "conversational"
+      ],
       "languages": [
         "en"
       ],
@@ -32,14 +36,13 @@
       "use_cases": " Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.",
       "out_of_scope_use_cases": "Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.",
       "bias_risks_limitations": "Llama-2-Chat models are trained on a large corpus of English text, which may contain bias. We recommend that you evaluate the model for your use case before deploying it.",
-      "sources": {
-        "repository": "https://huggingface.co/meta-llama/Llama-2-7b-chat-hf",
-        "download": "https://huggingface.co/meta-llama/Llama-2-7b-chat-hf",
-        "paper": "https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/"
-      },
+      "repository": "https://huggingface.co/meta-llama/Llama-2-7b-chat-hf",
+      "download": "https://huggingface.co/meta-llama/Llama-2-7b-chat-hf",
+      "paper": "https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/",
       "include": [
         {
           "name": "Llama-2-7B-Chat-GGML",
+          "release_date": "2021-10-01",
           "publisher": {
             "name": "TheBloke",
             "url": "https://huggingface.co/TheBloke"
@@ -48,23 +51,12 @@
           "base_model": "meta-llama/llama-2-7b-chat-hf",
           "model_type": "llama",
           "library": "GGML",
-          "sources": {
-            "repository": "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/blob/main/llama-2-7b-chat.Q2_K.gguf"
-          },
+          "repository": "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/blob/main/llama-2-7b-chat.Q2_K.gguf",
           "include": [
             {
               "name": "llama-2-7b-chat.Q2_K.gguf",
               "use_case": "smallest, significant quality loss - not recommended for most purposes",
-              "tensor_type": "Q2_K",
-              "bits": 2,
-              "size": 2.83,
-              "max_ram": 5.33,
-              "download": "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/blob/main/llama-2-7b-chat.Q2_K.gguf"
-            },
-            {
-              "name": "llama-2-7b-chat.Q2_K.gguf",
-              "recommandations": "smallest, significant quality loss - not recommended for most purposes",
-              "tensor_type": "Q2_K",
+              "quantization": "Q2_K",
               "bits": 2,
               "size": 2.83,
               "max_ram": 5.33,
@@ -73,7 +65,7 @@
             {
               "name": "llama-2-7b-chat.Q3_K_S.gguf",
               "recommandations": "very small, high quality loss",
-              "tensor_type": "Q3_K_S",
+              "quantization": "Q3_K_S",
               "bits": 3,
               "size": 2.95,
               "max_ram": 5.45,
@@ -82,25 +74,25 @@
             {
               "name": "llama-2-7b-chat.Q3_K_M.gguf",
               "recommandations": "very small, high quality loss",
-              "tensor_type": "Q3_K_M",
+              "quantization": "Q3_K_M",
               "bits": 3,
-              "size": 3.30,
-              "max_ram": 5.80,
+              "size": 3.3,
+              "max_ram": 5.8,
               "download": "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/blob/main/llama-2-7b-chat.Q3_K_M.gguf"
             },
             {
               "name": "llama-2-7b-chat.Q3_K_L.gguf",
               "recommandations": "small, substantial quality loss",
"small, substantial quality loss", - "tensor_type": "Q3_K_L", + "quantization": "Q3_K_L", "bits": 3, - "size": 3.60, - "max_ram": 6.10, + "size": 3.6, + "max_ram": 6.1, "download": "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/blob/main/llama-2-7b-chat.Q3_K_L.gguf" }, { "name": "llama-2-7b-chat.Q4_0.gguf", "recommandations": "legacy; small, very high quality loss - prefer using Q3_K_M", - "tensor_type": "Q4_0", + "quantization": "Q4_0", "bits": 4, "size": 3.83, "max_ram": 6.33, @@ -110,7 +102,7 @@ { "name": "llama-2-7b-chat.Q4_K_S.gguf", "recommandations": "small, greater quality loss", - "tensor_type": "Q4_K_S", + "quantization": "Q4_K_S", "bits": 4, "size": 3.86, "max_ram": 6.36, @@ -119,7 +111,7 @@ { "name": "llama-2-7b-chat.Q4_K_M.gguf", "recommandations": "medium, balanced quality - recommended", - "tensor_type": "Q4_K_M", + "quantization": "Q4_K_M", "bits": 4, "size": 4.08, "max_ram": 6.58, @@ -129,7 +121,7 @@ { "name": "llama-2-7b-chat.Q5_0.gguf", "recommandations": "legacy; medium, balanced quality - prefer using Q4_K_M", - "tensor_type": "Q5_0", + "quantization": "Q5_0", "bits": 5, "size": 4.65, "max_ram": 7.15, @@ -139,7 +131,7 @@ { "name": "llama-2-7b-chat.Q5_K_S.gguf", "recommandations": "large, low quality loss - recommended", - "tensor_type": "Q5_K_S", + "quantization": "Q5_K_S", "bits": 5, "size": 4.65, "max_ram": 7.15, @@ -149,7 +141,7 @@ { "name": "llama-2-7b-chat.Q5_K_M.gguf", "recommandations": "large, very low quality loss - recommended", - "tensor_type": "Q5_K_M", + "quantization": "Q5_K_M", "bits": 5, "size": 4.78, "max_ram": 7.28, @@ -159,7 +151,7 @@ { "name": "llama-2-7b-chat.Q6_K.gguf", "recommandations": "very large, extremely low quality loss", - "tensor_type": "Q6_K", + "quantization": "Q6_K", "bits": 6, "size": 5.53, "max_ram": 8.03, @@ -168,7 +160,7 @@ { "name": "llama-2-7b-chat.Q8_0.gguf", "recommandations": "very large, extremely low quality loss", - "tensor_type": "Q8_0", + "quantization": "Q8_0", "bits": 8, "size": 7.16, "max_ram": 9.66, diff --git a/schema/v1/payload.schema.json b/schema/v1/models_collection.schema.json similarity index 79% rename from schema/v1/payload.schema.json rename to schema/v1/models_collection.schema.json index 48ca4eb..f2b0782 100644 --- a/schema/v1/payload.schema.json +++ b/schema/v1/models_collection.schema.json @@ -1,7 +1,7 @@ { - "$id": "https://opla.github.io/models/schema/v1/payload.schema.json", + "$id": "https://opla.github.io/models/schema/v1/models_collection.schema.json", "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Payload", + "title": "Models collection", "type": "object", "properties": { "_version": { @@ -19,7 +19,8 @@ "type": "array", "items": { "$ref": "https://opla.github.io/models/schema/v1/model.schema.json" - } + }, + "uniqueItems": true } }, "required": [ diff --git a/scripts/build.py b/scripts/build.py new file mode 100644 index 0000000..8e29d04 --- /dev/null +++ b/scripts/build.py @@ -0,0 +1,51 @@ +# Copyright 2023 mik +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from datetime import UTC, datetime
+import json
+import os
+from jsonschema import validate
+
+
+collection = {
+    "_version": "1",
+    "created_at": str(datetime.now(tz=UTC)),
+    "updated_at": str(datetime.now(tz=UTC)),
+    "models": []
+}
+
+def add_model(model):
+    collection['models'].append(model)
+
+def add_models(path):
+    # Recurse into subdirectories; each JSON file is one model entry.
+    if os.path.isdir(path):
+        for file in os.listdir(path):
+            add_models(os.path.join(path, file))
+    else:
+        with open(path, 'r') as f:
+            model = json.load(f)
+            add_model(model)
+
+def build():
+    add_models('./models')
+    with open('./schema/v1/models_collection.schema.json', 'r') as f:
+        schema = json.load(f)
+
+    # Raises a jsonschema.ValidationError if the collection does not conform.
+    validate(collection, schema)
+
+if __name__ == "__main__":
+    build()
+    with open("all.json", 'w') as f:
+        json.dump(collection, f)
\ No newline at end of file
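
A minimal consumer-side sketch of the same validation that scripts/build.py performs at build time, assuming the repository layout above and an installed jsonschema package. Note that, depending on the jsonschema version, resolving the remote "$ref" to model.schema.json may require network access or a locally registered copy of that schema:

    import json

    from jsonschema import validate

    # Load the artifact produced by scripts/build.py.
    with open("all.json") as f:
        collection = json.load(f)

    # Load the collection schema renamed in this change.
    with open("schema/v1/models_collection.schema.json") as f:
        schema = json.load(f)

    # Raises jsonschema.ValidationError if all.json does not conform.
    validate(collection, schema)
    print(f"OK: validated {len(collection['models'])} model(s)")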