From 8495571a68cde9dc75c0daf2a6458f535c8d4ae4 Mon Sep 17 00:00:00 2001
From: Reema Alzaid
Date: Thu, 26 Dec 2024 21:33:53 +0300
Subject: [PATCH] Add evaluation config for code_llama2

---
 recipes/configs/code_llama2/evaluation.yaml | 45 ++++++++++++++++++++
 torchtune/_recipe_registry.py               |  4 ++
 2 files changed, 49 insertions(+)
 create mode 100644 recipes/configs/code_llama2/evaluation.yaml

diff --git a/recipes/configs/code_llama2/evaluation.yaml b/recipes/configs/code_llama2/evaluation.yaml
new file mode 100644
index 0000000000..596170cb1c
--- /dev/null
+++ b/recipes/configs/code_llama2/evaluation.yaml
@@ -0,0 +1,45 @@
+# Config for EleutherEvalRecipe in eleuther_eval.py
+#
+# To launch, run the following command:
+#   tune run eleuther_eval --config code_llama2/evaluation
+
+output_dir: /tmp/torchtune/code_llama2_7B/evaluation # Required by the ${output_dir} reference below; change to your preferred eval output directory.
+
+# Model arguments
+model:
+  _component_: torchtune.models.code_llama2.code_llama2_7b
+
+# Tokenizer
+tokenizer:
+  _component_: torchtune.models.llama2.llama2_tokenizer
+  path: /tmp/CodeLlama-7b-hf/tokenizer.model
+  max_seq_len: null
+
+# Checkpointer
+checkpointer:
+  _component_: torchtune.training.FullModelHFCheckpointer
+  checkpoint_dir: /tmp/CodeLlama-7b-hf
+  checkpoint_files: [
+    pytorch_model-00001-of-00003.bin,
+    pytorch_model-00002-of-00003.bin,
+    pytorch_model-00003-of-00003.bin
+  ]
+  recipe_checkpoint: null
+  output_dir: ${output_dir}
+  model_type: LLAMA2
+resume_from_checkpoint: False
+
+# Environment
+device: cpu
+dtype: bf16
+seed: 1234 # It is not recommended to change this seed, because it matches EleutherAI's default seed
+
+# EleutherAI specific eval args
+tasks: ["truthfulqa_mc2"]
+limit: null
+max_seq_length: 4096
+batch_size: 8
+enable_kv_cache: True
+
+# Quantization specific args
+quantizer: null
diff --git a/torchtune/_recipe_registry.py b/torchtune/_recipe_registry.py
index faf1ec7124..bf4ee3f34e 100644
--- a/torchtune/_recipe_registry.py
+++ b/torchtune/_recipe_registry.py
@@ -469,6 +469,10 @@ class Recipe:
                 name="mistral/evaluation",
                 file_path="mistral/evaluation.yaml",
             ),
+            Config(
+                name="code_llama2/evaluation",
+                file_path="code_llama2/evaluation.yaml",
+            ),
         ],
        supports_distributed=False,
    ),
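
Usage sketch, assuming the standard torchtune CLI workflow: the Hugging Face
repo ID, the download flags, and the override values below are illustrative
assumptions rather than part of this patch.

    # Fetch the CodeLlama-7b-hf weights and tokenizer into the directory the
    # config expects (repo ID assumed; a gated repo may also require --hf-token).
    tune download codellama/CodeLlama-7b-hf --output-dir /tmp/CodeLlama-7b-hf

    # Launch the eval recipe with the new config (command from the config header).
    tune run eleuther_eval --config code_llama2/evaluation

    # Any key in the YAML can be overridden on the command line, e.g. task and batch size.
    tune run eleuther_eval --config code_llama2/evaluation tasks='["hellaswag"]' batch_size=4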