From 40419fd3501b9acc1dcdb77a1ba9d502296b67bd Mon Sep 17 00:00:00 2001
From: Felipe Mello
Date: Tue, 3 Dec 2024 12:32:26 -0800
Subject: [PATCH] update test names

---
 tests/recipes/test_knowledge_distillation_distributed.py | 8 ++++----
 .../recipes/test_knowledge_distillation_single_device.py | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/tests/recipes/test_knowledge_distillation_distributed.py b/tests/recipes/test_knowledge_distillation_distributed.py
index 949883ac48..741d93daa9 100644
--- a/tests/recipes/test_knowledge_distillation_distributed.py
+++ b/tests/recipes/test_knowledge_distillation_distributed.py
@@ -62,7 +62,7 @@ def test_loss(self, tmpdir, monkeypatch):
 
         cmd = f"""
         tune run --nnodes 1 --nproc_per_node 2 knowledge_distillation_distributed \
-            --config llama3_2/knowledge_distillation_distributed \
+            --config llama3_2/8B_to_1B_KD_distributed \
             output_dir={tmpdir} \
             checkpointer._component_=torchtune.training.FullModelTorchTuneCheckpointer \
             checkpointer.checkpoint_dir='{ckpt_dir}' \
@@ -120,7 +120,7 @@ def test_training_state_on_resume(self, tmpdir, monkeypatch):
         # Train for two epochs
         cmd_1 = f"""
         tune run --nnodes 1 --nproc_per_node 2 knowledge_distillation_distributed \
-            --config llama3_2/knowledge_distillation_distributed \
+            --config llama3_2/8B_to_1B_KD_distributed \
             output_dir={tmpdir} \
             checkpointer=torchtune.training.FullModelTorchTuneCheckpointer \
             checkpointer.checkpoint_dir='{ckpt_dir}' \
@@ -148,7 +148,7 @@ def test_training_state_on_resume(self, tmpdir, monkeypatch):
         # Resume training
         cmd_2 = f"""
         tune run --nnodes 1 --nproc_per_node 2 knowledge_distillation_distributed \
-            --config llama3_2/knowledge_distillation_distributed \
+            --config llama3_2/8B_to_1B_KD_distributed \
             output_dir={tmpdir} \
             checkpointer=torchtune.training.FullModelTorchTuneCheckpointer \
             checkpointer.checkpoint_dir={tmpdir} \
@@ -199,7 +199,7 @@ def test_save_and_load_merged_weights(self, tmpdir, monkeypatch):
 
         cmd = f"""
         tune run --nnodes 1 --nproc_per_node 2 knowledge_distillation_distributed \
-            --config llama3_2/knowledge_distillation_distributed \
+            --config llama3_2/8B_to_1B_KD_distributed \
             output_dir={tmpdir} \
             checkpointer._component_={ckpt_component} \
             checkpointer.checkpoint_dir='{ckpt_dir}' \
diff --git a/tests/recipes/test_knowledge_distillation_single_device.py b/tests/recipes/test_knowledge_distillation_single_device.py
index 713e05c98f..975b71bf62 100644
--- a/tests/recipes/test_knowledge_distillation_single_device.py
+++ b/tests/recipes/test_knowledge_distillation_single_device.py
@@ -65,7 +65,7 @@ def test_loss(
         tmpdir,
         monkeypatch,
     ):
-        config = "qwen2/knowledge_distillation_single_device"
+        config = "qwen2/1.5_to_0.5B_KD_single_device"
         model_type = "llama3"
         ckpt_type = "tune"
         ckpt_component = CKPT_COMPONENT_MAP[ckpt_type]
@@ -152,7 +152,7 @@ def test_training_state_on_resume(self, tmpdir, monkeypatch):
         # Train for two epochs
         cmd_1 = f"""
         tune run knowledge_distillation_single_device \
-            --config qwen2/knowledge_distillation_single_device \
+            --config qwen2/1.5_to_0.5B_KD_single_device \
             output_dir={tmpdir} \
             checkpointer=torchtune.training.FullModelTorchTuneCheckpointer \
             checkpointer.checkpoint_dir='{ckpt_dir}' \
@@ -186,7 +186,7 @@ def test_training_state_on_resume(self, tmpdir, monkeypatch):
         # Resume training
         cmd_2 = f"""
         tune run knowledge_distillation_single_device \
-            --config qwen2/knowledge_distillation_single_device \
+            --config qwen2/1.5_to_0.5B_KD_single_device \
             output_dir={tmpdir} \
             checkpointer=torchtune.training.FullModelTorchTuneCheckpointer \
             checkpointer.checkpoint_dir={tmpdir} \
@@ -242,7 +242,7 @@ def test_save_and_load_merged_weights(self, tmpdir, monkeypatch):
 
         cmd = f"""
         tune run knowledge_distillation_single_device \
-            --config qwen2/knowledge_distillation_single_device \
+            --config qwen2/1.5_to_0.5B_KD_single_device \
             output_dir={tmpdir} \
             checkpointer._component_={ckpt_component} \
             checkpointer.checkpoint_dir='{ckpt_dir}' \
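
Note: outside of the test harness, the renamed configs are invoked the same way the tests do it. A minimal sketch of a manual run, assuming the torchtune `tune` CLI and the renamed config shipped with the package (`output_dir` below is an arbitrary example path, not one from the tests):

    # hypothetical usage example; the config name matches the rename in this patch
    tune run knowledge_distillation_single_device \
        --config qwen2/1.5_to_0.5B_KD_single_device \
        output_dir=/tmp/kd_out

As in the test commands above, any config field (e.g. checkpointer.checkpoint_dir) can be overridden as a key=value argument on the command line.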