From e043dc6b8ae9f9fc794fc05d9c035742be7fc77d Mon Sep 17 00:00:00 2001
From: Kartikay Khandelwal
Date: Tue, 9 Apr 2024 20:15:23 -0700
Subject: [PATCH] typo

---
 torchtune/utils/_checkpointing/_checkpointer.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/torchtune/utils/_checkpointing/_checkpointer.py b/torchtune/utils/_checkpointing/_checkpointer.py
index 9c430ab52a..402529e244 100644
--- a/torchtune/utils/_checkpointing/_checkpointer.py
+++ b/torchtune/utils/_checkpointing/_checkpointer.py
@@ -261,7 +261,7 @@ class FullModelHFCheckpointer(_CheckpointerInterface):
     the Llama-2-7b-hf model from the meta-llama repo (https://huggingface.co/meta-llama/Llama-2-7b-hf)

     A few notes about the checkpoint reading logic:
-    - HF checkpoint names usually oredered by ID (eg: 0001_of_0003, 0002_of_0003, etc.) To ensure
+    - HF checkpoint names usually ordered by ID (eg: 0001_of_0003, 0002_of_0003, etc.) To ensure
      we read the files in the right order, we sort the checkpoint file names before reading
     - Checkpoint conversion to and from HF's format requires access to model params which are
      read directly from the "config.json" file. This helps ensure we either load the weights
@@ -574,7 +574,7 @@ def save_checkpoint(
         """
         Save TorchTune checkpoint to file. If ``intermediate_checkpoint`` is True, an additional
         checkpoint file ``recipe_state.pt`` is created in ``_output_dir`` which contains the recipe
-        state. The output state dicts have the following formats:
+        state.

         Args:
             state_dict (Dict[str, Any]): Checkpoint state dict to be written out to file
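
The first hunk's docstring note, that sharded HF checkpoint file names encode a shard ID and must be read in sorted order, is worth seeing in code. Below is a minimal sketch of that idea, assuming shard files named like pytorch_model-0001-of-0003.bin in a local checkpoint directory; the function name, file pattern, and torch.load usage are illustrative assumptions, not torchtune's actual implementation.

    from pathlib import Path

    import torch

    def load_sharded_checkpoint(checkpoint_dir: str) -> dict:
        """Merge sharded HF checkpoint files into a single state dict.

        Shard IDs are zero-padded (e.g. 0001-of-0003), so a plain
        lexicographic sort reads the shards in the right order.
        """
        merged_state_dict = {}
        # sorted() guarantees ...-0001-of-0003 is read before ...-0002-of-0003;
        # without it, filesystem iteration order is arbitrary.
        for shard in sorted(Path(checkpoint_dir).glob("pytorch_model-*.bin")):
            shard_state_dict = torch.load(shard, map_location="cpu")
            merged_state_dict.update(shard_state_dict)
        return merged_state_dict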
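
The second hunk's docstring describes save_checkpoint writing an extra recipe_state.pt into the output directory when ``intermediate_checkpoint`` is True. A rough sketch of that control flow follows, under the assumption that the recipe state (optimizer state, epoch, etc.) lives alongside the model weights in the incoming state dict; the key names and file layout here are hypothetical, not torchtune's on-disk format.

    import os
    from typing import Any, Dict

    import torch

    def save_checkpoint(
        state_dict: Dict[str, Any],
        output_dir: str,
        intermediate_checkpoint: bool = False,
    ) -> None:
        """Write model weights, plus recipe state for mid-training checkpoints."""
        os.makedirs(output_dir, exist_ok=True)
        # Model weights are always written out.
        torch.save(state_dict["model"], os.path.join(output_dir, "model.pt"))
        if intermediate_checkpoint:
            # Recipe state is only needed to resume an interrupted run,
            # so it goes into a separate recipe_state.pt file.
            recipe_state = {k: v for k, v in state_dict.items() if k != "model"}
            torch.save(recipe_state, os.path.join(output_dir, "recipe_state.pt"))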