
Commit 3b0a318

config class rename for consistency, adding preset with anneal LR to 0
Ilia Kulikov committed Oct 1, 2024
1 parent 8ce7b48 commit 3b0a318
Showing 2 changed files with 23 additions and 7 deletions.
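For context, the @instruction_finetune_preset(...) and @preference_finetune_preset(...) decorators seen in the diffs below register a named, zero-argument config factory, and a run then selects one of these presets by name. A minimal sketch of that registration pattern, assuming a plain dict registry (the _PRESETS name and register helper are illustrative, not fairseq2's actual internals):

from typing import Callable, Dict

# Illustrative preset registry; fairseq2's real implementation may differ.
_PRESETS: Dict[str, Callable[[], object]] = {}

def instruction_finetune_preset(name: str) -> Callable[[Callable[[], object]], Callable[[], object]]:
    """Register a zero-argument config factory under a preset name."""
    def register(factory: Callable[[], object]) -> Callable[[], object]:
        _PRESETS[name] = factory
        return factory
    return register

# A run would then resolve a preset by name, e.g.:
# config = _PRESETS["llama3_1_instruct_lr_anneal_0"]()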
10 changes: 9 additions & 1 deletion src/fairseq2/recipes/lm/instruction_finetune.py
@@ -234,8 +234,16 @@ def _llama3_1_instruct_constant_lr() -> InstructionFinetuneConfig:
     return config


+@instruction_finetune_preset("llama3_1_instruct_lr_anneal_0")
+def _llama3_1_instruct_constant_lr() -> InstructionFinetuneConfig:
+    config = _llama3_1_instruct()
+    # setting up final lr to be 0.0 at the end of the cycle
+    config.lr_scheduler_config.final_lr = 0.0
+    return config
+
+
 @instruction_finetune_preset("llama3_1_70b_instruct")
-def _llama3_70b_instruct() -> InstructionFinetuneConfig:
+def _llama3_1_70b_instruct() -> InstructionFinetuneConfig:
     config = _llama3_1_instruct()

     config.model = "llama3_1_70b_instruct"

Check failure (GitHub Actions / Lint Python / Lint) on line 237: Name "_llama3_1_instruct_constant_lr" already defined on line 229
Check failure (GitHub Actions / Lint Python / Lint) on line 238: redefinition of unused '_llama3_1_instruct_constant_lr' from line 230
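The two lint failures above show that the body of the new preset was copied from _llama3_1_instruct_constant_lr without renaming the function, so the module defines that name twice (both preset names likely still resolve, since registration happens at decoration time, but the second definition rebinds the module-level name). The presumably intended definition, an assumption based on the preset name rather than anything in this commit, would be:

@instruction_finetune_preset("llama3_1_instruct_lr_anneal_0")
def _llama3_1_instruct_lr_anneal_0() -> InstructionFinetuneConfig:
    config = _llama3_1_instruct()
    # Anneal the learning rate all the way to 0.0 by the end of the cycle.
    config.lr_scheduler_config.final_lr = 0.0
    return config

The same rename would resolve the matching failures in preference_finetune/recipe.py below.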
20 changes: 14 additions & 6 deletions src/fairseq2/recipes/lm/preference_finetune/recipe.py
@@ -48,7 +48,7 @@


 @dataclass(kw_only=True)
-class PreferenceFinetuningConfig:
+class PreferenceFinetuneConfig:
     """Holds the configuration of a language model preference-finetuning task."""

     # Data

@@ -206,22 +206,30 @@ class DropoutConfig:


 @preference_finetune_preset("llama3_1_instruct")
-def _llama3_1_instruct() -> PreferenceFinetuningConfig:
-    config = PreferenceFinetuningConfig()
+def _llama3_1_instruct() -> PreferenceFinetuneConfig:
+    config = PreferenceFinetuneConfig()
     config.model_config = DropoutConfig()
     return config


 @preference_finetune_preset("llama3_1_instruct_constant_lr")
-def _llama3_1_instruct_constant_lr() -> PreferenceFinetuningConfig:
+def _llama3_1_instruct_constant_lr() -> PreferenceFinetuneConfig:
     config = _llama3_1_instruct()
     # setting up final lr to be the optimizer base lr, lr_mul is 1.0 by default
     config.lr_scheduler_config.final_lr = config.optimizer_config.lr
     return config


+@preference_finetune_preset("llama3_1_instruct_lr_anneal_0")
+def _llama3_1_instruct_constant_lr() -> PreferenceFinetuneConfig:
+    config = _llama3_1_instruct()
+    # setting up final lr to be 0.0 at the end of the cycle
+    config.lr_scheduler_config.final_lr = 0.0
+    return config
+
+
 @preference_finetune_preset("llama3_1_70b_instruct")
-def _llama3_70b_instruct() -> PreferenceFinetuningConfig:
+def _llama3_1_70b_instruct() -> PreferenceFinetuneConfig:
     config = _llama3_1_instruct()

     config.model = "llama3_1_70b_instruct"

Check failure (GitHub Actions / Lint Python / Lint) on line 223: Name "_llama3_1_instruct_constant_lr" already defined on line 215
Check failure (GitHub Actions / Lint Python / Lint) on line 224: redefinition of unused '_llama3_1_instruct_constant_lr' from line 216

@@ -233,7 +241,7 @@ def _llama3_70b_instruct() -> PreferenceFinetuningConfig:


 def load_preference_finetuner(
-    config: PreferenceFinetuningConfig, output_dir: Path
+    config: PreferenceFinetuneConfig, output_dir: Path
 ) -> Trainer[PreferenceOptimizationBatch]:
     """Load a :class:`Trainer` for language model preference optimization-finetuning."""
     wall_watch = Stopwatch(start=True)
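The constant-LR and anneal-to-zero presets differ only in final_lr: llama3_1_instruct_constant_lr pins the end-of-cycle LR to the optimizer's base LR (an effectively flat schedule), while llama3_1_instruct_lr_anneal_0 decays it to zero. A self-contained sketch of that semantics, assuming a standard cosine-annealing shape; this is not fairseq2's scheduler code, and the base LR value is hypothetical:

import math

def cosine_lr(step: int, total_steps: int, base_lr: float, final_lr: float) -> float:
    # Cosine-anneal from base_lr at step 0 down to final_lr at total_steps.
    t = min(step, total_steps) / total_steps
    return final_lr + 0.5 * (base_lr - final_lr) * (1.0 + math.cos(math.pi * t))

base_lr = 5e-6  # hypothetical; the real value comes from the preset's optimizer config

# "llama3_1_instruct_constant_lr": final_lr == base_lr, so the LR never moves.
print(cosine_lr(500, 1000, base_lr, final_lr=base_lr))   # == base_lr

# "llama3_1_instruct_lr_anneal_0": final_lr == 0.0, so the LR reaches zero.
print(cosine_lr(1000, 1000, base_lr, final_lr=0.0))      # ~0.0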
