Commit b5d2e63: [ez] remove stale pytorch version check (#2075)

ebsmothers authored Nov 26, 2024
1 parent b1aecb1
Showing 2 changed files with 0 additions and 18 deletions.
9 changes: 0 additions & 9 deletions recipes/full_finetune_distributed.py
@@ -122,15 +122,6 @@ def __init__(self, cfg: DictConfig) -> None:
"full fp16 training is not supported with this recipe. Please use bf16 or fp32 instead."
)

if (
cfg.get("fsdp_cpu_offload", False)
and cfg.optimizer.get("fused", False)
and not utils.torch_version_ge("2.4.0")
):
raise RuntimeError(
"Using fused optimizer on CPU is only supported in PyTorch nightly."
)

# logging attributes
self._output_dir = cfg.output_dir
self._log_every_n_steps = cfg.get("log_every_n_steps", 1)
9 changes: 0 additions & 9 deletions recipes/qat_distributed.py
@@ -133,15 +133,6 @@ def __init__(self, cfg: DictConfig) -> None:
"full fp16 training is not supported with this recipe. Please use bf16 or fp32 instead."
)

if (
cfg.get("fsdp_cpu_offload", False)
and cfg.optimizer.get("fused", False)
and not utils.torch_version_ge("2.4.0")
):
raise RuntimeError(
"Using fused optimizer on CPU is only supported in PyTorch nightly."
)

# logging attributes
self._output_dir = cfg.output_dir
self._log_every_n_steps = cfg.get("log_every_n_steps", 1)
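
For reference, the deleted guard in both recipes combined two config flags with a runtime PyTorch version check; it is "stale" presumably because the project's minimum supported PyTorch now meets 2.4.0, so the branch could never fire. Below is a minimal sketch of that version-gate pattern, assuming a helper built on the `packaging` library; torchtune's actual `utils.torch_version_ge` may be implemented differently, and the config flags are stand-ins taken from the diff.

# Sketch of a runtime PyTorch version gate, assuming the `packaging`
# library is installed. Illustrative only; not torchtune's implementation.
import torch
from packaging.version import parse


def torch_version_ge(version: str) -> bool:
    # parse() handles local build suffixes such as "2.4.0+cu121".
    # Caveat: nightly builds like "2.5.0a0+git..." parse as pre-releases
    # and compare less than the corresponding release.
    return parse(torch.__version__) >= parse(version)


# Usage mirroring the removed check (flag names taken from the diff):
fsdp_cpu_offload = True  # stand-in for cfg.get("fsdp_cpu_offload", False)
fused_optimizer = True   # stand-in for cfg.optimizer.get("fused", False)

if fsdp_cpu_offload and fused_optimizer and not torch_version_ge("2.4.0"):
    raise RuntimeError(
        "Using fused optimizer on CPU is only supported in PyTorch nightly."
    )

Deleting the whole branch, rather than bumping the version string, avoids keeping a dead code path around once the gated feature is available in every supported PyTorch release.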