added code of save/load lm_head_layer_inputs.pt
ZX-ModelCloud committed Jan 7, 2025
1 parent c3f826f commit 0df8dbb
Showing 1 changed file with 10 additions and 0 deletions.
10 changes: 10 additions & 0 deletions gptqmodel/models/base.py
@@ -725,13 +725,23 @@ def tmp(_, inp: Tuple[torch.Tensor, ...], out: torch.Tensor):

            if not is_lm_head:
                layers[i] = move_to(layer, CPU)
            else:
                move_to(layer, CPU)

            del layer
            del gptq
            del layer_inputs
            layer_inputs, layer_outputs = (
                layer_outputs,
                [],
            )  # TODO: is it really OK to cache only the first positional argument?

            # if i == layer_count - 1:
            #     print("saved", layer_inputs)
            #     torch.save(layer_inputs, "lm_head_layer_inputs.pt")
            #     layer_inputs = torch.load("lm_head_layer_inputs.pt")
            #     print("loaded", layer_inputs)

            torch_empty_cache()

        logger.info(f"Quantization summary:\n{self.quant_log}")