diff --git a/gptqmodel/models/base.py b/gptqmodel/models/base.py
index 46dc54da0..d07e55c5f 100644
--- a/gptqmodel/models/base.py
+++ b/gptqmodel/models/base.py
@@ -763,7 +763,6 @@ def save_quantized(
         quantize_config.save_pretrained(save_dir)
 
     def get_model_with_quantize(self, quantize_config):
-        print("quantize_config.c", quantize_config)
         config = AutoConfig.from_pretrained(
             quantize_config.model_name_or_path,
             trust_remote_code=True,
diff --git a/tests/test_save_loaded_quantized_model.py b/tests/test_save_loaded_quantized_model.py
index b0a6b7862..1c552f81d 100644
--- a/tests/test_save_loaded_quantized_model.py
+++ b/tests/test_save_loaded_quantized_model.py
@@ -1,8 +1,9 @@
 import unittest
+
 import torch
-from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM
+from gptqmodel import BACKEND, GPTQModel
 from parameterized import parameterized
-from gptqmodel import GPTQModel,BACKEND
+from transformers import AutoTokenizer
 
 MODEL_ID = "LnL-AI/TinyLlama-1.1B-Chat-v1.0-GPTQ-4bit"
 
diff --git a/tests/test_serialization.py b/tests/test_serialization.py
index 6483293f7..4da68c2bb 100644
--- a/tests/test_serialization.py
+++ b/tests/test_serialization.py
@@ -10,7 +10,7 @@
 import unittest  # noqa: E402
 
 from gptqmodel import BACKEND, GPTQModel  # noqa: E402
-from gptqmodel.quantization import FORMAT, FORMAT_FIELD_JSON, QUANT_CONFIG_FILENAME  # noqa: E402
+from gptqmodel.quantization import FORMAT, FORMAT_FIELD_JSON  # noqa: E402
 
 
 class TestSerialization(unittest.TestCase):