ENH: Do not pass warning message in case quantization_config is in config but not passed as an arg (#28988)

* Update auto.py

* Update auto.py

* Update src/transformers/quantizers/auto.py

Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>

* Update src/transformers/quantizers/auto.py

Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>

---------

Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>
Authored by Younes Belkada on 2024-02-14 01:19:42 +01:00; committed by GitHub
parent bd4b83e1ba
commit 1d12b8bc25


@@ -129,10 +129,13 @@ class AutoHfQuantizer:
         """
         handles situations where both quantization_config from args and quantization_config from model config are present.
         """
-        warning_msg = (
-            "You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading"
-            " already has a `quantization_config` attribute. The `quantization_config` from the model will be prevail."
-        )
+        if quantization_config_from_args is not None:
+            warning_msg = (
+                "You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading"
+                " already has a `quantization_config` attribute. The `quantization_config` from the model will be used."
+            )
+        else:
+            warning_msg = ""
 
         if isinstance(quantization_config, dict):
             quantization_config = AutoQuantizationConfig.from_dict(quantization_config)
@@ -144,5 +147,7 @@ class AutoHfQuantizer:
                 setattr(quantization_config, attr, val)
             warning_msg += f"However, loading attributes (e.g. {list(loading_attr_dict.keys())}) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored."
 
-        warnings.warn(warning_msg)
+        if warning_msg != "":
+            warnings.warn(warning_msg)
+
         return quantization_config
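
For context, below is a minimal standalone sketch of the warning logic this commit introduces (the name merge_configs and the simplified signature are illustrative, not the actual API; in transformers the logic lives in the AutoHfQuantizer method shown in the diff above):

import warnings

def merge_configs(config_from_model, config_from_args):
    # Warn only when the caller explicitly passed a quantization config
    # that the model's own `quantization_config` will take precedence over.
    if config_from_args is not None:
        warning_msg = (
            "You passed `quantization_config` to `from_pretrained`, but the model "
            "already has one; the `quantization_config` from the model will be used."
        )
    else:
        warning_msg = ""  # nothing was passed, so there is nothing to warn about

    # ... the real method also merges loading attributes from the passed config
    # and appends a note about them to warning_msg here ...

    if warning_msg != "":
        warnings.warn(warning_msg)
    return config_from_model

The net effect: loading an already-quantized checkpoint without passing `quantization_config` yourself no longer emits a spurious warning, since the message is only built when `quantization_config_from_args` is not None.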