From fe29b8c487dfe554ee44e2a359605f82651e9095 Mon Sep 17 00:00:00 2001
From: Kyle Sayers
Date: Mon, 5 May 2025 14:38:49 -0400
Subject: [PATCH] [Ready to Merge][HFQuantizer] Squelch pydantic warnings
 (#37726)

replace dict with model_dump

Signed-off-by: Kyle Sayers
Co-authored-by: Marc Sun <57196510+SunMarc@users.noreply.github.com>
---
 src/transformers/utils/quantization_config.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/transformers/utils/quantization_config.py b/src/transformers/utils/quantization_config.py
index 8af225d92b3..5d9ab1f6f20 100644
--- a/src/transformers/utils/quantization_config.py
+++ b/src/transformers/utils/quantization_config.py
@@ -1403,12 +1403,12 @@ class CompressedTensorsConfig(QuantizationConfigMixin):
         """
         quantization_config = {}
         if self.quantization_config is not None:
-            quantization_config = self.quantization_config.dict()
+            quantization_config = self.quantization_config.model_dump()
         else:
             quantization_config["quant_method"] = QuantizationMethod.COMPRESSED_TENSORS
 
         if self.sparsity_config is not None:
-            quantization_config["sparsity_config"] = self.sparsity_config.dict()
+            quantization_config["sparsity_config"] = self.sparsity_config.model_dump()
         else:
             quantization_config["sparsity_config"] = {}
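
Background for reviewers: the compressed-tensors quantization and sparsity configs are pydantic models, and pydantic v2 deprecated `BaseModel.dict()` in favor of `model_dump()`, so every call through the old name emits a deprecation warning. A minimal sketch of the behavior this patch silences, assuming pydantic >= 2 and using a hypothetical `Config` model (not the actual transformers or compressed-tensors classes):

    # Illustrative only: a stand-in pydantic model, not code from this patch.
    from pydantic import BaseModel

    class Config(BaseModel):
        quant_method: str = "compressed-tensors"

    cfg = Config()

    legacy = cfg.dict()        # still works in pydantic v2, but warns that dict() is deprecated
    current = cfg.model_dump() # v2 replacement; returns the same plain dict without the warning
    assert legacy == current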