mirror of
https://github.com/huggingface/transformers.git
synced 2025-08-01 02:31:11 +06:00
FIX / bnb: fix torch compatibility issue with itemsize
(#30162)
* fix torch compatibility issues * fix * Update src/transformers/modeling_utils.py
This commit is contained in:
parent
4f7a9f9c5c
commit
f569172fc2
@ -1159,9 +1159,12 @@ class ModuleUtilsMixin:
|
||||
# For 4bit models, we need to multiply the number of parameters by 2 as half of the parameters are
|
||||
# used for the 4bit quantization (uint8 tensors are stored)
|
||||
if is_loaded_in_4bit and isinstance(param, bnb.nn.Params4bit):
|
||||
total_numel.append(
|
||||
param.numel() * 2 * self.hf_quantizer.quantization_config.bnb_4bit_quant_storage.itemsize
|
||||
quant_storage = self.hf_quantizer.quantization_config.bnb_4bit_quant_storage
|
||||
# For compatibility with older PT version - see: https://github.com/huggingface/peft/pull/1635
|
||||
nb_params = (
|
||||
quant_storage.itemsize if hasattr(quant_storage, "itemsize") else quant_storage.element_size()
|
||||
)
|
||||
total_numel.append(param.numel() * 2 * nb_params)
|
||||
else:
|
||||
total_numel.append(param.numel())
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user