Mirror of https://github.com/huggingface/transformers.git, synced 2025-07-31 02:02:21 +06:00.
[FIX] offload_weight()
takes from 3 to 4 positional arguments but 5 were given (#29457)
* use require_torch_gpu
* enable on XPU
* fix
This commit is contained in:
parent
7b01579f73
commit
00bf44270f
@ -796,7 +796,7 @@ def _load_state_dict_into_meta_model(
|
||||
if not is_safetensors:
|
||||
offload_index = offload_weight(param, param_name, offload_folder, offload_index)
|
||||
elif param_device == "cpu" and state_dict_index is not None:
|
||||
state_dict_index = offload_weight(param, param_name, model, state_dict_folder, state_dict_index)
|
||||
state_dict_index = offload_weight(param, param_name, state_dict_folder, state_dict_index)
|
||||
elif (
|
||||
hf_quantizer is None
|
||||
or (not hf_quantizer.requires_parameters_quantization)
|
||||
|
@ -765,7 +765,7 @@ class ModelUtilsTest(TestCasePlus):
|
||||
|
||||
@require_accelerate
|
||||
@mark.accelerate_tests
|
||||
@require_torch_accelerator
|
||||
@require_torch_gpu
|
||||
def test_from_pretrained_disk_offload_task_model(self):
|
||||
model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-gpt2")
|
||||
device_map = {
|
||||
@ -808,7 +808,7 @@ class ModelUtilsTest(TestCasePlus):
|
||||
|
||||
@require_accelerate
|
||||
@mark.accelerate_tests
|
||||
@require_torch_accelerator
|
||||
@require_torch_gpu
|
||||
def test_from_pretrained_disk_offload_derived_to_base_model(self):
|
||||
derived_model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user