[FIX] offload_weight() takes from 3 to 4 positional arguments but 5 were given (#29457)

* use require_torch_gpu

* enable on XPU

* fix
This commit is contained in:
Fanli Lin 2024-03-06 10:58:42 +08:00 committed by GitHub
parent 7b01579f73
commit 00bf44270f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 3 additions and 3 deletions

View File

@@ -796,7 +796,7 @@ def _load_state_dict_into_meta_model(
if not is_safetensors:
offload_index = offload_weight(param, param_name, offload_folder, offload_index)
elif param_device == "cpu" and state_dict_index is not None:
state_dict_index = offload_weight(param, param_name, model, state_dict_folder, state_dict_index)
state_dict_index = offload_weight(param, param_name, state_dict_folder, state_dict_index)
elif (
hf_quantizer is None
or (not hf_quantizer.requires_parameters_quantization)

View File

@@ -765,7 +765,7 @@ class ModelUtilsTest(TestCasePlus):
@require_accelerate
@mark.accelerate_tests
@require_torch_accelerator
@require_torch_gpu
def test_from_pretrained_disk_offload_task_model(self):
model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-gpt2")
device_map = {
@@ -808,7 +808,7 @@ class ModelUtilsTest(TestCasePlus):
@require_accelerate
@mark.accelerate_tests
@require_torch_accelerator
@require_torch_gpu
def test_from_pretrained_disk_offload_derived_to_base_model(self):
derived_model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")