Fix incorrect accelerator device handling for MPS in TrainingArguments (#31812)

* Fix wrong accelerator device setup when using MPS

* More robust TrainingArguments MPS handling

* Update training_args.py

* Cleanup
Commit ae9dd02ee1 (parent 4879ac2b33)
Author: André Storhaug, 2024-07-08 13:49:30 +02:00, committed by GitHub

@@ -48,6 +48,7 @@ from .utils import (
     is_torch_bf16_cpu_available,
     is_torch_bf16_gpu_available,
     is_torch_mlu_available,
+    is_torch_mps_available,
     is_torch_neuroncore_available,
     is_torch_npu_available,
     is_torch_tf32_available,
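
For context, `is_torch_mps_available` boils down to querying PyTorch's MPS backend. A minimal standalone sketch of such a check (a simplification for illustration, not the library's exact implementation; assumes torch >= 1.12, where the MPS backend was introduced):

    import torch

    def mps_available() -> bool:
        # Hypothetical simplified check: guard against torch builds that
        # predate the MPS backend, then ask the backend itself.
        backend = getattr(torch.backends, "mps", None)
        return backend is not None and backend.is_available()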
@@ -2178,6 +2179,8 @@ class TrainingArguments:
             )
         if self.use_cpu:
             device = torch.device("cpu")
+        elif is_torch_mps_available():
+            device = torch.device("mps")
         elif is_torch_xpu_available():
             if not is_ipex_available() and not is_accelerate_available("0.32.0.dev"):
                 raise ImportError("Using the XPU PyTorch backend requires `accelerate>=0.32.0.dev`")
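
Read together, the two hunks change the device-resolution order to: explicit CPU request first, then MPS on Apple silicon, then XPU (with the remaining backends further down). A standalone sketch of that ordering, reduced to the backends shown in this diff plus a CUDA/CPU fallback (a hypothetical helper, not the actual TrainingArguments code):

    import torch

    def resolve_device(use_cpu: bool = False) -> torch.device:
        # Mirrors the branch order in the diff: an explicit CPU request
        # wins, then Apple's MPS backend, then a CUDA-style fallback.
        if use_cpu:
            return torch.device("cpu")
        mps = getattr(torch.backends, "mps", None)
        if mps is not None and mps.is_available():
            return torch.device("mps")
        if torch.cuda.is_available():
            return torch.device("cuda")
        return torch.device("cpu")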