Mirror of https://github.com/huggingface/transformers.git (synced 2025-07-30 01:32:23 +06:00)
Fix incorrect accelerator device handling for MPS in TrainingArguments (#31812)

* Fix wrong accelerator device setup when using MPS
* More robust TrainingArguments MPS handling
* Update training_args.py
* Cleanup
parent 4879ac2b33
commit ae9dd02ee1
@@ -48,6 +48,7 @@ from .utils import (
     is_torch_bf16_cpu_available,
     is_torch_bf16_gpu_available,
     is_torch_mlu_available,
+    is_torch_mps_available,
     is_torch_neuroncore_available,
     is_torch_npu_available,
     is_torch_tf32_available,
@@ -2178,6 +2179,8 @@ class TrainingArguments:
             )
             if self.use_cpu:
                 device = torch.device("cpu")
+            elif is_torch_mps_available():
+                device = torch.device("mps")
             elif is_torch_xpu_available():
                 if not is_ipex_available() and not is_accelerate_available("0.32.0.dev"):
                     raise ImportError("Using the XPU PyTorch backend requires `accelerate>=0.32.0.dev`")
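For readers skimming the hunk above, here is a minimal standalone sketch of the selection order the patched branch encodes: an explicit CPU request wins, then MPS, then other accelerators. The `resolve_device` helper is hypothetical (it is not part of Transformers), it substitutes the stock `torch.backends.mps.is_available()` check for the library's `is_torch_mps_available()` wrapper, and the CUDA fallback is added here purely for illustration.

import torch

def resolve_device(use_cpu: bool = False) -> torch.device:
    # Mirrors the role of TrainingArguments.use_cpu: an explicit CPU
    # request overrides any available accelerator.
    if use_cpu:
        return torch.device("cpu")
    # Stock PyTorch availability check that the library's
    # is_torch_mps_available() helper builds on (hypothetical stand-in).
    if torch.backends.mps.is_available():
        return torch.device("mps")
    # Illustrative fallback, not part of the diff above.
    if torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")

print(resolve_device())              # device(type='mps') on Apple silicon
print(resolve_device(use_cpu=True))  # device(type='cpu') regardless of hardware

The point of the patch is the ordering: the MPS check now sits between the explicit CPU request and the XPU branch, so Apple-silicon machines resolve to "mps" instead of falling through to a later, incorrect branch.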