Update training_args.py - addition of self.distributed_state when using XPU (#25999)
* Update training_args.py: the distributed state was missing, so lines 1813-1814 failed because the value was undefined.
* Update training_args.py

Co-authored-by: Zach Mueller <muellerzr@gmail.com>
Commit: e52f1cb669
Parent: 0fced06788
@@ -1803,6 +1803,7 @@ class TrainingArguments:
             torch.cuda.set_device(device)
         elif is_torch_xpu_available() and "ACCELERATE_USE_XPU" not in os.environ:
             os.environ["ACCELERATE_USE_XPU"] = "true"
+            self.distributed_state = PartialState(timeout=timedelta(seconds=self.ddp_timeout))
             device = torch.device("xpu:0")
             self._n_gpu = 1
         elif is_sagemaker_dp_enabled():
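For context, a minimal sketch of the patched XPU branch. The real code sits inside TrainingArguments in training_args.py; the standalone helper below is only an illustrative framing, and the exact statements that failed at lines 1813-1814 are not reproduced here. is_torch_xpu_available, PartialState, and ddp_timeout come from transformers/accelerate; the helper name setup_xpu_device is hypothetical.

    # Illustrative sketch of the fixed branch, not the actual method body.
    import os
    from datetime import timedelta

    import torch
    from accelerate import PartialState
    from transformers.utils import is_torch_xpu_available


    def setup_xpu_device(args):
        """Hypothetical helper mirroring the patched XPU branch."""
        if is_torch_xpu_available() and "ACCELERATE_USE_XPU" not in os.environ:
            os.environ["ACCELERATE_USE_XPU"] = "true"
            # The added line: without it, args.distributed_state is never set on the
            # XPU path, so later code that reads it fails with an undefined value.
            args.distributed_state = PartialState(timeout=timedelta(seconds=args.ddp_timeout))
            device = torch.device("xpu:0")
            args._n_gpu = 1
            return device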