[Trainer] Add optional communication backends for torch.distributed when using GPU (#22247)
Update training_args.py
parent c4bf6f38bd
commit cf0af9a31b
@@ -1641,7 +1641,10 @@ class TrainingArguments:
             # Here, we'll use torch.distributed.
             # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
             if not torch.distributed.is_initialized():
-                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
+                if self.xpu_backend and self.xpu_backend in ("mpi", "gloo"):
+                    torch.distributed.init_process_group(backend=self.xpu_backend, timeout=self.ddp_timeout_delta)
+                else:
+                    torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
             device = torch.device("cuda", self.local_rank)
             self._n_gpu = 1
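A minimal usage sketch (not part of the commit) of how the option shown in the diff could be exercised: when `xpu_backend` is set to "mpi" or "gloo", that backend is passed to torch.distributed.init_process_group for GPU training; otherwise the default "nccl" is kept. The `output_dir` value is a placeholder, and `ddp_timeout` is assumed to be the argument behind `self.ddp_timeout_delta`.

# Hypothetical sketch: select the torch.distributed communication backend
# for multi-GPU training via TrainingArguments.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="out",     # placeholder output directory
    xpu_backend="gloo",   # use "gloo" (or "mpi") instead of the default "nccl"
    ddp_timeout=1800,     # seconds; exposed to init_process_group as ddp_timeout_delta
)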