[Trainer] Add optional communication backends for torch.distributed when using GPU (#22247)

Update training_args.py
heya5 2023-03-20 21:17:34 +08:00 committed by GitHub
parent c4bf6f38bd
commit cf0af9a31b

@@ -1641,7 +1641,10 @@ class TrainingArguments:
             # Here, we'll use torch.distributed.
             # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
             if not torch.distributed.is_initialized():
-                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
+                if self.xpu_backend and self.xpu_backend in ("mpi", "gloo"):
+                    torch.distributed.init_process_group(backend=self.xpu_backend, timeout=self.ddp_timeout_delta)
+                else:
+                    torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
             device = torch.device("cuda", self.local_rank)
             self._n_gpu = 1
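
A minimal sketch (not part of the commit) of how a user might opt into one of the alternative backends through TrainingArguments. The xpu_backend value "gloo", the ddp_timeout argument, and the launch command are assumptions based on the argument names used in the diff, not something this commit defines.

    # Sketch: choosing a non-NCCL torch.distributed backend for GPU training.
    # Assumes `xpu_backend` accepts "gloo"/"mpi" and that `ddp_timeout` maps to
    # `ddp_timeout_delta`; output_dir and the script name are placeholders.
    from transformers import TrainingArguments

    training_args = TrainingArguments(
        output_dir="out",
        xpu_backend="gloo",  # fall back to Gloo instead of the default NCCL
        ddp_timeout=1800,    # seconds; converted internally to a timedelta
    )

    # Launched under a distributed launcher, e.g.:
    #   torchrun --nproc_per_node=2 train.py
    # the device-setup code above would then call
    #   torch.distributed.init_process_group(backend="gloo", timeout=...)
    # on each GPU process instead of initializing NCCL.

With this change, environments where NCCL is unavailable or undesirable (for example, debugging on machines without working NCCL) can still run multi-GPU training by selecting MPI or Gloo, while the default behavior for everyone else stays NCCL.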