Mirror of https://github.com/huggingface/transformers.git
continue to fix distributed_type from TPU to XLA in LM examples (#38652)
parent 9cd7570f34
commit 91842a6900
@@ -625,7 +625,7 @@ def main():
     )

     # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
-    if accelerator.distributed_type == DistributedType.TPU:
+    if accelerator.distributed_type == DistributedType.XLA:
         model.tie_weights()

     # We need to recalculate our total training steps as the size of the training dataloader may have changed.
@@ -531,7 +531,7 @@ def main():
     )

     # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
-    if accelerator.distributed_type == DistributedType.TPU:
+    if accelerator.distributed_type == DistributedType.XLA:
         model.tie_weights()

     # We need to recalculate our total training steps as the size of the training dataloader may have changed.
@@ -729,7 +729,7 @@ def main():
     )

     # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
-    if accelerator.distributed_type == DistributedType.TPU:
+    if accelerator.distributed_type == DistributedType.XLA:
         model.tie_weights()

     # We need to recalculate our total training steps as the size of the training dataloader may have changed.
@@ -568,7 +568,7 @@ def main():
     )

     # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
-    if accelerator.distributed_type == DistributedType.TPU:
+    if accelerator.distributed_type == DistributedType.XLA:
         model.tie_weights()

     # We need to recalculate our total training steps as the size of the training dataloader may have changed.
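The change follows accelerate's rename of DistributedType.TPU to DistributedType.XLA. Below is a minimal sketch of the updated check in context; it is not taken from the patched example scripts, and it assumes an accelerate release with the renamed enum member and uses a hypothetical "gpt2" checkpoint purely for illustration.

# Minimal sketch, not part of this commit. Assumes an accelerate version where
# DistributedType.TPU has been renamed to DistributedType.XLA.
from accelerate import Accelerator, DistributedType
from transformers import AutoModelForCausalLM

accelerator = Accelerator()
model = AutoModelForCausalLM.from_pretrained("gpt2")  # hypothetical checkpoint for illustration
model = accelerator.prepare(model)

# On TPU/XLA, the tied weights in the model are disconnected by `prepare`,
# so the ties need to be restored before training.
if accelerator.distributed_type == DistributedType.XLA:
    model.tie_weights()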