Mirror of https://github.com/huggingface/transformers.git (synced 2025-07-31 02:02:21 +06:00)
[PyTorch/XLA] Fix extra TPU compilations introduced by recent changes (#29158)
* tmp
* Remove debug step
* Fix a typo
* Move to is_torch_xla_available
This commit is contained in:
parent 1e21c4fbe0
commit b340d90738
@@ -1364,7 +1364,7 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin):
                 hard_check_only=False,
                 check_device_map=check_device_map,
             )
-        elif requested_attn_implementation in [None, "sdpa"]:
+        elif requested_attn_implementation in [None, "sdpa"] and not is_torch_xla_available():
             # use_flash_attention_2 takes priority over SDPA, hence SDPA treated in this elif.
             config = cls._check_and_enable_sdpa(
                 config,
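In short, the added `and not is_torch_xla_available()` clause keeps the SDPA branch from being selected on XLA backends such as TPU, where the SDPA kernel can trigger extra graph compilations. Below is a minimal sketch of the resulting selection logic, not the library's internal code path; it assumes `is_torch_xla_available` is importable from `transformers.utils` (as the commit message indicates), and the `requested_attn_implementation` value is hypothetical, for illustration only.

    # Sketch of the guard this commit introduces (assumptions noted above).
    from transformers.utils import is_torch_xla_available

    requested_attn_implementation = None  # hypothetical value for illustration

    if requested_attn_implementation in [None, "sdpa"] and not is_torch_xla_available():
        # Non-XLA backend: SDPA may be auto-selected as before.
        attn_implementation = "sdpa"
    else:
        # XLA/TPU backend: fall back to eager attention to avoid the
        # extra XLA compilations the commit title describes.
        attn_implementation = "eager"

    print(attn_implementation)

The design choice is conservative: rather than special-casing SDPA behavior under XLA, the guard simply excludes XLA backends from SDPA auto-selection, so TPU users get the eager path unless they opt in explicitly.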