diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py
index 3306f76249f..b86e3af91ca 100644
--- a/src/transformers/testing_utils.py
+++ b/src/transformers/testing_utils.py
@@ -849,6 +849,13 @@ def require_torch_xpu(test_case):
     return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
 
 
+def require_non_xpu(test_case):
+    """
+    Decorator marking a test that should be skipped for XPU.
+    """
+    return unittest.skipUnless(torch_device != "xpu", "test requires a non-XPU")(test_case)
+
+
 def require_torch_multi_xpu(test_case):
     """
     Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
diff --git a/tests/extended/test_trainer_ext.py b/tests/extended/test_trainer_ext.py
index 9bf34c36692..4f6cf7dffa1 100644
--- a/tests/extended/test_trainer_ext.py
+++ b/tests/extended/test_trainer_ext.py
@@ -31,6 +31,7 @@ from transformers.testing_utils import (
     get_torch_dist_unique_port,
     require_apex,
     require_bitsandbytes,
+    require_non_xpu,
     require_torch,
     require_torch_gpu,
     require_torch_multi_accelerator,
@@ -106,6 +107,7 @@ class TestTrainerExt(TestCasePlus):
     def test_run_seq2seq_ddp(self):
         self.run_seq2seq_quick(distributed=True)
 
+    @require_non_xpu
     @require_apex
     @require_torch_gpu
     def test_run_seq2seq_apex(self):
diff --git a/tests/models/layoutlmv2/test_modeling_layoutlmv2.py b/tests/models/layoutlmv2/test_modeling_layoutlmv2.py
index 3d366fe3e84..94cc4e95432 100644
--- a/tests/models/layoutlmv2/test_modeling_layoutlmv2.py
+++ b/tests/models/layoutlmv2/test_modeling_layoutlmv2.py
@@ -16,7 +16,14 @@
 
 import unittest
 
-from transformers.testing_utils import require_detectron2, require_torch, require_torch_multi_gpu, slow, torch_device
+from transformers.testing_utils import (
+    require_detectron2,
+    require_non_xpu,
+    require_torch,
+    require_torch_multi_gpu,
+    slow,
+    torch_device,
+)
 from transformers.utils import is_detectron2_available, is_torch_available
 
 from ...test_configuration_common import ConfigTester
@@ -251,6 +258,7 @@ class LayoutLMv2ModelTester:
         return config, inputs_dict
 
 
+@require_non_xpu
 @require_torch
 @require_detectron2
 class LayoutLMv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py
index 8eab385ad48..1ad6e93b10f 100755
--- a/tests/test_modeling_common.py
+++ b/tests/test_modeling_common.py
@@ -76,6 +76,7 @@ from transformers.testing_utils import (
     require_accelerate,
     require_bitsandbytes,
     require_flash_attn,
+    require_non_xpu,
     require_read_token,
     require_safetensors,
     require_torch,
@@ -2884,6 +2885,7 @@ class ModelTesterMixin:
         )
         self.assertTrue(torch.allclose(out_embeds, out_ids))
 
+    @require_non_xpu
     @require_torch_multi_gpu
     def test_multi_gpu_data_parallel_forward(self):
         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -4118,6 +4120,7 @@ class ModelTesterMixin:
         with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
             _ = model(**inputs_dict)
 
+    @require_non_xpu
     @require_torch_sdpa
     @require_torch_accelerator
     @slow
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py
index c5f8b6169fc..14014e4a094 100644
--- a/tests/trainer/test_trainer.py
+++ b/tests/trainer/test_trainer.py
@@ -66,6 +66,7 @@ from transformers.testing_utils import (
     require_intel_extension_for_pytorch,
     require_liger_kernel,
     require_lomo,
+    require_non_xpu,
     require_optuna,
     require_peft,
     require_ray,
@@ -884,6 +885,7 @@ class TrainerIntegrationPrerunTest(TestCasePlus, TrainerIntegrationCommon):
 
     # will add more specific tests once there are some bugs to fix
+    @require_non_xpu
     @require_torch_gpu
     @require_torch_tf32
     def test_tf32(self):
@@ -3196,6 +3198,7 @@ class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon):
         # perfect world: fp32_init/2 == fp16_eval
         self.assertAlmostEqual(fp16_eval, fp32_init / 2, delta=5_000)
 
+    @require_non_xpu
     @require_torch_non_multi_gpu
     @require_torchdynamo
     @require_torch_tensorrt_fx
diff --git a/tests/utils/test_cache_utils.py b/tests/utils/test_cache_utils.py
index 6ab821231fd..3e8c80de2d1 100644
--- a/tests/utils/test_cache_utils.py
+++ b/tests/utils/test_cache_utils.py
@@ -22,6 +22,7 @@ from transformers import set_seed
 from transformers.testing_utils import (
     is_torch_available,
     require_auto_gptq,
+    require_non_xpu,
     require_read_token,
     require_torch,
     require_torch_gpu,
@@ -317,6 +318,7 @@ class CacheIntegrationTest(unittest.TestCase):
         ]
         self.assertListEqual(decoded, expected_text)
 
+    @require_non_xpu
     @require_auto_gptq
     def test_sink_cache_hard(self):
         tokenizer = AutoTokenizer.from_pretrained("TheBloke/LLaMa-7B-GPTQ")
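For context, the whole change reduces to the one new decorator above plus its call sites. Below is a minimal standalone sketch of how the decorator behaves, assuming `torch_device` is the device string that `transformers.testing_utils` resolves at import time ("cuda", "xpu", "cpu", ...); the stubbed `torch_device` value and the `ExampleTest` class are hypothetical, included only so the snippet runs on its own.

# Hedged sketch (not part of the patch): how require_non_xpu composes with
# unittest. `torch_device` is stubbed here; in transformers it is resolved
# from the available accelerator when testing_utils is imported.
import unittest

torch_device = "xpu"  # hypothetical stand-in for the resolved device string

def require_non_xpu(test_case):
    """Skip the wrapped test whenever the active device is an XPU."""
    return unittest.skipUnless(torch_device != "xpu", "test requires a non-XPU")(test_case)

class ExampleTest(unittest.TestCase):
    @require_non_xpu  # evaluated once, at class-definition time
    def test_cuda_only_path(self):
        self.assertTrue(True)  # skipped on XPU hosts, runs everywhere else

if __name__ == "__main__":
    unittest.main()

Because `unittest.skipUnless` is evaluated when the decorator is applied, the skip decision reflects the device detected at import time, which is why the decorator can simply stack above `@require_apex`, `@require_torch_gpu`, and the other markers in the tests patched above.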