[tests] skip tests for xpu (#33553)

* enable

* fix

* add xpu skip

* add marker

* skip for xpu

* add more

* add one more
Fanli Lin 2024-09-20 02:28:04 +08:00 committed by GitHub
parent f111d5b783
commit b87755aa6d
6 changed files with 26 additions and 1 deletion

src/transformers/testing_utils.py

@@ -849,6 +849,13 @@ def require_torch_xpu(test_case):
     return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)
 
 
+def require_non_xpu(test_case):
+    """
+    Decorator marking a test that should be skipped for XPU.
+    """
+    return unittest.skipUnless(torch_device != "xpu", "test requires a non-XPU")(test_case)
+
+
 def require_torch_multi_xpu(test_case):
     """
     Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
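The new helper follows the same pattern as the surrounding require_* decorators: unittest.skipUnless either returns the test unchanged or marks it as skipped at decoration time, based on the resolved torch_device. A self-contained sketch of that behavior, with torch_device hardcoded as a stand-in for the value testing_utils resolves at import:

import unittest

torch_device = "xpu"  # stand-in: the real value is resolved by transformers.testing_utils

def require_non_xpu(test_case):
    # skipUnless returns the test untouched when the condition holds,
    # otherwise replaces it with one that reports a skip and the reason.
    return unittest.skipUnless(torch_device != "xpu", "test requires a non-XPU")(test_case)

class Example(unittest.TestCase):
    @require_non_xpu
    def test_cuda_specific_kernel(self):
        ...  # never runs with the stand-in value above

Running this with `python -m unittest -v` reports the test as skipped with reason "test requires a non-XPU".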

tests/extended/test_trainer_ext.py

@@ -31,6 +31,7 @@ from transformers.testing_utils import (
     get_torch_dist_unique_port,
     require_apex,
     require_bitsandbytes,
+    require_non_xpu,
     require_torch,
     require_torch_gpu,
     require_torch_multi_accelerator,
@@ -106,6 +107,7 @@ class TestTrainerExt(TestCasePlus):
     def test_run_seq2seq_ddp(self):
         self.run_seq2seq_quick(distributed=True)
 
+    @require_non_xpu
     @require_apex
     @require_torch_gpu
     def test_run_seq2seq_apex(self):
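As this hunk shows, the new marker stacks with the existing ones, and the test runs only when every condition holds. A minimal illustration of how such skipUnless-based decorators compose; the require_condition factory here is hypothetical, not a real transformers helper:

import unittest

def require_condition(ok, reason):
    # Same shape as the require_* helpers: a skip marker built from a condition.
    def decorator(test_case):
        return unittest.skipUnless(ok, reason)(test_case)
    return decorator

class StackingExample(unittest.TestCase):
    @require_condition(True, "test requires a non-XPU")    # holds
    @require_condition(False, "test requires CUDA + apex")  # fails -> skip
    def test_needs_everything(self):
        pass

Any failing condition marks the test as skipped, so stacking order only affects which reason gets reported.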

tests/models/layoutlmv2/test_modeling_layoutlmv2.py

@@ -16,7 +16,14 @@
 import unittest
 
-from transformers.testing_utils import require_detectron2, require_torch, require_torch_multi_gpu, slow, torch_device
+from transformers.testing_utils import (
+    require_detectron2,
+    require_non_xpu,
+    require_torch,
+    require_torch_multi_gpu,
+    slow,
+    torch_device,
+)
 from transformers.utils import is_detectron2_available, is_torch_available
 
 from ...test_configuration_common import ConfigTester
@@ -251,6 +258,7 @@ class LayoutLMv2ModelTester:
         return config, inputs_dict
 
 
+@require_non_xpu
 @require_torch
 @require_detectron2
 class LayoutLMv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):

tests/test_modeling_common.py

@@ -76,6 +76,7 @@ from transformers.testing_utils import (
     require_accelerate,
     require_bitsandbytes,
     require_flash_attn,
+    require_non_xpu,
     require_read_token,
     require_safetensors,
     require_torch,
@@ -2884,6 +2885,7 @@
         )
         self.assertTrue(torch.allclose(out_embeds, out_ids))
 
+    @require_non_xpu
     @require_torch_multi_gpu
     def test_multi_gpu_data_parallel_forward(self):
         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -4118,6 +4120,7 @@
         with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
             _ = model(**inputs_dict)
 
+    @require_non_xpu
     @require_torch_sdpa
     @require_torch_accelerator
     @slow

tests/trainer/test_trainer.py

@@ -66,6 +66,7 @@ from transformers.testing_utils import (
     require_intel_extension_for_pytorch,
     require_liger_kernel,
     require_lomo,
+    require_non_xpu,
     require_optuna,
     require_peft,
     require_ray,
@@ -884,6 +885,7 @@ class TrainerIntegrationPrerunTest(TestCasePlus, TrainerIntegrationCommon):
 
     # will add more specific tests once there are some bugs to fix
 
+    @require_non_xpu
     @require_torch_gpu
     @require_torch_tf32
     def test_tf32(self):
@@ -3196,6 +3198,7 @@
         # perfect world: fp32_init/2 == fp16_eval
         self.assertAlmostEqual(fp16_eval, fp32_init / 2, delta=5_000)
 
+    @require_non_xpu
     @require_torch_non_multi_gpu
     @require_torchdynamo
     @require_torch_tensorrt_fx

tests/utils/test_cache_utils.py

@@ -22,6 +22,7 @@ from transformers import set_seed
 from transformers.testing_utils import (
     is_torch_available,
     require_auto_gptq,
+    require_non_xpu,
     require_read_token,
     require_torch,
     require_torch_gpu,
@@ -317,6 +318,7 @@ class CacheIntegrationTest(unittest.TestCase):
         ]
         self.assertListEqual(decoded, expected_text)
 
+    @require_non_xpu
     @require_auto_gptq
     def test_sink_cache_hard(self):
         tokenizer = AutoTokenizer.from_pretrained("TheBloke/LLaMa-7B-GPTQ")
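All of these guards hinge on the torch_device value that testing_utils resolves at import time. A rough sketch of the resolution order being assumed here; the real logic lives in transformers.testing_utils, also honors the TRANSFORMERS_TEST_DEVICE override, and covers more backends:

import torch

# Approximate priority: CUDA first, then XPU, else CPU.
if torch.cuda.is_available():
    torch_device = "cuda"
elif hasattr(torch, "xpu") and torch.xpu.is_available():
    torch_device = "xpu"
else:
    torch_device = "cpu"

On an Intel GPU machine this yields "xpu", so every test marked @require_non_xpu in this commit is skipped there.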