enable misc test cases on XPU (#38852)

* enable misc test cases on XPU

Signed-off-by: YAO Matrix <matrix.yao@intel.com>

* fix style

Signed-off-by: YAO Matrix <matrix.yao@intel.com>

* tweak bamba ground truth on XPU

Signed-off-by: YAO Matrix <matrix.yao@intel.com>

* remove print

Signed-off-by: YAO Matrix <matrix.yao@intel.com>

* one more

Signed-off-by: YAO Matrix <matrix.yao@intel.com>

* fix style

Signed-off-by: YAO Matrix <matrix.yao@intel.com>

---------

Signed-off-by: YAO Matrix <matrix.yao@intel.com>
Yao Matrix 2025-06-18 15:20:49 +08:00 committed by GitHub
parent d058f81e5b
commit 3526e25d3d
10 changed files with 40 additions and 31 deletions


@@ -391,7 +391,7 @@ class TrainingArguments:
installation](https://github.com/intel/intel-extension-for-pytorch).
bf16 (`bool`, *optional*, defaults to `False`):
Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training. Requires Ampere or higher
- NVIDIA architecture or using CPU (use_cpu) or Ascend NPU. This is an experimental API and it may change.
+ NVIDIA architecture or Intel XPU or using CPU (use_cpu) or Ascend NPU. This is an experimental API and it may change.
fp16 (`bool`, *optional*, defaults to `False`):
Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training.
fp16_opt_level (`str`, *optional*, defaults to 'O1'):
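As a usage note (not part of the diff): a minimal sketch of turning this option on, assuming an XPU-capable PyTorch build; the output directory and batch size below are placeholders.

from transformers import TrainingArguments

# Minimal sketch, not from the commit: on a machine with an Intel XPU, an
# Ampere-or-newer NVIDIA GPU, or an Ascend NPU, bf16=True switches training to
# bf16 mixed precision; the Trainer detects the available accelerator on its own.
args = TrainingArguments(
    output_dir="bf16-run",            # placeholder path
    bf16=True,                        # experimental API, as the docstring above notes
    per_device_train_batch_size=8,    # placeholder batch size
)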


@@ -4892,7 +4892,7 @@ class GenerationIntegrationTests(unittest.TestCase):
# If the generate doesn't infer the DECODER device map correctly, this will fail
_ = model.generate(**inputs, max_new_tokens=2, do_sample=False)
- @require_torch_gpu
+ @require_torch_accelerator
def test_cpu_offload_doesnt_compile(self):
"""Test that CPU offload doesn't trigger compilation"""
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
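This require_torch_gpu to require_torch_accelerator swap is the pattern repeated throughout the commit: the test is gated on having any supported accelerator rather than specifically an NVIDIA GPU. A rough sketch of the idea (the real decorator lives in transformers.testing_utils and is more involved):

import unittest
from transformers.testing_utils import torch_device

# Illustrative sketch only -- not the actual implementation of
# require_torch_accelerator. The idea is to skip unless some non-CPU torch
# device (CUDA, XPU, NPU, MPS, ...) is available, instead of requiring CUDA.
def require_any_accelerator(test_case):
    return unittest.skipUnless(
        torch_device not in (None, "cpu"),
        "test requires a torch accelerator (CUDA, XPU, NPU, ...)",
    )(test_case)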


@@ -610,22 +610,15 @@ class BambaModelIntegrationTest(unittest.TestCase):
cls.device_properties = get_device_properties()
def test_simple_generate(self):
+ # fmt: off
expectations = Expectations(
{
- (
- "cuda",
- 8,
- ): "<|begin_of_text|>Hey how are you doing on this lovely evening? I hope you are all having a good time.",
- (
- "rocm",
- 9,
- ): "<|begin_of_text|>Hey how are you doing on this lovely evening? I hope you are doing well. I am here",
- (
- "xpu",
- 3,
- ): "<|begin_of_text|>Hey how are you doing on this lovely evening? I hope you are all doing well. Today I",
+ ("cuda", 8): "<|begin_of_text|>Hey how are you doing on this lovely evening? I hope you are all having a good time.",
+ ("rocm", 9): "<|begin_of_text|>Hey how are you doing on this lovely evening? I hope you are doing well. I am here",
+ ("xpu", 3): "<|begin_of_text|>Hey how are you doing on this lovely evening? I hope you are all doing well. I am",
}
)
+ # fmt: on
self.model.to(torch_device)
@@ -659,6 +652,7 @@ class BambaModelIntegrationTest(unittest.TestCase):
#
# Note: Key 9 is currently set for MI300, but may need potential future adjustments for H100s,
# considering differences in hardware processing and potential deviations in generated text.
+ # fmt: off
EXPECTED_TEXTS = Expectations(
{
("cuda", 7): [],
@@ -671,11 +665,12 @@ class BambaModelIntegrationTest(unittest.TestCase):
"!!!<|begin_of_text|>I am late! I need to be at the airport in 20 minutes! I",
],
("xpu", 3): [
- "<|begin_of_text|>Hey how are you doing on this lovely evening? I hope you are all doing well. Today I",
+ "<|begin_of_text|>Hey how are you doing on this lovely evening? I hope you are all doing well. I am",
"!!!<|begin_of_text|>I am late! I need to get to work! I have to get to the",
],
}
)
+ # fmt: on
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
self.model.to(torch_device)
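The ground-truth tweaks above work because the expected outputs are keyed by device properties. A simplified sketch of that lookup, assuming get_device_properties (used by these tests and exposed from transformers.testing_utils) returns at least a (device_type, major) pair such as ("xpu", 3) or ("cuda", 8); the real Expectations helper has additional fallback logic.

from transformers.testing_utils import get_device_properties

# Simplified sketch; the placeholder strings stand in for the real expected outputs.
expected_texts = {
    ("cuda", 8): "expected output on Ampere-or-newer CUDA",  # placeholder
    ("rocm", 9): "expected output on MI300-class ROCm",      # placeholder
    ("xpu", 3): "expected output on Intel XPU",              # placeholder
}
device_type, major = get_device_properties()[:2]  # some versions also report a minor revision
expected = expected_texts[(device_type, major)]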


@@ -27,7 +27,13 @@ from transformers import (
is_torch_available,
is_vision_available,
)
- from transformers.testing_utils import require_torch, require_torch_gpu, require_vision, slow, torch_device
+ from transformers.testing_utils import (
+ require_torch,
+ require_torch_accelerator,
+ require_vision,
+ slow,
+ torch_device,
+ )
from transformers.utils import cached_property
@@ -680,7 +686,7 @@ class DFineModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
self.assertTrue(not failed_cases, message)
@parameterized.expand(["float32", "float16", "bfloat16"])
- @require_torch_gpu
+ @require_torch_accelerator
@slow
def test_inference_with_different_dtypes(self, torch_dtype_str):
torch_dtype = {
@@ -702,7 +708,7 @@ class DFineModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
_ = model(**self._prepare_for_class(inputs_dict, model_class))
@parameterized.expand(["float32", "float16", "bfloat16"])
- @require_torch_gpu
+ @require_torch_accelerator
@slow
def test_inference_equivalence_for_static_and_dynamic_anchors(self, torch_dtype_str):
torch_dtype = {


@@ -119,7 +119,7 @@ class Glm4IntegrationTest(unittest.TestCase):
{
("xpu", 3): [
"Hello I am doing a project on the history of the internet and I need to know what the first website was and what",
- "Hi today I am going to tell you about the most common disease in the world. This disease is called diabetes",
+ "Hi today I am going to tell you about the most common mistakes that people make when they are learning English.",
],
("cuda", 7): [],
("cuda", 8): [
@@ -177,7 +177,7 @@ class Glm4IntegrationTest(unittest.TestCase):
{
("xpu", 3): [
"Hello I am doing a project on the history of the internet and I need to know what the first website was and what",
- "Hi today I am going to tell you about the most common disease in the world. This disease is called diabetes",
+ "Hi today I am going to tell you about the most common mistakes that people make when they are learning English.",
],
("cuda", 7): [],
("cuda", 8): [


@@ -1035,7 +1035,7 @@ class MusicgenTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
self.skipTest(reason="Musicgen doesn't use the MusicgenFlashAttention2 class method.")
@require_torch_sdpa
- @require_torch_gpu
+ @require_torch_accelerator
@slow
def test_sdpa_can_dispatch_on_flash(self):
if not self.has_attentions:
@@ -1046,8 +1046,8 @@ class MusicgenTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
self.skipTest(reason="This test requires an NVIDIA GPU with compute capability >= 8.0")
elif device_type == "rocm" and major < 9:
self.skipTest(reason="This test requires an AMD GPU with compute capability >= 9.0")
- else:
- self.skipTest(reason="This test requires a Nvidia or AMD GPU")
+ elif device_type not in ["cuda", "rocm", "xpu"]:
+ self.skipTest(reason="This test requires a Nvidia or AMD GPU or an Intel XPU")
torch.compiler.reset()
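The effect of replacing the final else with an explicit device_type check is that CUDA, ROCm, and XPU devices now fall through to the test body, while anything else is skipped. An illustrative, self-contained version of that gating, assuming the earlier CUDA branch checks major < 8 (which the visible skip message suggests):

from typing import Optional

# Illustrative only; the real test reads device_type/major from get_device_properties().
def sdpa_flash_skip_reason(device_type: str, major: int) -> Optional[str]:
    if device_type == "cuda" and major < 8:
        return "This test requires an NVIDIA GPU with compute capability >= 8.0"
    if device_type == "rocm" and major < 9:
        return "This test requires an AMD GPU with compute capability >= 9.0"
    if device_type not in ["cuda", "rocm", "xpu"]:
        return "This test requires a Nvidia or AMD GPU or an Intel XPU"
    return None  # supported accelerator: run the test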


@@ -1035,7 +1035,7 @@ class MusicgenMelodyTest(ModelTesterMixin, GenerationTesterMixin, PipelineTester
self.skipTest(reason="MusicgenMelody doesn't use the MusicgenMelodyFlashAttention2 class method.")
@require_torch_sdpa
- @require_torch_gpu
+ @require_torch_accelerator
@slow
def test_sdpa_can_dispatch_on_flash(self):
if not self.has_attentions:
@@ -1046,8 +1046,8 @@ class MusicgenMelodyTest(ModelTesterMixin, GenerationTesterMixin, PipelineTester
self.skipTest(reason="This test requires an NVIDIA GPU with compute capability >= 8.0")
elif device_type == "rocm" and major < 9:
self.skipTest(reason="This test requires an AMD GPU with compute capability >= 9.0")
- else:
- self.skipTest(reason="This test requires a Nvidia or AMD GPU")
+ elif device_type not in ["cuda", "rocm", "xpu"]:
+ self.skipTest(reason="This test requires a Nvidia or AMD GPU or an Intel XPU")
torch.compiler.reset()


@@ -21,7 +21,14 @@ import numpy as np
import requests
from packaging import version
- from transformers.testing_utils import is_flaky, require_torch, require_torch_gpu, require_vision, slow, torch_device
+ from transformers.testing_utils import (
+ is_flaky,
+ require_torch,
+ require_torch_accelerator,
+ require_vision,
+ slow,
+ torch_device,
+ )
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
@@ -337,7 +344,7 @@ class VitMatteImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
)
@slow
- @require_torch_gpu
+ @require_torch_accelerator
@require_vision
def test_can_compile_fast_image_processor(self):
# override as trimaps are needed for the image processor


@@ -1623,9 +1623,10 @@ class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon):
self.assertFalse(is_any_loss_nan_or_inf(log_history_filter))
def test_train_and_eval_dataloaders(self):
- if torch_device in ["cuda", "xpu"]:
+ if torch_device in ["cuda"]:
n_gpu = max(1, backend_device_count(torch_device))
else:
+ # DP is deprecated by PyTorch; accelerators like XPU don't support DP
n_gpu = 1
tmp_dir = self.get_auto_remove_tmp_dir()
@@ -3940,7 +3941,7 @@ class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon):
from torch import _dynamo as torchdynamo
class CustomTrainer(Trainer):
- def compute_loss(self, model, inputs, return_outputs=False):
+ def compute_loss(self, model, inputs, num_items_in_batch=None, return_outputs=False):
x = inputs["x"]
output = model(x)
if self.args.n_gpu == 1:
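The signature update mirrors newer Trainer versions, which pass num_items_in_batch into compute_loss, so custom subclasses need to accept it. A hedged sketch of a compatible override (the loss computation itself is a placeholder, not taken from the commit):

from transformers import Trainer

class MyTrainer(Trainer):
    # Accepting num_items_in_batch with a default keeps the override compatible
    # with both older and newer Trainer call styles. Sketch only.
    def compute_loss(self, model, inputs, num_items_in_batch=None, return_outputs=False):
        outputs = model(**inputs)
        loss = outputs.loss
        return (loss, outputs) if return_outputs else loss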


@@ -556,7 +556,7 @@ class CacheHardIntegrationTest(unittest.TestCase):
_ = model(**inputs)
_ = model.generate(**inputs, max_new_tokens=2, cache_implementation="hybrid")
- @require_torch_gpu
+ @require_torch_accelerator
@parameterized.expand(TEST_CACHE_IMPLEMENTATIONS)
def test_cache_gptj_model(self, cache_implementation):
"""Tests caches with GPT-J model. Regression test for https://github.com/huggingface/transformers/pull/34799"""