From 691586b0dcd503afb2a7fc878a408f086e46288b Mon Sep 17 00:00:00 2001
From: Pavel Iakubovskii
Date: Wed, 17 Jul 2024 08:37:43 +0100
Subject: [PATCH] Fix tests skip (#32012)

* [run-slow] clip

* [run-slow] clip

* Fix skip -> skipTest

* [run-slow] clip
---
 tests/models/big_bird/test_modeling_big_bird.py | 2 +-
 tests/models/whisper/test_modeling_whisper.py   | 2 +-
 tests/test_tokenization_common.py               | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/models/big_bird/test_modeling_big_bird.py b/tests/models/big_bird/test_modeling_big_bird.py
index 7a7ad5071df..bda5cb62186 100644
--- a/tests/models/big_bird/test_modeling_big_bird.py
+++ b/tests/models/big_bird/test_modeling_big_bird.py
@@ -716,7 +716,7 @@ class BigBirdModelIntegrationTest(unittest.TestCase):
         """

         if not self.test_attention_probs:
-            self.skip("test_attention_probs is set to False")
+            self.skipTest("test_attention_probs is set to False")

         model = BigBirdModel.from_pretrained(
             "google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
diff --git a/tests/models/whisper/test_modeling_whisper.py b/tests/models/whisper/test_modeling_whisper.py
index 5fc66f9a205..0232543d121 100644
--- a/tests/models/whisper/test_modeling_whisper.py
+++ b/tests/models/whisper/test_modeling_whisper.py
@@ -3335,7 +3335,7 @@ class WhisperEncoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.
         fx_model_class_name = "Flax" + model_class.__name__

         if not hasattr(transformers, fx_model_class_name):
-            self.skip("Flax model does not exist")
+            self.skipTest("Flax model does not exist")

         # Output all for aggressive testing
         config.output_hidden_states = True
diff --git a/tests/test_tokenization_common.py b/tests/test_tokenization_common.py
index 60ed58c4005..867ca859ebc 100644
--- a/tests/test_tokenization_common.py
+++ b/tests/test_tokenization_common.py
@@ -2678,7 +2678,7 @@ class TokenizerTesterMixin:
             config = config_class()

             if config.is_encoder_decoder or config.pad_token_id is None:
-                self.skip("Model is not an encoder-decoder model or has no set pad token id")
+                self.skipTest("Model is not an encoder-decoder model or has no set pad token id")

         # Build sequence
         first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
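
Note (not part of the patch): a minimal sketch of why the change matters. unittest.TestCase provides skipTest() but has no skip() instance method, so self.skip(...) raises AttributeError and the test is reported as an error rather than a skip. The class and flag below are hypothetical, chosen to mirror the BigBird test.

    import unittest


    class SkipExample(unittest.TestCase):
        # Hypothetical flag mirroring ``test_attention_probs`` in the BigBird test.
        test_attention_probs = False

        def test_wrong_way(self):
            # ``self.skip`` does not exist on TestCase: calling it raises
            # AttributeError, so the test errors out instead of skipping.
            with self.assertRaises(AttributeError):
                self.skip("test_attention_probs is set to False")

        def test_right_way(self):
            # ``skipTest`` raises unittest.SkipTest, which the runner records
            # as a skipped test with the given reason.
            if not self.test_attention_probs:
                self.skipTest("test_attention_probs is set to False")
            self.fail("never reached when the flag is False")


    if __name__ == "__main__":
        unittest.main()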