diff --git a/src/transformers/models/align/configuration_align.py b/src/transformers/models/align/configuration_align.py
index 5f6daa5d43c..76193bf7a2b 100644
--- a/src/transformers/models/align/configuration_align.py
+++ b/src/transformers/models/align/configuration_align.py
@@ -14,12 +14,6 @@
 # limitations under the License.
 """ALIGN model configuration"""
 
-from typing import TYPE_CHECKING
-
-
-if TYPE_CHECKING:
-    pass
-
 from ...configuration_utils import PretrainedConfig
 from ...utils import logging
 
diff --git a/src/transformers/models/bamba/modular_bamba.py b/src/transformers/models/bamba/modular_bamba.py
index 3fec4ad35cf..8b9255d4540 100644
--- a/src/transformers/models/bamba/modular_bamba.py
+++ b/src/transformers/models/bamba/modular_bamba.py
@@ -52,13 +52,10 @@ from ...utils import (
     can_return_tuple,
     logging,
 )
-from ...utils.import_utils import is_causal_conv1d_available, is_flash_attn_2_available, is_mamba_2_ssm_available
+from ...utils.import_utils import is_causal_conv1d_available, is_mamba_2_ssm_available
 from .configuration_bamba import BambaConfig
 
 
-if is_flash_attn_2_available():
-    pass
-
 if is_mamba_2_ssm_available():
     from mamba_ssm.ops.triton.selective_state_update import selective_state_update
     from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined
diff --git a/src/transformers/models/clvp/configuration_clvp.py b/src/transformers/models/clvp/configuration_clvp.py
index b06cd5f6a41..d1ee5c9fb79 100644
--- a/src/transformers/models/clvp/configuration_clvp.py
+++ b/src/transformers/models/clvp/configuration_clvp.py
@@ -15,11 +15,7 @@
 """CLVP model configuration"""
 
 import os
-from typing import TYPE_CHECKING, Union
-
-
-if TYPE_CHECKING:
-    pass
+from typing import Union
 
 from ...configuration_utils import PretrainedConfig
 from ...utils import logging
diff --git a/src/transformers/models/got_ocr2/modular_got_ocr2.py b/src/transformers/models/got_ocr2/modular_got_ocr2.py
index d8fdd7249f4..98127283a62 100644
--- a/src/transformers/models/got_ocr2/modular_got_ocr2.py
+++ b/src/transformers/models/got_ocr2/modular_got_ocr2.py
@@ -33,13 +33,10 @@ from transformers.models.sam.modeling_sam import SamMLPBlock, SamVisionAttention
 from ...configuration_utils import PretrainedConfig
 from ...modeling_flash_attention_utils import FlashAttentionKwargs
 from ...processing_utils import Unpack
-from ...utils import auto_docstring, can_return_tuple, is_vision_available, logging
+from ...utils import auto_docstring, can_return_tuple, logging
 from ..auto import CONFIG_MAPPING, AutoConfig
 
 
-if is_vision_available():
-    pass
-
 logger = logging.get_logger(__name__)
 
 
diff --git a/src/transformers/models/mpt/configuration_mpt.py b/src/transformers/models/mpt/configuration_mpt.py
index f5078bd9bfe..f3468ca8fac 100644
--- a/src/transformers/models/mpt/configuration_mpt.py
+++ b/src/transformers/models/mpt/configuration_mpt.py
@@ -14,11 +14,7 @@
 # limitations under the License.
"""Mpt configuration""" -from typing import TYPE_CHECKING, Optional, Union - - -if TYPE_CHECKING: - pass +from typing import Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging diff --git a/src/transformers/models/nougat/image_processing_nougat.py b/src/transformers/models/nougat/image_processing_nougat.py index ad898ccfb20..3447c0ab151 100644 --- a/src/transformers/models/nougat/image_processing_nougat.py +++ b/src/transformers/models/nougat/image_processing_nougat.py @@ -41,16 +41,12 @@ from ...image_utils import ( validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, logging -from ...utils.import_utils import is_cv2_available, is_vision_available +from ...utils.import_utils import is_vision_available logger = logging.get_logger(__name__) -if is_cv2_available(): - pass - - if is_vision_available(): import PIL diff --git a/src/transformers/models/opt/modeling_opt.py b/src/transformers/models/opt/modeling_opt.py index fd22722b69c..0629fe2ad19 100644 --- a/src/transformers/models/opt/modeling_opt.py +++ b/src/transformers/models/opt/modeling_opt.py @@ -25,7 +25,7 @@ from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter -from ...modeling_flash_attention_utils import FlashAttentionKwargs, is_flash_attn_available +from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, @@ -44,10 +44,6 @@ if is_torch_flex_attn_available(): from ...integrations.flex_attention import make_flex_block_causal_mask -if is_flash_attn_available(): - pass - - logger = logging.get_logger(__name__) diff --git a/src/transformers/models/owlv2/configuration_owlv2.py b/src/transformers/models/owlv2/configuration_owlv2.py index 48e094b8f04..310a46508b8 100644 --- a/src/transformers/models/owlv2/configuration_owlv2.py +++ b/src/transformers/models/owlv2/configuration_owlv2.py @@ -14,12 +14,6 @@ # limitations under the License. 
"""OWLv2 model configuration""" -from typing import TYPE_CHECKING - - -if TYPE_CHECKING: - pass - from ...configuration_utils import PretrainedConfig from ...utils import logging diff --git a/src/transformers/models/seggpt/image_processing_seggpt.py b/src/transformers/models/seggpt/image_processing_seggpt.py index 6ad18447445..b05206713ca 100644 --- a/src/transformers/models/seggpt/image_processing_seggpt.py +++ b/src/transformers/models/seggpt/image_processing_seggpt.py @@ -32,15 +32,12 @@ from ...image_utils import ( to_numpy_array, valid_images, ) -from ...utils import TensorType, is_torch_available, is_vision_available, logging, requires_backends +from ...utils import TensorType, is_torch_available, logging, requires_backends if is_torch_available(): import torch -if is_vision_available(): - pass - logger = logging.get_logger(__name__) diff --git a/tests/models/aya_vision/test_modeling_aya_vision.py b/tests/models/aya_vision/test_modeling_aya_vision.py index 4503a6b1acb..eaa5aebe846 100644 --- a/tests/models/aya_vision/test_modeling_aya_vision.py +++ b/tests/models/aya_vision/test_modeling_aya_vision.py @@ -22,7 +22,6 @@ from transformers import ( AutoProcessor, AyaVisionConfig, is_torch_available, - is_vision_available, ) from transformers.testing_utils import ( Expectations, @@ -51,10 +50,6 @@ if is_torch_available(): ) -if is_vision_available(): - pass - - class AyaVisionVisionText2TextModelTester: def __init__( self, diff --git a/tests/models/internvl/test_video_processor_internvl.py b/tests/models/internvl/test_video_processor_internvl.py index b3dfb89f163..f385ab4830e 100644 --- a/tests/models/internvl/test_video_processor_internvl.py +++ b/tests/models/internvl/test_video_processor_internvl.py @@ -17,14 +17,11 @@ import unittest from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from transformers.testing_utils import require_torch, require_vision -from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available +from transformers.utils import is_torchvision_available, is_vision_available from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs -if is_torch_available(): - pass - if is_vision_available(): if is_torchvision_available(): from transformers import InternVLVideoProcessor diff --git a/tests/models/janus/test_processor_janus.py b/tests/models/janus/test_processor_janus.py index bd0d2cbe018..88fcc8888db 100644 --- a/tests/models/janus/test_processor_janus.py +++ b/tests/models/janus/test_processor_janus.py @@ -20,15 +20,10 @@ import unittest import numpy as np from transformers import AutoProcessor, AutoTokenizer, JanusProcessor -from transformers.utils import is_vision_available from ...test_processing_common import ProcessorTesterMixin -if is_vision_available(): - pass - - class JanusProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = JanusProcessor diff --git a/tests/models/llama4/test_image_processing_llama4.py b/tests/models/llama4/test_image_processing_llama4.py index dfc39f4ff49..95f2b658b75 100644 --- a/tests/models/llama4/test_image_processing_llama4.py +++ b/tests/models/llama4/test_image_processing_llama4.py @@ -16,14 +16,11 @@ import unittest from transformers.testing_utils import require_torch, require_vision -from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available +from transformers.utils import is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, 
 
 
-if is_torch_available():
-    pass
-
 if is_vision_available() and is_torchvision_available():
     from transformers import Llama4ImageProcessorFast
 
 
diff --git a/tests/models/llava/test_processor_llava.py b/tests/models/llava/test_processor_llava.py
index 51ed955b845..d89601d78bd 100644
--- a/tests/models/llava/test_processor_llava.py
+++ b/tests/models/llava/test_processor_llava.py
@@ -18,7 +18,7 @@ import unittest
 
 from transformers import AutoProcessor, AutoTokenizer, LlamaTokenizerFast, LlavaProcessor
 from transformers.testing_utils import require_vision
-from transformers.utils import is_torch_available, is_vision_available
+from transformers.utils import is_vision_available
 
 from ...test_processing_common import ProcessorTesterMixin
 
@@ -26,9 +26,6 @@ from ...test_processing_common import ProcessorTesterMixin
 if is_vision_available():
     from transformers import CLIPImageProcessor
 
-if is_torch_available:
-    pass
-
 
 @require_vision
 class LlavaProcessorTest(ProcessorTesterMixin, unittest.TestCase):
diff --git a/tests/models/llava_next_video/test_processor_llava_next_video.py b/tests/models/llava_next_video/test_processor_llava_next_video.py
index 49fa33ffc14..b902b8c496c 100644
--- a/tests/models/llava_next_video/test_processor_llava_next_video.py
+++ b/tests/models/llava_next_video/test_processor_llava_next_video.py
@@ -21,7 +21,7 @@ import torch
 
 from transformers import AutoProcessor, LlamaTokenizerFast, LlavaNextVideoProcessor
 from transformers.testing_utils import require_vision
-from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
+from transformers.utils import is_torchvision_available, is_vision_available
 
 from ...test_processing_common import ProcessorTesterMixin
 
@@ -32,9 +32,6 @@ if is_vision_available():
     if is_torchvision_available():
         from transformers import LlavaNextVideoVideoProcessor
 
-if is_torch_available:
-    pass
-
 
 @require_vision
 class LlavaNextVideoProcessorTest(ProcessorTesterMixin, unittest.TestCase):
diff --git a/tests/models/llava_next_video/test_video_processing_llava_next_video.py b/tests/models/llava_next_video/test_video_processing_llava_next_video.py
index aaba4b33c0d..d0ee9658b43 100644
--- a/tests/models/llava_next_video/test_video_processing_llava_next_video.py
+++ b/tests/models/llava_next_video/test_video_processing_llava_next_video.py
@@ -17,14 +17,11 @@ import unittest
 
 from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
 from transformers.testing_utils import require_torch, require_vision
-from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
+from transformers.utils import is_torchvision_available, is_vision_available
 
 from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
 
 
-if is_torch_available():
-    pass
-
 if is_vision_available():
     if is_torchvision_available():
         from transformers import LlavaNextVideoVideoProcessor
diff --git a/tests/models/llava_onevision/test_processor_llava_onevision.py b/tests/models/llava_onevision/test_processor_llava_onevision.py
index d4bd5f00025..52f2b99f92a 100644
--- a/tests/models/llava_onevision/test_processor_llava_onevision.py
+++ b/tests/models/llava_onevision/test_processor_llava_onevision.py
@@ -20,7 +20,7 @@ import unittest
 
 import torch
 
 from transformers.testing_utils import require_torch, require_vision
-from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
+from transformers.utils import is_torchvision_available, is_vision_available
 
 from ...test_processing_common import ProcessorTesterMixin
 
@@ -36,9 +36,6 @@ if is_vision_available():
     if is_torchvision_available():
         from transformers import LlavaOnevisionVideoProcessor
 
-if is_torch_available:
-    pass
-
 
 @require_vision
 @require_torch
diff --git a/tests/models/llava_onevision/test_video_processing_llava_onevision.py b/tests/models/llava_onevision/test_video_processing_llava_onevision.py
index 9f05ab6d264..386b288ff0a 100644
--- a/tests/models/llava_onevision/test_video_processing_llava_onevision.py
+++ b/tests/models/llava_onevision/test_video_processing_llava_onevision.py
@@ -17,14 +17,11 @@ import unittest
 
 from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
 from transformers.testing_utils import require_torch, require_vision
-from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
+from transformers.utils import is_torchvision_available, is_vision_available
 
 from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
 
 
-if is_torch_available():
-    pass
-
 if is_vision_available():
     if is_torchvision_available():
         from transformers import LlavaOnevisionVideoProcessor
diff --git a/tests/models/mistral3/test_processor_mistral3.py b/tests/models/mistral3/test_processor_mistral3.py
index 00dca742c6b..d000b7854cb 100644
--- a/tests/models/mistral3/test_processor_mistral3.py
+++ b/tests/models/mistral3/test_processor_mistral3.py
@@ -20,7 +20,7 @@ import numpy as np
 
 from transformers import PixtralProcessor
 from transformers.testing_utils import require_vision
-from transformers.utils import is_torch_available, is_vision_available
+from transformers.utils import is_torch_available
 
 from ...test_processing_common import ProcessorTesterMixin
 
@@ -29,10 +29,6 @@ if is_torch_available():
     import torch
 
 
-if is_vision_available():
-    pass
-
-
 @require_vision
 class Mistral3ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
     """This tests Pixtral processor with the new `spatial_merge_size` argument in Mistral3."""
diff --git a/tests/models/paligemma2/test_modeling_paligemma2.py b/tests/models/paligemma2/test_modeling_paligemma2.py
index c9a53efa14a..d40a6ec17e0 100644
--- a/tests/models/paligemma2/test_modeling_paligemma2.py
+++ b/tests/models/paligemma2/test_modeling_paligemma2.py
@@ -23,7 +23,6 @@ from transformers import (
     PaliGemmaConfig,
     PaliGemmaForConditionalGeneration,
     is_torch_available,
-    is_vision_available,
 )
 from transformers.testing_utils import (
     is_flaky,
@@ -40,10 +39,6 @@ if is_torch_available():
     import torch
 
 
-if is_vision_available():
-    pass
-
-
 class PaliGemma2VisionText2TextModelTester:
     def __init__(
         self,
diff --git a/tests/models/qwen2_audio/test_processor_qwen2_audio.py b/tests/models/qwen2_audio/test_processor_qwen2_audio.py
index 007b0d3c7ab..95832c72c2f 100644
--- a/tests/models/qwen2_audio/test_processor_qwen2_audio.py
+++ b/tests/models/qwen2_audio/test_processor_qwen2_audio.py
@@ -17,15 +17,10 @@ import unittest
 
 from transformers import AutoProcessor, AutoTokenizer, Qwen2AudioProcessor, WhisperFeatureExtractor
 from transformers.testing_utils import require_torch, require_torchaudio
-from transformers.utils import is_torch_available
 
 from ...test_processing_common import ProcessorTesterMixin
 
 
-if is_torch_available:
-    pass
-
-
 @require_torch
 @require_torchaudio
 class Qwen2AudioProcessorTest(ProcessorTesterMixin, unittest.TestCase):
diff --git a/tests/models/timesfm/test_modeling_timesfm.py b/tests/models/timesfm/test_modeling_timesfm.py
index 486e55daf7f..c38f38f3d8b 100644
--- a/tests/models/timesfm/test_modeling_timesfm.py
+++ b/tests/models/timesfm/test_modeling_timesfm.py
@@ -21,15 +21,11 @@ import torch
 
 from transformers import TimesFmConfig, is_torch_available
 from transformers.testing_utils import require_torch, slow, torch_device
-from transformers.utils import is_torch_fx_available
 
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin
 
 
-if is_torch_fx_available():
-    pass
-
 if is_torch_available():
     from transformers import TimesFmModelForPrediction
 
diff --git a/tests/models/video_llava/test_video_processing_video_llava.py b/tests/models/video_llava/test_video_processing_video_llava.py
index 95fd6bb49b9..70784beca4c 100644
--- a/tests/models/video_llava/test_video_processing_video_llava.py
+++ b/tests/models/video_llava/test_video_processing_video_llava.py
@@ -17,14 +17,11 @@ import unittest
 
 from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
 from transformers.testing_utils import require_torch, require_vision
-from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
+from transformers.utils import is_torchvision_available, is_vision_available
 
 from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
 
 
-if is_torch_available():
-    pass
-
 if is_vision_available():
     if is_torchvision_available():
         from transformers import VideoLlavaVideoProcessor
diff --git a/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py b/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py
index dec4e7adfa4..64ff79a68ec 100644
--- a/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py
+++ b/tests/models/vitpose_backbone/test_modeling_vitpose_backbone.py
@@ -18,7 +18,7 @@ import unittest
 
 from transformers import VitPoseBackboneConfig
 from transformers.testing_utils import require_torch, torch_device
-from transformers.utils import is_torch_available, is_vision_available
+from transformers.utils import is_torch_available
 
 from ...test_backbone_common import BackboneTesterMixin
 from ...test_configuration_common import ConfigTester
@@ -31,10 +31,6 @@ if is_torch_available():
     from transformers import VitPoseBackbone
 
 
-if is_vision_available():
-    pass
-
-
 class VitPoseBackboneModelTester:
     def __init__(
         self,