Remove dead protected imports (#38980)

* Remove imports that were only referenced from now-empty `if TYPE_CHECKING:` and availability-check guards (bodies reduced to `pass`)

* Remove the remaining occurrences of the same pattern across model and test files
This commit is contained in:
Cyril Vallez 2025-06-23 13:44:50 +02:00 committed by GitHub
parent 74f5e4a1fa
commit 07aab1af1e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
24 changed files with 17 additions and 110 deletions

View File

@ -14,12 +14,6 @@
# limitations under the License.
"""ALIGN model configuration"""
from typing import TYPE_CHECKING
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging

View File

@ -52,13 +52,10 @@ from ...utils import (
can_return_tuple,
logging,
)
from ...utils.import_utils import is_causal_conv1d_available, is_flash_attn_2_available, is_mamba_2_ssm_available
from ...utils.import_utils import is_causal_conv1d_available, is_mamba_2_ssm_available
from .configuration_bamba import BambaConfig
if is_flash_attn_2_available():
pass
if is_mamba_2_ssm_available():
from mamba_ssm.ops.triton.selective_state_update import selective_state_update
from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined

View File

@ -15,11 +15,7 @@
"""CLVP model configuration"""
import os
from typing import TYPE_CHECKING, Union
if TYPE_CHECKING:
pass
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging

View File

@ -33,13 +33,10 @@ from transformers.models.sam.modeling_sam import SamMLPBlock, SamVisionAttention
from ...configuration_utils import PretrainedConfig
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...processing_utils import Unpack
from ...utils import auto_docstring, can_return_tuple, is_vision_available, logging
from ...utils import auto_docstring, can_return_tuple, logging
from ..auto import CONFIG_MAPPING, AutoConfig
if is_vision_available():
pass
logger = logging.get_logger(__name__)

View File

@ -14,11 +14,7 @@
# limitations under the License.
"""Mpt configuration"""
from typing import TYPE_CHECKING, Optional, Union
if TYPE_CHECKING:
pass
from typing import Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging

View File

@ -41,16 +41,12 @@ from ...image_utils import (
validate_preprocess_arguments,
)
from ...utils import TensorType, filter_out_non_signature_kwargs, logging
from ...utils.import_utils import is_cv2_available, is_vision_available
from ...utils.import_utils import is_vision_available
logger = logging.get_logger(__name__)
if is_cv2_available():
pass
if is_vision_available():
import PIL

View File

@ -25,7 +25,7 @@ from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs, is_flash_attn_available
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
@ -44,10 +44,6 @@ if is_torch_flex_attn_available():
from ...integrations.flex_attention import make_flex_block_causal_mask
if is_flash_attn_available():
pass
logger = logging.get_logger(__name__)

View File

@ -14,12 +14,6 @@
# limitations under the License.
"""OWLv2 model configuration"""
from typing import TYPE_CHECKING
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging

View File

@ -32,15 +32,12 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging, requires_backends
from ...utils import TensorType, is_torch_available, logging, requires_backends
if is_torch_available():
import torch
if is_vision_available():
pass
logger = logging.get_logger(__name__)

View File

@ -22,7 +22,6 @@ from transformers import (
AutoProcessor,
AyaVisionConfig,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
Expectations,
@ -51,10 +50,6 @@ if is_torch_available():
)
if is_vision_available():
pass
class AyaVisionVisionText2TextModelTester:
def __init__(
self,

View File

@ -17,14 +17,11 @@ import unittest
from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_torch_available():
pass
if is_vision_available():
if is_torchvision_available():
from transformers import InternVLVideoProcessor

View File

@ -20,15 +20,10 @@ import unittest
import numpy as np
from transformers import AutoProcessor, AutoTokenizer, JanusProcessor
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
pass
class JanusProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = JanusProcessor

View File

@ -16,14 +16,11 @@
import unittest
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_torch_available():
pass
if is_vision_available() and is_torchvision_available():
from transformers import Llama4ImageProcessorFast

View File

@ -18,7 +18,7 @@ import unittest
from transformers import AutoProcessor, AutoTokenizer, LlamaTokenizerFast, LlavaProcessor
from transformers.testing_utils import require_vision
from transformers.utils import is_torch_available, is_vision_available
from transformers.utils import is_vision_available
from ...test_processing_common import ProcessorTesterMixin
@ -26,9 +26,6 @@ from ...test_processing_common import ProcessorTesterMixin
if is_vision_available():
from transformers import CLIPImageProcessor
if is_torch_available:
pass
@require_vision
class LlavaProcessorTest(ProcessorTesterMixin, unittest.TestCase):

View File

@ -21,7 +21,7 @@ import torch
from transformers import AutoProcessor, LlamaTokenizerFast, LlavaNextVideoProcessor
from transformers.testing_utils import require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
@ -32,9 +32,6 @@ if is_vision_available():
if is_torchvision_available():
from transformers import LlavaNextVideoVideoProcessor
if is_torch_available:
pass
@require_vision
class LlavaNextVideoProcessorTest(ProcessorTesterMixin, unittest.TestCase):

View File

@ -17,14 +17,11 @@ import unittest
from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_torch_available():
pass
if is_vision_available():
if is_torchvision_available():
from transformers import LlavaNextVideoVideoProcessor

View File

@ -20,7 +20,7 @@ import unittest
import torch
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_processing_common import ProcessorTesterMixin
@ -36,9 +36,6 @@ if is_vision_available():
if is_torchvision_available():
from transformers import LlavaOnevisionVideoProcessor
if is_torch_available:
pass
@require_vision
@require_torch

View File

@ -17,14 +17,11 @@ import unittest
from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_torch_available():
pass
if is_vision_available():
if is_torchvision_available():
from transformers import LlavaOnevisionVideoProcessor

View File

@ -20,7 +20,7 @@ import numpy as np
from transformers import PixtralProcessor
from transformers.testing_utils import require_vision
from transformers.utils import is_torch_available, is_vision_available
from transformers.utils import is_torch_available
from ...test_processing_common import ProcessorTesterMixin
@ -29,10 +29,6 @@ if is_torch_available():
import torch
if is_vision_available():
pass
@require_vision
class Mistral3ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
"""This tests Pixtral processor with the new `spatial_merge_size` argument in Mistral3."""

View File

@ -23,7 +23,6 @@ from transformers import (
PaliGemmaConfig,
PaliGemmaForConditionalGeneration,
is_torch_available,
is_vision_available,
)
from transformers.testing_utils import (
is_flaky,
@ -40,10 +39,6 @@ if is_torch_available():
import torch
if is_vision_available():
pass
class PaliGemma2VisionText2TextModelTester:
def __init__(
self,

View File

@ -17,15 +17,10 @@ import unittest
from transformers import AutoProcessor, AutoTokenizer, Qwen2AudioProcessor, WhisperFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils import is_torch_available
from ...test_processing_common import ProcessorTesterMixin
if is_torch_available:
pass
@require_torch
@require_torchaudio
class Qwen2AudioProcessorTest(ProcessorTesterMixin, unittest.TestCase):

View File

@ -21,15 +21,11 @@ import torch
from transformers import TimesFmConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import is_torch_fx_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
if is_torch_fx_available():
pass
if is_torch_available():
from transformers import TimesFmModelForPrediction

View File

@ -17,14 +17,11 @@ import unittest
from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available
from transformers.utils import is_torchvision_available, is_vision_available
from ...test_video_processing_common import VideoProcessingTestMixin, prepare_video_inputs
if is_torch_available():
pass
if is_vision_available():
if is_torchvision_available():
from transformers import VideoLlavaVideoProcessor

View File

@ -18,7 +18,7 @@ import unittest
from transformers import VitPoseBackboneConfig
from transformers.testing_utils import require_torch, torch_device
from transformers.utils import is_torch_available, is_vision_available
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
@ -31,10 +31,6 @@ if is_torch_available():
from transformers import VitPoseBackbone
if is_vision_available():
pass
class VitPoseBackboneModelTester:
def __init__(
self,