diff --git a/src/transformers/commands/add_new_model_like.py b/src/transformers/commands/add_new_model_like.py
index 5dd5e7dcb82..30d273bec50 100644
--- a/src/transformers/commands/add_new_model_like.py
+++ b/src/transformers/commands/add_new_model_like.py
@@ -23,9 +23,8 @@ from itertools import chain
 from pathlib import Path
 from typing import Any, Callable, Dict, List, Optional, Pattern, Tuple, Union

-import transformers.models.auto as auto_module
-from transformers.models.auto.configuration_auto import model_type_to_module_name
-
+from ..models import auto as auto_module
+from ..models.auto.configuration_auto import model_type_to_module_name
 from ..utils import is_flax_available, is_tf_available, is_torch_available, logging
 from . import BaseTransformersCLICommand

diff --git a/src/transformers/image_transforms.py b/src/transformers/image_transforms.py
index d09f29b7904..0ae19c43c74 100644
--- a/src/transformers/image_transforms.py
+++ b/src/transformers/image_transforms.py
@@ -18,7 +18,7 @@ from typing import Iterable, List, Optional, Tuple, Union

 import numpy as np

-from transformers.image_utils import (
+from .image_utils import (
     ChannelDimension,
     ImageInput,
     get_channel_dimension_axis,
@@ -26,8 +26,8 @@ from transformers.image_utils import (
     infer_channel_dimension_format,
     to_numpy_array,
 )
-from transformers.utils import ExplicitEnum, TensorType, is_jax_tensor, is_tf_tensor, is_torch_tensor
-from transformers.utils.import_utils import (
+from .utils import ExplicitEnum, TensorType, is_jax_tensor, is_tf_tensor, is_torch_tensor
+from .utils.import_utils import (
     is_flax_available,
     is_tf_available,
     is_torch_available,
diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py
index 5c73a387d2b..29c4d67510c 100644
--- a/src/transformers/modeling_tf_utils.py
+++ b/src/transformers/modeling_tf_utils.py
@@ -33,8 +33,6 @@ import tensorflow as tf
 from huggingface_hub import Repository, list_repo_files
 from packaging.version import parse

-from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files
-
 from . import DataCollatorWithPadding, DefaultDataCollator
 from .activations_tf import get_tf_activation
 from .configuration_utils import PretrainedConfig
@@ -63,6 +61,7 @@ from .utils import (
     requires_backends,
     working_or_temp_dir,
 )
+from .utils.hub import convert_file_size_to_int, get_checkpoint_shard_files


 if parse(tf.__version__) >= parse("2.11.0"):
diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py
index 73e6cf00ef8..e7b0aab459f 100644
--- a/src/transformers/modeling_utils.py
+++ b/src/transformers/modeling_utils.py
@@ -32,9 +32,6 @@ from packaging import version
 from torch import Tensor, nn
 from torch.nn import CrossEntropyLoss

-from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files
-from transformers.utils.import_utils import ENV_VARS_TRUE_VALUES, is_sagemaker_mp_enabled
-
 from .activations import get_activation
 from .configuration_utils import PretrainedConfig
 from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled
@@ -73,7 +70,8 @@ from .utils import (
     logging,
     replace_return_docstrings,
 )
-from .utils.import_utils import importlib_metadata
+from .utils.hub import convert_file_size_to_int, get_checkpoint_shard_files
+from .utils.import_utils import ENV_VARS_TRUE_VALUES, importlib_metadata, is_sagemaker_mp_enabled
 from .utils.quantization_config import BitsAndBytesConfig
 from .utils.versions import require_version_core
diff --git a/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py
index 8823a86fc8c..eecada8b432 100644
--- a/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py
+++ b/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py
@@ -19,8 +19,8 @@ import argparse

 import torch

-from transformers import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
-from transformers.utils import logging
+from ...utils import logging
+from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


 logging.set_verbosity_info()
diff --git a/src/transformers/models/beit/image_processing_beit.py b/src/transformers/models/beit/image_processing_beit.py
index 6f73679a71b..4fafc5fda66 100644
--- a/src/transformers/models/beit/image_processing_beit.py
+++ b/src/transformers/models/beit/image_processing_beit.py
@@ -19,9 +19,6 @@ from typing import Any, Dict, List, Optional, Tuple, Union

 import numpy as np

-from transformers.utils import is_torch_available, is_torch_tensor, is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
 from ...image_utils import (
@@ -34,7 +31,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging


 if is_vision_available():
diff --git a/src/transformers/models/bit/image_processing_bit.py b/src/transformers/models/bit/image_processing_bit.py
index 0394ecb411e..66b308c1bcd 100644
--- a/src/transformers/models/bit/image_processing_bit.py
+++ b/src/transformers/models/bit/image_processing_bit.py
@@ -18,8 +18,6 @@ from typing import Dict, List, Optional, Union

 import numpy as np

-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import (
     center_crop,
@@ -40,8 +38,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
-from ...utils.import_utils import is_vision_available
+from ...utils import TensorType, is_vision_available, logging


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/blip/image_processing_blip.py b/src/transformers/models/blip/image_processing_blip.py
index 539d6d19860..59ea4ac7798 100644
--- a/src/transformers/models/blip/image_processing_blip.py
+++ b/src/transformers/models/blip/image_processing_blip.py
@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union

 import numpy as np

-from transformers.utils import is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
 from ...image_utils import (
@@ -33,7 +30,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_vision_available, logging


 if is_vision_available():
diff --git a/src/transformers/models/blip/modeling_blip_text.py b/src/transformers/models/blip/modeling_blip_text.py
index 3ec4a994d78..356d4e52719 100644
--- a/src/transformers/models/blip/modeling_blip_text.py
+++ b/src/transformers/models/blip/modeling_blip_text.py
@@ -22,20 +22,19 @@ import torch.utils.checkpoint
 from torch import Tensor, device, nn
 from torch.nn import CrossEntropyLoss

-from transformers.activations import ACT2FN
-from transformers.modeling_outputs import (
+from ...activations import ACT2FN
+from ...modeling_outputs import (
     BaseModelOutputWithPastAndCrossAttentions,
     BaseModelOutputWithPoolingAndCrossAttentions,
     CausalLMOutputWithCrossAttentions,
 )
-from transformers.modeling_utils import (
+from ...modeling_utils import (
     PreTrainedModel,
     apply_chunking_to_forward,
     find_pruneable_heads_and_indices,
     prune_linear_layer,
 )
-from transformers.utils import logging
-
+from ...utils import logging
 from .configuration_blip import BlipTextConfig

diff --git a/src/transformers/models/blip_2/configuration_blip_2.py b/src/transformers/models/blip_2/configuration_blip_2.py
index 9db098adf1a..8a80510db9a 100644
--- a/src/transformers/models/blip_2/configuration_blip_2.py
+++ b/src/transformers/models/blip_2/configuration_blip_2.py
@@ -18,9 +18,8 @@ import copy
 import os
 from typing import Union

-from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
-
 from ...configuration_utils import PretrainedConfig
+from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
 from ...utils import logging
 from ..auto import CONFIG_MAPPING

diff --git a/src/transformers/models/bloom/configuration_bloom.py b/src/transformers/models/bloom/configuration_bloom.py
index f2ea93c1168..17395625e01 100644
--- a/src/transformers/models/bloom/configuration_bloom.py
+++ b/src/transformers/models/bloom/configuration_bloom.py
@@ -18,15 +18,13 @@ from typing import TYPE_CHECKING, Any, List, Mapping, Optional

 from packaging import version

-from transformers import is_torch_available
-
 if TYPE_CHECKING:
-    from transformers import PreTrainedTokenizer, TensorType
+    from ... import PreTrainedTokenizer, TensorType

 from ...configuration_utils import PretrainedConfig
 from ...onnx import OnnxConfigWithPast, PatchingSpec
-from ...utils import logging
+from ...utils import is_torch_available, logging


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/bridgetower/image_processing_bridgetower.py b/src/transformers/models/bridgetower/image_processing_bridgetower.py
index 6cf988114e2..76e0876c76c 100644
--- a/src/transformers/models/bridgetower/image_processing_bridgetower.py
+++ b/src/transformers/models/bridgetower/image_processing_bridgetower.py
@@ -19,9 +19,6 @@ from typing import Any, Dict, Iterable, List, Optional, Tuple, Union

 import numpy as np

-from transformers.utils import is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import PaddingMode, center_crop, normalize, pad, rescale, resize, to_channel_dimension_format
 from ...image_utils import (
@@ -36,7 +33,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_vision_available, logging


 if is_vision_available():
diff --git a/src/transformers/models/chinese_clip/image_processing_chinese_clip.py b/src/transformers/models/chinese_clip/image_processing_chinese_clip.py
index a21372b7533..5aa4eafbbba 100644
--- a/src/transformers/models/chinese_clip/image_processing_chinese_clip.py
+++ b/src/transformers/models/chinese_clip/image_processing_chinese_clip.py
@@ -18,8 +18,6 @@ from typing import Dict, List, Optional, Union

 import numpy as np

-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import (
     center_crop,
@@ -40,8 +38,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
-from ...utils.import_utils import is_vision_available
+from ...utils import TensorType, is_vision_available, logging


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/clip/image_processing_clip.py b/src/transformers/models/clip/image_processing_clip.py
index b06e121758f..f9b5f3edde7 100644
--- a/src/transformers/models/clip/image_processing_clip.py
+++ b/src/transformers/models/clip/image_processing_clip.py
@@ -18,8 +18,6 @@ from typing import Dict, List, Optional, Union

 import numpy as np

-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import (
     center_crop,
@@ -40,8 +38,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
-from ...utils.import_utils import is_vision_available
+from ...utils import TensorType, is_vision_available, logging


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/conditional_detr/image_processing_conditional_detr.py b/src/transformers/models/conditional_detr/image_processing_conditional_detr.py
index 2a70e45edf0..cc4a24cb05d 100644
--- a/src/transformers/models/conditional_detr/image_processing_conditional_detr.py
+++ b/src/transformers/models/conditional_detr/image_processing_conditional_detr.py
@@ -22,9 +22,9 @@ from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Un

 import numpy as np

-from transformers.feature_extraction_utils import BatchFeature
-from transformers.image_processing_utils import BaseImageProcessor, get_size_dict
-from transformers.image_transforms import (
+from ...feature_extraction_utils import BatchFeature
+from ...image_processing_utils import BaseImageProcessor, get_size_dict
+from ...image_transforms import (
     PaddingMode,
     center_to_corners_format,
     corners_to_center_format,
@@ -36,7 +36,7 @@ from transformers.image_transforms import (
     rgb_to_id,
     to_channel_dimension_format,
 )
-from transformers.image_utils import (
+from ...image_utils import (
     IMAGENET_DEFAULT_MEAN,
     IMAGENET_DEFAULT_STD,
     ChannelDimension,
@@ -50,7 +50,9 @@ from transformers.image_utils import (
     valid_coco_panoptic_annotations,
     valid_images,
 )
-from transformers.utils import (
+from ...utils import (
+    ExplicitEnum,
+    TensorType,
     is_flax_available,
     is_jax_tensor,
     is_scipy_available,
@@ -60,7 +62,6 @@ from transformers.utils import (
     is_torch_tensor,
     is_vision_available,
 )
-from transformers.utils.generic import ExplicitEnum, TensorType


 if is_torch_available():
diff --git a/src/transformers/models/conditional_detr/modeling_conditional_detr.py b/src/transformers/models/conditional_detr/modeling_conditional_detr.py
index 36e6e942158..eb67b1e25bd 100644
--- a/src/transformers/models/conditional_detr/modeling_conditional_detr.py
+++ b/src/transformers/models/conditional_detr/modeling_conditional_detr.py
@@ -49,7 +49,7 @@ if is_timm_available():
     from timm import create_model

 if is_vision_available():
-    from transformers.image_transforms import center_to_corners_format
+    from ...image_transforms import center_to_corners_format


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/convnext/image_processing_convnext.py b/src/transformers/models/convnext/image_processing_convnext.py
index a46bdcfef75..6d1892ef87f 100644
--- a/src/transformers/models/convnext/image_processing_convnext.py
+++ b/src/transformers/models/convnext/image_processing_convnext.py
@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union

 import numpy as np

-from transformers.utils import is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import (
     center_crop,
@@ -40,7 +37,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_vision_available, logging


 if is_vision_available():
diff --git a/src/transformers/models/convnext/modeling_tf_convnext.py b/src/transformers/models/convnext/modeling_tf_convnext.py
index 671de6ca9b4..00db1f0b788 100644
--- a/src/transformers/models/convnext/modeling_tf_convnext.py
+++ b/src/transformers/models/convnext/modeling_tf_convnext.py
@@ -20,8 +20,6 @@ from typing import Dict, Optional, Tuple, Union
 import numpy as np
 import tensorflow as tf

-from transformers import shape_list
-
 from ...activations_tf import get_tf_activation
 from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSequenceClassifierOutput
 from ...modeling_tf_utils import (
@@ -32,6 +30,7 @@ from ...modeling_tf_utils import (
     keras_serializable,
     unpack_inputs,
 )
+from ...tf_utils import shape_list
 from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
 from .configuration_convnext import ConvNextConfig

diff --git a/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py b/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py
index 8bf8a885504..eca13d433f2 100644
--- a/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py
+++ b/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py
@@ -22,8 +22,6 @@ from typing import Dict, List, Optional, Tuple, Union
 import numpy as np
 import tensorflow as tf

-from transformers.tf_utils import shape_list, stable_softmax
-
 from ...activations_tf import get_tf_activation
 from ...modeling_tf_outputs import (
     TFBaseModelOutput,
@@ -39,6 +37,7 @@ from ...modeling_tf_utils import (
     keras_serializable,
     unpack_inputs,
 )
+from ...tf_utils import shape_list, stable_softmax
 from ...utils import (
     add_code_sample_docstrings,
     add_start_docstrings,
diff --git a/src/transformers/models/deformable_detr/image_processing_deformable_detr.py b/src/transformers/models/deformable_detr/image_processing_deformable_detr.py
index 4db2e27647a..87e6ef508c6 100644
--- a/src/transformers/models/deformable_detr/image_processing_deformable_detr.py
+++ b/src/transformers/models/deformable_detr/image_processing_deformable_detr.py
@@ -22,9 +22,9 @@ from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Un

 import numpy as np

-from transformers.feature_extraction_utils import BatchFeature
-from transformers.image_processing_utils import BaseImageProcessor, get_size_dict
-from transformers.image_transforms import (
+from ...feature_extraction_utils import BatchFeature
+from ...image_processing_utils import BaseImageProcessor, get_size_dict
+from ...image_transforms import (
     PaddingMode,
     center_to_corners_format,
     corners_to_center_format,
@@ -36,7 +36,7 @@ from transformers.image_transforms import (
     rgb_to_id,
     to_channel_dimension_format,
 )
-from transformers.image_utils import (
+from ...image_utils import (
     IMAGENET_DEFAULT_MEAN,
     IMAGENET_DEFAULT_STD,
     ChannelDimension,
@@ -50,7 +50,9 @@ from transformers.image_utils import (
     valid_coco_panoptic_annotations,
     valid_images,
 )
-from transformers.utils import (
+from ...utils import (
+    ExplicitEnum,
+    TensorType,
     is_flax_available,
     is_jax_tensor,
     is_scipy_available,
@@ -60,7 +62,6 @@ from transformers.utils import (
     is_torch_tensor,
     is_vision_available,
 )
-from transformers.utils.generic import ExplicitEnum, TensorType


 if is_torch_available():
diff --git a/src/transformers/models/deit/image_processing_deit.py b/src/transformers/models/deit/image_processing_deit.py
index 77d7d2bb2ca..3f9d5ee2d0a 100644
--- a/src/transformers/models/deit/image_processing_deit.py
+++ b/src/transformers/models/deit/image_processing_deit.py
@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union

 import numpy as np

-from transformers.utils import is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
 from ...image_utils import (
@@ -33,7 +30,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_vision_available, logging


 if is_vision_available():
diff --git a/src/transformers/models/detr/image_processing_detr.py b/src/transformers/models/detr/image_processing_detr.py
index 75132b9a2f3..71e80e8c748 100644
--- a/src/transformers/models/detr/image_processing_detr.py
+++ b/src/transformers/models/detr/image_processing_detr.py
@@ -22,8 +22,8 @@ from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Un

 import numpy as np

-from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
-from transformers.image_transforms import (
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
     PaddingMode,
     center_to_corners_format,
     corners_to_center_format,
@@ -35,7 +35,7 @@ from transformers.image_transforms import (
     rgb_to_id,
     to_channel_dimension_format,
 )
-from transformers.image_utils import (
+from ...image_utils import (
     IMAGENET_DEFAULT_MEAN,
     IMAGENET_DEFAULT_STD,
     ChannelDimension,
@@ -49,7 +49,9 @@ from transformers.image_utils import (
     valid_coco_panoptic_annotations,
     valid_images,
 )
-from transformers.utils import (
+from ...utils import (
+    ExplicitEnum,
+    TensorType,
     is_flax_available,
     is_jax_tensor,
     is_scipy_available,
@@ -59,7 +61,6 @@ from transformers.utils import (
     is_torch_tensor,
     is_vision_available,
 )
-from transformers.utils.generic import ExplicitEnum, TensorType


 if is_torch_available():
diff --git a/src/transformers/models/distilbert/modeling_distilbert.py b/src/transformers/models/distilbert/modeling_distilbert.py
index 7dbf95b0692..84db89e0f4b 100755
--- a/src/transformers/models/distilbert/modeling_distilbert.py
+++ b/src/transformers/models/distilbert/modeling_distilbert.py
@@ -27,9 +27,8 @@ import torch
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

-from transformers.configuration_utils import PretrainedConfig
-
 from ...activations import get_activation
+from ...configuration_utils import PretrainedConfig
 from ...deepspeed import is_deepspeed_zero3_enabled
 from ...modeling_outputs import (
     BaseModelOutput,
diff --git a/src/transformers/models/dpt/image_processing_dpt.py b/src/transformers/models/dpt/image_processing_dpt.py
index d6bcfe9c5e3..4f5dbc44507 100644
--- a/src/transformers/models/dpt/image_processing_dpt.py
+++ b/src/transformers/models/dpt/image_processing_dpt.py
@@ -19,9 +19,6 @@ from typing import Dict, Iterable, List, Optional, Tuple, Union

 import numpy as np

-from transformers.utils import is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
 from ...image_utils import (
@@ -37,7 +34,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_vision_available, logging


 if is_torch_available():
diff --git a/src/transformers/models/efficientformer/image_processing_efficientformer.py b/src/transformers/models/efficientformer/image_processing_efficientformer.py
index 5694fb166e3..81e3d798d11 100644
--- a/src/transformers/models/efficientformer/image_processing_efficientformer.py
+++ b/src/transformers/models/efficientformer/image_processing_efficientformer.py
@@ -18,8 +18,6 @@ from typing import Dict, List, Optional, Union

 import numpy as np

-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import (
     center_crop,
@@ -39,7 +37,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, logging


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/efficientnet/image_processing_efficientnet.py b/src/transformers/models/efficientnet/image_processing_efficientnet.py
index 0769fb820e2..f4d2a88ee4d 100644
--- a/src/transformers/models/efficientnet/image_processing_efficientnet.py
+++ b/src/transformers/models/efficientnet/image_processing_efficientnet.py
@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union

 import numpy as np

-from transformers.utils import is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
 from ...image_utils import (
@@ -33,7 +30,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_vision_available, logging


 if is_vision_available():
diff --git a/src/transformers/models/flava/image_processing_flava.py b/src/transformers/models/flava/image_processing_flava.py
index 433f09f6ad8..3b5755d9b4d 100644
--- a/src/transformers/models/flava/image_processing_flava.py
+++ b/src/transformers/models/flava/image_processing_flava.py
@@ -21,9 +21,6 @@ from typing import Any, Dict, Iterable, List, Optional, Tuple, Union

 import numpy as np

-from transformers.utils import is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
 from ...image_utils import (
@@ -36,7 +33,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_vision_available, logging


 if is_vision_available():
diff --git a/src/transformers/models/flava/modeling_flava.py b/src/transformers/models/flava/modeling_flava.py
index 720e0659725..6681de3b322 100644
--- a/src/transformers/models/flava/modeling_flava.py
+++ b/src/transformers/models/flava/modeling_flava.py
@@ -24,13 +24,12 @@ import torch
 import torch.utils.checkpoint
 from torch import nn

-from transformers.utils.doc import add_code_sample_docstrings
-
 from ...activations import ACT2FN
 from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
 from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
 from ...utils import (
     ModelOutput,
+    add_code_sample_docstrings,
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
     logging,
diff --git a/src/transformers/models/glpn/image_processing_glpn.py b/src/transformers/models/glpn/image_processing_glpn.py
index ba279c3c4ff..7b0f316d2b5 100644
--- a/src/transformers/models/glpn/image_processing_glpn.py
+++ b/src/transformers/models/glpn/image_processing_glpn.py
@@ -19,13 +19,17 @@ from typing import List, Optional, Union
 import numpy as np
 import PIL.Image

-from transformers.image_utils import PILImageResampling
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature
 from ...image_transforms import rescale, resize, to_channel_dimension_format
-from ...image_utils import ChannelDimension, get_image_size, make_list_of_images, to_numpy_array, valid_images
-from ...utils import logging
+from ...image_utils import (
+    ChannelDimension,
+    PILImageResampling,
+    get_image_size,
+    make_list_of_images,
+    to_numpy_array,
+    valid_images,
+)
+from ...utils import TensorType, logging


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/gpt2/configuration_gpt2.py b/src/transformers/models/gpt2/configuration_gpt2.py
index 4ad37c5fdf0..f1e2d0ae150 100644
--- a/src/transformers/models/gpt2/configuration_gpt2.py
+++ b/src/transformers/models/gpt2/configuration_gpt2.py
@@ -17,8 +17,7 @@ from collections import OrderedDict
 from typing import Any, List, Mapping, Optional

-from transformers import PreTrainedTokenizer, TensorType, is_torch_available
-
+from ... import PreTrainedTokenizer, TensorType, is_torch_available
 from ...configuration_utils import PretrainedConfig
 from ...onnx import OnnxConfigWithPast, PatchingSpec
 from ...utils import logging
diff --git a/src/transformers/models/hubert/modeling_hubert.py b/src/transformers/models/hubert/modeling_hubert.py
index dc88f9359d4..cb51d0479f8 100755
--- a/src/transformers/models/hubert/modeling_hubert.py
+++ b/src/transformers/models/hubert/modeling_hubert.py
@@ -23,9 +23,8 @@ import torch.utils.checkpoint
 from torch import nn
 from torch.nn import CrossEntropyLoss

-from transformers.deepspeed import is_deepspeed_zero3_enabled
-
 from ...activations import ACT2FN
+from ...deepspeed import is_deepspeed_zero3_enabled
 from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
 from ...modeling_utils import PreTrainedModel
 from ...pytorch_utils import torch_int_div
diff --git a/src/transformers/models/ibert/configuration_ibert.py b/src/transformers/models/ibert/configuration_ibert.py
index fe46e3fca61..249061ceae3 100644
--- a/src/transformers/models/ibert/configuration_ibert.py
+++ b/src/transformers/models/ibert/configuration_ibert.py
@@ -18,9 +18,8 @@ from collections import OrderedDict
 from typing import Mapping

-from transformers.onnx import OnnxConfig
-
 from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
 from ...utils import logging

diff --git a/src/transformers/models/imagegpt/image_processing_imagegpt.py b/src/transformers/models/imagegpt/image_processing_imagegpt.py
index 451b4a265fe..9cfa40078bd 100644
--- a/src/transformers/models/imagegpt/image_processing_imagegpt.py
+++ b/src/transformers/models/imagegpt/image_processing_imagegpt.py
@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union

 import numpy as np

-from transformers.utils import is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import rescale, resize, to_channel_dimension_format
 from ...image_utils import (
@@ -31,7 +28,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_vision_available, logging


 if is_vision_available():
diff --git a/src/transformers/models/jukebox/tokenization_jukebox.py b/src/transformers/models/jukebox/tokenization_jukebox.py
index bd4d6721daf..63399adf162 100644
--- a/src/transformers/models/jukebox/tokenization_jukebox.py
+++ b/src/transformers/models/jukebox/tokenization_jukebox.py
@@ -25,11 +25,10 @@ from typing import Any, Dict, List, Optional, Tuple, Union
 import numpy as np
 import regex

-from transformers.utils.generic import _is_jax, _is_numpy
-
 from ...tokenization_utils import AddedToken, PreTrainedTokenizer
 from ...tokenization_utils_base import BatchEncoding
 from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
+from ...utils.generic import _is_jax, _is_numpy


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/layoutlm/configuration_layoutlm.py b/src/transformers/models/layoutlm/configuration_layoutlm.py
index f297adea142..c6fc01b7650 100644
--- a/src/transformers/models/layoutlm/configuration_layoutlm.py
+++ b/src/transformers/models/layoutlm/configuration_layoutlm.py
@@ -16,11 +16,9 @@ from collections import OrderedDict
 from typing import Any, List, Mapping, Optional

-from transformers import PretrainedConfig, PreTrainedTokenizer, TensorType
-
-from ... import is_torch_available
+from ... import PretrainedConfig, PreTrainedTokenizer
 from ...onnx import OnnxConfig, PatchingSpec
-from ...utils import logging
+from ...utils import TensorType, is_torch_available, logging


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py b/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py
index ca01b3670d0..6ad0968b612 100644
--- a/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py
+++ b/src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py
@@ -18,9 +18,6 @@ from typing import Dict, Optional, Union

 import numpy as np

-from transformers.utils import is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import resize, to_channel_dimension_format, to_pil_image
 from ...image_utils import (
@@ -32,7 +29,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import is_pytesseract_available, logging, requires_backends
+from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


 if is_vision_available():
diff --git a/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py b/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py
index aec81de1a99..7152abf06c4 100644
--- a/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py
+++ b/src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py
@@ -18,9 +18,6 @@ from typing import Dict, Iterable, Optional, Union

 import numpy as np

-from transformers.utils import is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
 from ...image_utils import (
@@ -34,7 +31,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import is_pytesseract_available, logging, requires_backends
+from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


 if is_vision_available():
diff --git a/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py b/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py
index 56e4fa1e68a..db6618caaea 100644
--- a/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py
+++ b/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py
@@ -24,18 +24,16 @@ import torch.nn.functional as F
 import torch.utils.checkpoint
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

-from transformers import apply_chunking_to_forward
-from transformers.modeling_outputs import (
+from ...activations import ACT2FN
+from ...modeling_outputs import (
     BaseModelOutput,
     QuestionAnsweringModelOutput,
     SequenceClassifierOutput,
     TokenClassifierOutput,
 )
-from transformers.modeling_utils import PreTrainedModel
-from transformers.utils import logging
-
-from ...activations import ACT2FN
-from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import apply_chunking_to_forward
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
 from .configuration_layoutlmv3 import LayoutLMv3Config

diff --git a/src/transformers/models/levit/image_processing_levit.py b/src/transformers/models/levit/image_processing_levit.py
index 6aef221a16a..6e142729418 100644
--- a/src/transformers/models/levit/image_processing_levit.py
+++ b/src/transformers/models/levit/image_processing_levit.py
@@ -18,8 +18,6 @@ from typing import Dict, Iterable, List, Optional, Union

 import numpy as np

-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import (
     center_crop,
@@ -39,7 +37,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, logging


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/lxmert/modeling_tf_lxmert.py b/src/transformers/models/lxmert/modeling_tf_lxmert.py
index 5510c7ff845..9408188100f 100644
--- a/src/transformers/models/lxmert/modeling_tf_lxmert.py
+++ b/src/transformers/models/lxmert/modeling_tf_lxmert.py
@@ -23,8 +23,6 @@ from typing import Dict, Optional, Tuple, Union
 import numpy as np
 import tensorflow as tf

-from transformers.tf_utils import stable_softmax
-
 from ...activations_tf import get_tf_activation
 from ...modeling_tf_utils import (
     TFModelInputType,
@@ -34,6 +32,7 @@ from ...modeling_tf_utils import (
     shape_list,
     unpack_inputs,
 )
+from ...tf_utils import stable_softmax
 from ...utils import (
     ModelOutput,
     add_code_sample_docstrings,
diff --git a/src/transformers/models/markuplm/configuration_markuplm.py b/src/transformers/models/markuplm/configuration_markuplm.py
index 935625b1fa1..1455150598a 100644
--- a/src/transformers/models/markuplm/configuration_markuplm.py
+++ b/src/transformers/models/markuplm/configuration_markuplm.py
@@ -14,9 +14,8 @@
 # limitations under the License.
 """ MarkupLM model configuration"""

-from transformers.utils import logging
-
 from ...configuration_utils import PretrainedConfig
+from ...utils import logging


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/markuplm/modeling_markuplm.py b/src/transformers/models/markuplm/modeling_markuplm.py
index 205e6aefc12..18ad4d6978e 100755
--- a/src/transformers/models/markuplm/modeling_markuplm.py
+++ b/src/transformers/models/markuplm/modeling_markuplm.py
@@ -23,13 +23,13 @@ import torch.utils.checkpoint
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

-from transformers.activations import ACT2FN
-from transformers.file_utils import (
+from ...activations import ACT2FN
+from ...file_utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
     replace_return_docstrings,
 )
-from transformers.modeling_outputs import (
+from ...modeling_outputs import (
     BaseModelOutputWithPastAndCrossAttentions,
     BaseModelOutputWithPoolingAndCrossAttentions,
     MaskedLMOutput,
@@ -37,14 +37,13 @@ from transformers.modeling_outputs import (
     SequenceClassifierOutput,
     TokenClassifierOutput,
 )
-from transformers.modeling_utils import (
+from ...modeling_utils import (
     PreTrainedModel,
     apply_chunking_to_forward,
     find_pruneable_heads_and_indices,
     prune_linear_layer,
 )
-from transformers.utils import logging
-
+from ...utils import logging
 from .configuration_markuplm import MarkupLMConfig

diff --git a/src/transformers/models/mask2former/image_processing_mask2former.py b/src/transformers/models/mask2former/image_processing_mask2former.py
index b27ef5207b3..07642faf24b 100644
--- a/src/transformers/models/mask2former/image_processing_mask2former.py
+++ b/src/transformers/models/mask2former/image_processing_mask2former.py
@@ -20,8 +20,8 @@ from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union

 import numpy as np

-from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
-from transformers.image_transforms import (
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
     PaddingMode,
     get_resize_output_image_size,
     normalize,
@@ -31,7 +31,7 @@ from transformers.image_transforms import (
     to_channel_dimension_format,
     to_numpy_array,
 )
-from transformers.image_utils import (
+from ...image_utils import (
     ChannelDimension,
     ImageInput,
     PILImageResampling,
@@ -40,7 +40,7 @@ from transformers.image_utils import (
     is_batched,
     valid_images,
 )
-from transformers.utils import (
+from ...utils import (
     IMAGENET_DEFAULT_MEAN,
     IMAGENET_DEFAULT_STD,
     TensorType,
diff --git a/src/transformers/models/mask2former/modeling_mask2former.py b/src/transformers/models/mask2former/modeling_mask2former.py
index ca9712639df..f8a9c3c1ae4 100644
--- a/src/transformers/models/mask2former/modeling_mask2former.py
+++ b/src/transformers/models/mask2former/modeling_mask2former.py
@@ -24,9 +24,7 @@ import numpy as np
 import torch
 from torch import Tensor, nn

-from transformers import AutoBackbone, SwinConfig
-from transformers.utils import logging
-
+from ... import AutoBackbone, SwinConfig
 from ...activations import ACT2FN
 from ...file_utils import (
     ModelOutput,
@@ -38,6 +36,7 @@ from ...file_utils import (
 )
 from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions
 from ...modeling_utils import PreTrainedModel
+from ...utils import logging
 from .configuration_mask2former import Mask2FormerConfig

diff --git a/src/transformers/models/maskformer/feature_extraction_maskformer.py b/src/transformers/models/maskformer/feature_extraction_maskformer.py
index 26aff086afd..848c8e12829 100644
--- a/src/transformers/models/maskformer/feature_extraction_maskformer.py
+++ b/src/transformers/models/maskformer/feature_extraction_maskformer.py
@@ -16,8 +16,7 @@

 import warnings

-from transformers.utils import logging
-
+from ...utils import logging
 from .image_processing_maskformer import MaskFormerImageProcessor

diff --git a/src/transformers/models/maskformer/image_processing_maskformer.py b/src/transformers/models/maskformer/image_processing_maskformer.py
index 373bee1ab21..ef4314869b4 100644
--- a/src/transformers/models/maskformer/image_processing_maskformer.py
+++ b/src/transformers/models/maskformer/image_processing_maskformer.py
@@ -20,8 +20,8 @@ from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tupl

 import numpy as np

-from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
-from transformers.image_transforms import (
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
     PaddingMode,
     get_resize_output_image_size,
     normalize,
@@ -31,7 +31,7 @@ from transformers.image_transforms import (
     to_channel_dimension_format,
     to_numpy_array,
 )
-from transformers.image_utils import (
+from ...image_utils import (
     ChannelDimension,
     ImageInput,
     PILImageResampling,
@@ -40,7 +40,7 @@ from transformers.image_utils import (
     make_list_of_images,
     valid_images,
 )
-from transformers.utils import (
+from ...utils import (
     IMAGENET_DEFAULT_MEAN,
     IMAGENET_DEFAULT_STD,
     TensorType,
diff --git a/src/transformers/models/maskformer/modeling_maskformer.py b/src/transformers/models/maskformer/modeling_maskformer.py
index 1482b76f209..878b046c180 100644
--- a/src/transformers/models/maskformer/modeling_maskformer.py
+++ b/src/transformers/models/maskformer/modeling_maskformer.py
@@ -24,9 +24,7 @@ import numpy as np
 import torch
 from torch import Tensor, nn

-from transformers import AutoBackbone
-from transformers.utils import logging
-
+from ... import AutoBackbone
 from ...activations import ACT2FN
 from ...modeling_outputs import BaseModelOutputWithCrossAttentions
 from ...modeling_utils import PreTrainedModel
@@ -35,6 +33,7 @@ from ...utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
     is_scipy_available,
+    logging,
     replace_return_docstrings,
     requires_backends,
 )
diff --git a/src/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py b/src/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py
index c332b96c86a..9ba6bb1685c 100644
--- a/src/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py
+++ b/src/transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py
@@ -18,8 +18,6 @@ from typing import Dict, List, Optional, Union

 import numpy as np

-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import (
     center_crop,
@@ -39,7 +37,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, logging


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py b/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py
index 7b24547749a..be8020a7ea4 100644
--- a/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py
+++ b/src/transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py
@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Tuple, Union

 import numpy as np

-from transformers.utils import is_torch_available, is_torch_tensor
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import (
     center_crop,
@@ -40,7 +37,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_torch_available, is_torch_tensor, logging


 if is_torch_available():
diff --git a/src/transformers/models/mobilevit/image_processing_mobilevit.py b/src/transformers/models/mobilevit/image_processing_mobilevit.py
index b600009c2ea..e121c2ae7ba 100644
--- a/src/transformers/models/mobilevit/image_processing_mobilevit.py
+++ b/src/transformers/models/mobilevit/image_processing_mobilevit.py
@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Tuple, Union

 import numpy as np

-from transformers.utils import is_torch_available, is_torch_tensor, is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import center_crop, get_resize_output_image_size, rescale, resize, to_channel_dimension_format
 from ...image_utils import (
@@ -32,7 +29,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging


 if is_vision_available():
diff --git a/src/transformers/models/nezha/configuration_nezha.py b/src/transformers/models/nezha/configuration_nezha.py
index 8d191b7d96e..f41a9b2bf89 100644
--- a/src/transformers/models/nezha/configuration_nezha.py
+++ b/src/transformers/models/nezha/configuration_nezha.py
@@ -1,4 +1,4 @@
-from transformers import PretrainedConfig
+from ... import PretrainedConfig


 NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
diff --git a/src/transformers/models/oneformer/image_processing_oneformer.py b/src/transformers/models/oneformer/image_processing_oneformer.py
index 237a3dbad46..67eadd4e841 100644
--- a/src/transformers/models/oneformer/image_processing_oneformer.py
+++ b/src/transformers/models/oneformer/image_processing_oneformer.py
@@ -21,8 +21,8 @@ from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
 import numpy as np
 from huggingface_hub import hf_hub_download

-from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
-from transformers.image_transforms import (
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
     PaddingMode,
     get_resize_output_image_size,
     normalize,
@@ -32,7 +32,7 @@ from transformers.image_transforms import (
     to_channel_dimension_format,
     to_numpy_array,
 )
-from transformers.image_utils import (
+from ...image_utils import (
     ChannelDimension,
     ImageInput,
     PILImageResampling,
@@ -41,7 +41,7 @@ from transformers.image_utils import (
     make_list_of_images,
     valid_images,
 )
-from transformers.utils import (
+from ...utils import (
     IMAGENET_DEFAULT_MEAN,
     IMAGENET_DEFAULT_STD,
     TensorType,
diff --git a/src/transformers/models/oneformer/modeling_oneformer.py b/src/transformers/models/oneformer/modeling_oneformer.py
index 8e41ff86921..6536a54ba5f 100644
--- a/src/transformers/models/oneformer/modeling_oneformer.py
+++ b/src/transformers/models/oneformer/modeling_oneformer.py
@@ -24,9 +24,7 @@ import torch
 from torch import Tensor, nn
 from torch.cuda.amp import autocast

-from transformers import AutoBackbone
-from transformers.utils import logging
-
+from ... import AutoBackbone
 from ...activations import ACT2FN
 from ...modeling_outputs import BaseModelOutput
 from ...modeling_utils import PreTrainedModel
@@ -35,6 +33,7 @@ from ...utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
     is_scipy_available,
+    logging,
     replace_return_docstrings,
     requires_backends,
 )
diff --git a/src/transformers/models/oneformer/processing_oneformer.py b/src/transformers/models/oneformer/processing_oneformer.py
index bc392a77c14..c4479110ae7 100644
--- a/src/transformers/models/oneformer/processing_oneformer.py
+++ b/src/transformers/models/oneformer/processing_oneformer.py
@@ -18,9 +18,8 @@ Image/Text processor class for OneFormer

 from typing import List

-from transformers.utils import is_torch_available
-
 from ...processing_utils import ProcessorMixin
+from ...utils import is_torch_available


 if is_torch_available():
diff --git a/src/transformers/models/owlvit/image_processing_owlvit.py b/src/transformers/models/owlvit/image_processing_owlvit.py
index cb5ffc4ddfd..2d86599fffe 100644
--- a/src/transformers/models/owlvit/image_processing_owlvit.py
+++ b/src/transformers/models/owlvit/image_processing_owlvit.py
@@ -19,8 +19,8 @@ from typing import Dict, List, Optional, Tuple, Union

 import numpy as np

-from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
-from transformers.image_transforms import (
+from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
+from ...image_transforms import (
     center_crop,
     center_to_corners_format,
     normalize,
@@ -29,7 +29,7 @@ from transformers.image_transforms import (
     resize,
     to_channel_dimension_format,
     to_numpy_array,
 )
-from transformers.image_utils import (
+from ...image_utils import (
     OPENAI_CLIP_MEAN,
     OPENAI_CLIP_STD,
     ChannelDimension,
@@ -38,7 +38,7 @@ from transformers.image_utils import (
     make_list_of_images,
     valid_images,
 )
-from transformers.utils import TensorType, is_torch_available, logging
+from ...utils import TensorType, is_torch_available, logging


 if is_torch_available():
diff --git a/src/transformers/models/owlvit/processing_owlvit.py b/src/transformers/models/owlvit/processing_owlvit.py
index 04b8c191acd..e41dc16354b 100644
--- a/src/transformers/models/owlvit/processing_owlvit.py
+++ b/src/transformers/models/owlvit/processing_owlvit.py
@@ -21,10 +21,9 @@ from typing import List

 import numpy as np

-from transformers import is_flax_available, is_tf_available, is_torch_available
-
 from ...processing_utils import ProcessorMixin
 from ...tokenization_utils_base import BatchEncoding
+from ...utils import is_flax_available, is_tf_available, is_torch_available


 class OwlViTProcessor(ProcessorMixin):
diff --git a/src/transformers/models/perceiver/image_processing_perceiver.py b/src/transformers/models/perceiver/image_processing_perceiver.py
index 59b7fd5332b..ecfd7026156 100644
--- a/src/transformers/models/perceiver/image_processing_perceiver.py
+++ b/src/transformers/models/perceiver/image_processing_perceiver.py
@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union

 import numpy as np

-from transformers.utils import is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
 from ...image_utils import (
@@ -34,7 +31,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_vision_available, logging


 if is_vision_available():
diff --git a/src/transformers/models/poolformer/image_processing_poolformer.py b/src/transformers/models/poolformer/image_processing_poolformer.py
index 2548c03da46..d92ffaa3ddc 100644
--- a/src/transformers/models/poolformer/image_processing_poolformer.py
+++ b/src/transformers/models/poolformer/image_processing_poolformer.py
@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union

 import numpy as np

-from transformers import is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import (
     center_crop,
@@ -40,7 +37,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_vision_available, logging


 if is_vision_available():
diff --git a/src/transformers/models/realm/retrieval_realm.py b/src/transformers/models/realm/retrieval_realm.py
index 4bdf19454f0..c84e7af08f5 100644
--- a/src/transformers/models/realm/retrieval_realm.py
+++ b/src/transformers/models/realm/retrieval_realm.py
@@ -20,8 +20,7 @@ from typing import Optional, Union
 import numpy as np
 from huggingface_hub import hf_hub_download

-from transformers import AutoTokenizer
-
+from ... import AutoTokenizer
 from ...utils import logging

diff --git a/src/transformers/models/segformer/image_processing_segformer.py b/src/transformers/models/segformer/image_processing_segformer.py
index 1920c6668b0..36d171f8e2f 100644
--- a/src/transformers/models/segformer/image_processing_segformer.py
+++ b/src/transformers/models/segformer/image_processing_segformer.py
@@ -19,9 +19,6 @@ from typing import Any, Dict, List, Optional, Tuple, Union

 import numpy as np

-from transformers.utils import is_torch_available, is_torch_tensor, is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
 from ...image_utils import (
@@ -34,7 +31,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging


 if is_vision_available():
diff --git a/src/transformers/models/sew/modeling_sew.py b/src/transformers/models/sew/modeling_sew.py
index 6b6d3487bb3..7f7a90a8a1e 100644
--- a/src/transformers/models/sew/modeling_sew.py
+++ b/src/transformers/models/sew/modeling_sew.py
@@ -24,9 +24,8 @@ import torch.utils.checkpoint
 from torch import nn
 from torch.nn import CrossEntropyLoss

-from transformers.deepspeed import is_deepspeed_zero3_enabled
-
 from ...activations import ACT2FN
+from ...deepspeed import is_deepspeed_zero3_enabled
 from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
 from ...modeling_utils import PreTrainedModel
 from ...pytorch_utils import torch_int_div
diff --git a/src/transformers/models/sew_d/modeling_sew_d.py b/src/transformers/models/sew_d/modeling_sew_d.py
index 5513fec19d5..02a8477d146 100644
--- a/src/transformers/models/sew_d/modeling_sew_d.py
+++ b/src/transformers/models/sew_d/modeling_sew_d.py
@@ -25,9 +25,8 @@ import torch.utils.checkpoint
 from torch import nn
 from torch.nn import CrossEntropyLoss, LayerNorm

-from transformers.deepspeed import is_deepspeed_zero3_enabled
-
 from ...activations import ACT2FN
+from ...deepspeed import is_deepspeed_zero3_enabled
 from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
 from ...modeling_utils import PreTrainedModel
 from ...pytorch_utils import softmax_backward_data, torch_int_div
diff --git a/src/transformers/models/swin2sr/image_processing_swin2sr.py b/src/transformers/models/swin2sr/image_processing_swin2sr.py
index 24ee846e459..5af2ed41063 100644
--- a/src/transformers/models/swin2sr/image_processing_swin2sr.py
+++ b/src/transformers/models/swin2sr/image_processing_swin2sr.py
@@ -18,12 +18,10 @@ from typing import Optional, Union

 import numpy as np

-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature
 from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
 from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
-from ...utils import logging
+from ...utils import TensorType, logging


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/tvlt/feature_extraction_tvlt.py b/src/transformers/models/tvlt/feature_extraction_tvlt.py
index 7911fa93719..ac219502f1b 100644
--- a/src/transformers/models/tvlt/feature_extraction_tvlt.py
+++ b/src/transformers/models/tvlt/feature_extraction_tvlt.py
@@ -20,8 +20,8 @@ from typing import List, Optional, Union
 import numpy as np
 from numpy.fft import fft

-from transformers.feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
-from transformers.utils import TensorType, logging
+from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
+from ...utils import TensorType, logging


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/tvlt/image_processing_tvlt.py b/src/transformers/models/tvlt/image_processing_tvlt.py
index d01a49f3510..d07ca31e2ff 100644
--- a/src/transformers/models/tvlt/image_processing_tvlt.py
+++ b/src/transformers/models/tvlt/image_processing_tvlt.py
@@ -17,8 +17,6 @@ from typing import Dict, List, Optional, Union

 import numpy as np

-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import (
     center_crop,
@@ -38,7 +36,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, logging


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/upernet/modeling_upernet.py b/src/transformers/models/upernet/modeling_upernet.py
index 1c2d37b0f2e..a00866f77dc 100644
--- a/src/transformers/models/upernet/modeling_upernet.py
+++ b/src/transformers/models/upernet/modeling_upernet.py
@@ -20,8 +20,7 @@ import torch
 from torch import nn
 from torch.nn import CrossEntropyLoss

-from transformers import AutoBackbone
-
+from ... import AutoBackbone
 from ...modeling_outputs import SemanticSegmenterOutput
 from ...modeling_utils import BackboneMixin, PreTrainedModel
 from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
diff --git a/src/transformers/models/videomae/image_processing_videomae.py b/src/transformers/models/videomae/image_processing_videomae.py
index e3edfdf00d2..3bc1ab5dd65 100644
--- a/src/transformers/models/videomae/image_processing_videomae.py
+++ b/src/transformers/models/videomae/image_processing_videomae.py
@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union

 import numpy as np

-from transformers.utils import is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import (
     center_crop,
@@ -40,7 +37,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_vision_available, logging


 if is_vision_available():
diff --git a/src/transformers/models/vilt/image_processing_vilt.py b/src/transformers/models/vilt/image_processing_vilt.py
index 783197c7e95..87b6e682e93 100644
--- a/src/transformers/models/vilt/image_processing_vilt.py
+++ b/src/transformers/models/vilt/image_processing_vilt.py
@@ -19,9 +19,6 @@ from typing import Any, Dict, Iterable, List, Optional, Tuple, Union

 import numpy as np

-from transformers.utils import is_vision_available
-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import PaddingMode, normalize, pad, rescale, resize, to_channel_dimension_format
 from ...image_utils import (
@@ -36,7 +33,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, is_vision_available, logging


 if is_vision_available():
diff --git a/src/transformers/models/vit/image_processing_vit.py b/src/transformers/models/vit/image_processing_vit.py
index 5ca0d932880..66d68e7f82d 100644
--- a/src/transformers/models/vit/image_processing_vit.py
+++ b/src/transformers/models/vit/image_processing_vit.py
@@ -18,8 +18,6 @@ from typing import Dict, List, Optional, Union

 import numpy as np

-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
 from ...image_utils import (
@@ -32,7 +30,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
+from ...utils import TensorType, logging


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/vit_hybrid/image_processing_vit_hybrid.py b/src/transformers/models/vit_hybrid/image_processing_vit_hybrid.py
index 1babc3677a9..a45ece6e5a3 100644
--- a/src/transformers/models/vit_hybrid/image_processing_vit_hybrid.py
+++ b/src/transformers/models/vit_hybrid/image_processing_vit_hybrid.py
@@ -18,8 +18,6 @@ from typing import Dict, List, Optional, Union

 import numpy as np

-from transformers.utils.generic import TensorType
-
 from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
 from ...image_transforms import (
     center_crop,
@@ -40,8 +38,7 @@ from ...image_utils import (
     to_numpy_array,
     valid_images,
 )
-from ...utils import logging
-from ...utils.import_utils import is_vision_available
+from ...utils import TensorType, is_vision_available, logging


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/models/yolos/image_processing_yolos.py b/src/transformers/models/yolos/image_processing_yolos.py
index a8fb00aee5c..6b6de49c68a 100644
--- a/src/transformers/models/yolos/image_processing_yolos.py
+++ b/src/transformers/models/yolos/image_processing_yolos.py
@@ -20,9 +20,9 @@ from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Un

 import numpy as np

-from transformers.feature_extraction_utils import BatchFeature
-from transformers.image_processing_utils import BaseImageProcessor, get_size_dict
-from transformers.image_transforms import (
+from ...feature_extraction_utils import BatchFeature
+from ...image_processing_utils import BaseImageProcessor, get_size_dict
+from ...image_transforms import (
     PaddingMode,
     center_to_corners_format,
     corners_to_center_format,
@@ -34,7 +34,7 @@ from transformers.image_transforms import (
     rgb_to_id,
     to_channel_dimension_format,
 )
-from transformers.image_utils import (
+from ...image_utils import (
     IMAGENET_DEFAULT_MEAN,
     IMAGENET_DEFAULT_STD,
     ChannelDimension,
@@ -48,7 +48,9 @@ from transformers.image_utils import (
     valid_coco_panoptic_annotations,
     valid_images,
 )
-from transformers.utils import (
+from ...utils import (
+    ExplicitEnum,
+    TensorType,
     is_flax_available,
     is_jax_tensor,
     is_scipy_available,
@@ -57,8 +59,8 @@ from transformers.utils import (
     is_torch_available,
     is_torch_tensor,
     is_vision_available,
+    logging,
 )
-from transformers.utils.generic import ExplicitEnum, TensorType


 if is_torch_available():
@@ -74,6 +76,7 @@ if is_scipy_available():
     import scipy.special
     import scipy.stats

+logger = logging.get_logger(__name__)

 AnnotationType = Dict[str, Union[int, str, List[Dict]]]

diff --git a/src/transformers/pipelines/pt_utils.py b/src/transformers/pipelines/pt_utils.py
index a2ce6fc7f21..4a95d050ec8 100644
--- a/src/transformers/pipelines/pt_utils.py
+++ b/src/transformers/pipelines/pt_utils.py
@@ -2,7 +2,7 @@ import numpy as np
 import torch
 from torch.utils.data import Dataset, IterableDataset

-from transformers.utils.generic import ModelOutput
+from ..utils.generic import ModelOutput


 class PipelineDataset(Dataset):
diff --git a/src/transformers/pipelines/text_generation.py b/src/transformers/pipelines/text_generation.py
index 2800515c821..f95acf7d307 100644
--- a/src/transformers/pipelines/text_generation.py
+++ b/src/transformers/pipelines/text_generation.py
@@ -1,8 +1,7 @@
 import enum
 import warnings

-from transformers import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
-
+from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
 from ..utils import add_end_docstrings, is_tf_available
 from .base import PIPELINE_INIT_ARGS, Pipeline

diff --git a/src/transformers/sagemaker/training_args_sm.py b/src/transformers/sagemaker/training_args_sm.py
index e4a356a25b1..3daac7859b5 100644
--- a/src/transformers/sagemaker/training_args_sm.py
+++ b/src/transformers/sagemaker/training_args_sm.py
@@ -20,8 +20,8 @@ from dataclasses import dataclass, field

 import torch

-from transformers.training_args import TrainingArguments
-from transformers.utils import cached_property, is_sagemaker_dp_enabled, logging
+from ..training_args import TrainingArguments
+from ..utils import cached_property, is_sagemaker_dp_enabled, logging


 logger = logging.get_logger(__name__)
diff --git a/src/transformers/utils/bitsandbytes.py b/src/transformers/utils/bitsandbytes.py
index 9c183bf5d54..efd9abf6ce5 100644
--- a/src/transformers/utils/bitsandbytes.py
+++ b/src/transformers/utils/bitsandbytes.py
@@ -1,6 +1,6 @@
 from copy import deepcopy

-from transformers.utils import is_accelerate_available, is_bitsandbytes_available
+from .import_utils import is_accelerate_available, is_bitsandbytes_available


 if is_bitsandbytes_available():
diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py
index 2bee24324ca..3403867eafe 100644
--- a/src/transformers/utils/hub.py
+++ b/src/transformers/utils/hub.py
@@ -49,8 +49,6 @@ from huggingface_hub.utils import (
 )
 from requests.exceptions import HTTPError

-from transformers.utils.logging import tqdm
-
 from . import __version__, logging
 from .generic import working_or_temp_dir
 from .import_utils import (
@@ -61,6 +59,7 @@ from .import_utils import (
     is_torch_available,
     is_training_run_on_sagemaker,
 )
+from .logging import tqdm


 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py
index 45ee725778a..6ef639831b8 100644
--- a/src/transformers/utils/import_utils.py
+++ b/src/transformers/utils/import_utils.py
@@ -29,9 +29,8 @@ from typing import Any

 from packaging import version

-from transformers.utils.versions import importlib_metadata
-
 from . import logging
+from .versions import importlib_metadata


 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name