Mirror of https://github.com/huggingface/transformers.git (synced 2025-07-31 02:02:21 +06:00)

[Refactor] Relative imports wherever we can (#21880)

* initial commit
* update
* second batch
* style
* fix imports
* fix relative import on pipeline

Parent: 43299c63ca
Commit: 633e5e89f7
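
The refactor itself is mechanical: modules that live inside the transformers package stop importing themselves through the installed top-level name (from transformers.xxx import ...) and use package-relative imports (from ...xxx import ...) instead, so every hunk below follows the same pattern. As an illustration only (the file path in the comment is an example and is not taken from this diff), the snippet uses importlib to show that a relative name resolved against a subpackage lands on the same module object as the old absolute form, assuming transformers is installed:

import importlib

# Inside e.g. transformers/models/foo/modeling_foo.py the commit rewrites
#   from transformers.utils import logging
# as
#   from ...utils import logging
# Both spellings resolve to the same module object:
absolute = importlib.import_module("transformers.utils.logging")
relative = importlib.import_module("..utils.logging", package="transformers.models")
print(absolute is relative)  # expected: True; the leading dots just walk up the package tree
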
@@ -23,9 +23,8 @@ from itertools import chain
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Pattern, Tuple, Union

import transformers.models.auto as auto_module
from transformers.models.auto.configuration_auto import model_type_to_module_name

from ..models import auto as auto_module
from ..models.auto.configuration_auto import model_type_to_module_name
from ..utils import is_flax_available, is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand

@@ -18,7 +18,7 @@ from typing import Iterable, List, Optional, Tuple, Union

import numpy as np

from transformers.image_utils import (
from .image_utils import (
ChannelDimension,
ImageInput,
get_channel_dimension_axis,

@@ -26,8 +26,8 @@ from transformers.image_utils import (
infer_channel_dimension_format,
to_numpy_array,
)
from transformers.utils import ExplicitEnum, TensorType, is_jax_tensor, is_tf_tensor, is_torch_tensor
from transformers.utils.import_utils import (
from .utils import ExplicitEnum, TensorType, is_jax_tensor, is_tf_tensor, is_torch_tensor
from .utils.import_utils import (
is_flax_available,
is_tf_available,
is_torch_available,

@@ -33,8 +33,6 @@ import tensorflow as tf
from huggingface_hub import Repository, list_repo_files
from packaging.version import parse

from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files

from . import DataCollatorWithPadding, DefaultDataCollator
from .activations_tf import get_tf_activation
from .configuration_utils import PretrainedConfig

@@ -63,6 +61,7 @@ from .utils import (
requires_backends,
working_or_temp_dir,
)
from .utils.hub import convert_file_size_to_int, get_checkpoint_shard_files


if parse(tf.__version__) >= parse("2.11.0"):

@@ -32,9 +32,6 @@ from packaging import version
from torch import Tensor, nn
from torch.nn import CrossEntropyLoss

from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files
from transformers.utils.import_utils import ENV_VARS_TRUE_VALUES, is_sagemaker_mp_enabled

from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled

@@ -73,7 +70,8 @@ from .utils import (
logging,
replace_return_docstrings,
)
from .utils.import_utils import importlib_metadata
from .utils.hub import convert_file_size_to_int, get_checkpoint_shard_files
from .utils.import_utils import ENV_VARS_TRUE_VALUES, importlib_metadata, is_sagemaker_mp_enabled
from .utils.quantization_config import BitsAndBytesConfig
from .utils.versions import require_version_core

@@ -19,8 +19,8 @@ import argparse

import torch

from transformers import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
from transformers.utils import logging
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()

@@ -19,9 +19,6 @@ from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np

from transformers.utils import is_torch_available, is_torch_tensor, is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (

@@ -34,7 +31,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging


if is_vision_available():

@@ -18,8 +18,6 @@ from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,

@@ -40,8 +38,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils.import_utils import is_vision_available
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)

@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (

@@ -33,7 +30,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():

@@ -22,20 +22,19 @@ import torch.utils.checkpoint
from torch import Tensor, device, nn
from torch.nn import CrossEntropyLoss

from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
)
from transformers.modeling_utils import (
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging

from ...utils import logging
from .configuration_blip import BlipTextConfig

@@ -18,9 +18,8 @@ import copy
import os
from typing import Union

from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING

@@ -18,15 +18,13 @@ from typing import TYPE_CHECKING, Any, List, Mapping, Optional

from packaging import version

from transformers import is_torch_available


if TYPE_CHECKING:
from transformers import PreTrainedTokenizer, TensorType
from ... import PreTrainedTokenizer, TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
from ...utils import is_torch_available, logging


logger = logging.get_logger(__name__)

@@ -19,9 +19,6 @@ from typing import Any, Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import PaddingMode, center_crop, normalize, pad, rescale, resize, to_channel_dimension_format
from ...image_utils import (

@@ -36,7 +33,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():

@@ -18,8 +18,6 @@ from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,

@@ -40,8 +38,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils.import_utils import is_vision_available
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)

@@ -18,8 +18,6 @@ from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,

@@ -40,8 +38,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils.import_utils import is_vision_available
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)

@@ -22,9 +22,9 @@ from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Un

import numpy as np

from transformers.feature_extraction_utils import BatchFeature
from transformers.image_processing_utils import BaseImageProcessor, get_size_dict
from transformers.image_transforms import (
from ...feature_extraction_utils import BatchFeature
from ...image_processing_utils import BaseImageProcessor, get_size_dict
from ...image_transforms import (
PaddingMode,
center_to_corners_format,
corners_to_center_format,

@@ -36,7 +36,7 @@ from transformers.image_transforms import (
rgb_to_id,
to_channel_dimension_format,
)
from transformers.image_utils import (
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,

@@ -50,7 +50,9 @@ from transformers.image_utils import (
valid_coco_panoptic_annotations,
valid_images,
)
from transformers.utils import (
from ...utils import (
ExplicitEnum,
TensorType,
is_flax_available,
is_jax_tensor,
is_scipy_available,

@@ -60,7 +62,6 @@ from transformers.utils import (
is_torch_tensor,
is_vision_available,
)
from transformers.utils.generic import ExplicitEnum, TensorType


if is_torch_available():

@@ -49,7 +49,7 @@ if is_timm_available():
from timm import create_model

if is_vision_available():
from transformers.image_transforms import center_to_corners_format
from ...image_transforms import center_to_corners_format

logger = logging.get_logger(__name__)

@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,

@@ -40,7 +37,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():

@@ -20,8 +20,6 @@ from typing import Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf

from transformers import shape_list

from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSequenceClassifierOutput
from ...modeling_tf_utils import (

@@ -32,6 +30,7 @@ from ...modeling_tf_utils import (
keras_serializable,
unpack_inputs,
)
from ...tf_utils import shape_list
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_convnext import ConvNextConfig

@@ -22,8 +22,6 @@ from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import tensorflow as tf

from transformers.tf_utils import shape_list, stable_softmax

from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import (
TFBaseModelOutput,

@@ -39,6 +37,7 @@ from ...modeling_tf_utils import (
keras_serializable,
unpack_inputs,
)
from ...tf_utils import shape_list, stable_softmax
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,

@@ -22,9 +22,9 @@ from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Un

import numpy as np

from transformers.feature_extraction_utils import BatchFeature
from transformers.image_processing_utils import BaseImageProcessor, get_size_dict
from transformers.image_transforms import (
from ...feature_extraction_utils import BatchFeature
from ...image_processing_utils import BaseImageProcessor, get_size_dict
from ...image_transforms import (
PaddingMode,
center_to_corners_format,
corners_to_center_format,

@@ -36,7 +36,7 @@ from transformers.image_transforms import (
rgb_to_id,
to_channel_dimension_format,
)
from transformers.image_utils import (
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,

@@ -50,7 +50,9 @@ from transformers.image_utils import (
valid_coco_panoptic_annotations,
valid_images,
)
from transformers.utils import (
from ...utils import (
ExplicitEnum,
TensorType,
is_flax_available,
is_jax_tensor,
is_scipy_available,

@@ -60,7 +62,6 @@ from transformers.utils import (
is_torch_tensor,
is_vision_available,
)
from transformers.utils.generic import ExplicitEnum, TensorType


if is_torch_available():

@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (

@@ -33,7 +30,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():

@@ -22,8 +22,8 @@ from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Un

import numpy as np

from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from transformers.image_transforms import (
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
PaddingMode,
center_to_corners_format,
corners_to_center_format,

@@ -35,7 +35,7 @@ from transformers.image_transforms import (
rgb_to_id,
to_channel_dimension_format,
)
from transformers.image_utils import (
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,

@@ -49,7 +49,9 @@ from transformers.image_utils import (
valid_coco_panoptic_annotations,
valid_images,
)
from transformers.utils import (
from ...utils import (
ExplicitEnum,
TensorType,
is_flax_available,
is_jax_tensor,
is_scipy_available,

@@ -59,7 +61,6 @@ from transformers.utils import (
is_torch_tensor,
is_vision_available,
)
from transformers.utils.generic import ExplicitEnum, TensorType


if is_torch_available():

@@ -27,9 +27,8 @@ import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers.configuration_utils import PretrainedConfig

from ...activations import get_activation
from ...configuration_utils import PretrainedConfig
from ...deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import (
BaseModelOutput,

@@ -19,9 +19,6 @@ from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (

@@ -37,7 +34,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():

@@ -18,8 +18,6 @@ from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,

@@ -39,7 +37,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (

@@ -33,7 +30,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():

@@ -21,9 +21,6 @@ from typing import Any, Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (

@@ -36,7 +33,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():

@@ -24,13 +24,12 @@ import torch
import torch.utils.checkpoint
from torch import nn

from transformers.utils.doc import add_code_sample_docstrings

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,

@@ -19,13 +19,17 @@ from typing import List, Optional, Union
import numpy as np
import PIL.Image

from transformers.image_utils import PILImageResampling
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import ChannelDimension, get_image_size, make_list_of_images, to_numpy_array, valid_images
from ...utils import logging
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

@@ -17,8 +17,7 @@
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from transformers import PreTrainedTokenizer, TensorType, is_torch_available

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging

@@ -23,9 +23,8 @@ import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from transformers.deepspeed import is_deepspeed_zero3_enabled

from ...activations import ACT2FN
from ...deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import torch_int_div

@@ -18,9 +18,8 @@
from collections import OrderedDict
from typing import Mapping

from transformers.onnx import OnnxConfig

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (

@@ -31,7 +28,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():

@@ -25,11 +25,10 @@ from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex

from transformers.utils.generic import _is_jax, _is_numpy

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy


logger = logging.get_logger(__name__)

@@ -16,11 +16,9 @@
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from transformers import PretrainedConfig, PreTrainedTokenizer, TensorType

from ... import is_torch_available
from ... import PretrainedConfig, PreTrainedTokenizer
from ...onnx import OnnxConfig, PatchingSpec
from ...utils import logging
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

@@ -18,9 +18,6 @@ from typing import Dict, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (

@@ -32,7 +29,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import is_pytesseract_available, logging, requires_backends
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():

@@ -18,9 +18,6 @@ from typing import Dict, Iterable, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (

@@ -34,7 +31,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import is_pytesseract_available, logging, requires_backends
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():

@@ -24,18 +24,16 @@ import torch.nn.functional as F
import torch.utils.checkpoint
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers import apply_chunking_to_forward
from transformers.modeling_outputs import (
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from ...activations import ACT2FN
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_layoutlmv3 import LayoutLMv3Config

@@ -18,8 +18,6 @@ from typing import Dict, Iterable, List, Optional, Union

import numpy as np

from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,

@@ -39,7 +37,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

@@ -23,8 +23,6 @@ from typing import Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf

from transformers.tf_utils import stable_softmax

from ...activations_tf import get_tf_activation
from ...modeling_tf_utils import (
TFModelInputType,

@@ -34,6 +32,7 @@ from ...modeling_tf_utils import (
shape_list,
unpack_inputs,
)
from ...tf_utils import stable_softmax
from ...utils import (
ModelOutput,
add_code_sample_docstrings,

@@ -14,9 +14,8 @@
# limitations under the License.
""" MarkupLM model configuration"""

from transformers.utils import logging

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

@@ -23,13 +23,13 @@ import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers.activations import ACT2FN
from transformers.file_utils import (
from ...activations import ACT2FN
from ...file_utils import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
MaskedLMOutput,

@@ -37,14 +37,13 @@ from transformers.modeling_outputs import (
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging

from ...utils import logging
from .configuration_markuplm import MarkupLMConfig

@@ -20,8 +20,8 @@ from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union

import numpy as np

from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from transformers.image_transforms import (
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
PaddingMode,
get_resize_output_image_size,
normalize,

@@ -31,7 +31,7 @@ from transformers.image_transforms import (
to_channel_dimension_format,
to_numpy_array,
)
from transformers.image_utils import (
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,

@@ -40,7 +40,7 @@ from transformers.image_utils import (
is_batched,
valid_images,
)
from transformers.utils import (
from ...utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
TensorType,

@@ -24,9 +24,7 @@ import numpy as np
import torch
from torch import Tensor, nn

from transformers import AutoBackbone, SwinConfig
from transformers.utils import logging

from ... import AutoBackbone, SwinConfig
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,

@@ -38,6 +36,7 @@ from ...file_utils import (
)
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_mask2former import Mask2FormerConfig

@@ -16,8 +16,7 @@

import warnings

from transformers.utils import logging

from ...utils import logging
from .image_processing_maskformer import MaskFormerImageProcessor

@@ -20,8 +20,8 @@ from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tupl

import numpy as np

from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from transformers.image_transforms import (
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
PaddingMode,
get_resize_output_image_size,
normalize,

@@ -31,7 +31,7 @@ from transformers.image_transforms import (
to_channel_dimension_format,
to_numpy_array,
)
from transformers.image_utils import (
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,

@@ -40,7 +40,7 @@ from transformers.image_utils import (
make_list_of_images,
valid_images,
)
from transformers.utils import (
from ...utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
TensorType,

@@ -24,9 +24,7 @@ import numpy as np
import torch
from torch import Tensor, nn

from transformers import AutoBackbone
from transformers.utils import logging

from ... import AutoBackbone
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithCrossAttentions
from ...modeling_utils import PreTrainedModel

@@ -35,6 +33,7 @@ from ...utils import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_scipy_available,
logging,
replace_return_docstrings,
requires_backends,
)

@@ -18,8 +18,6 @@ from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,

@@ -39,7 +37,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from transformers.utils import is_torch_available, is_torch_tensor
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,

@@ -40,7 +37,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging


if is_torch_available():

@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from transformers.utils import is_torch_available, is_torch_tensor, is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, get_resize_output_image_size, rescale, resize, to_channel_dimension_format
from ...image_utils import (

@@ -32,7 +29,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging


if is_vision_available():

@@ -1,4 +1,4 @@
from transformers import PretrainedConfig
from ... import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {

@@ -21,8 +21,8 @@ from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
import numpy as np
from huggingface_hub import hf_hub_download

from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from transformers.image_transforms import (
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
PaddingMode,
get_resize_output_image_size,
normalize,

@@ -32,7 +32,7 @@ from transformers.image_transforms import (
to_channel_dimension_format,
to_numpy_array,
)
from transformers.image_utils import (
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,

@@ -41,7 +41,7 @@ from transformers.image_utils import (
make_list_of_images,
valid_images,
)
from transformers.utils import (
from ...utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
TensorType,

@@ -24,9 +24,7 @@ import torch
from torch import Tensor, nn
from torch.cuda.amp import autocast

from transformers import AutoBackbone
from transformers.utils import logging

from ... import AutoBackbone
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput
from ...modeling_utils import PreTrainedModel

@@ -35,6 +33,7 @@ from ...utils import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_scipy_available,
logging,
replace_return_docstrings,
requires_backends,
)

@@ -18,9 +18,8 @@ Image/Text processor class for OneFormer

from typing import List

from transformers.utils import is_torch_available

from ...processing_utils import ProcessorMixin
from ...utils import is_torch_available


if is_torch_available():

@@ -19,8 +19,8 @@ from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from transformers.image_transforms import (
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
center_to_corners_format,
normalize,

@@ -29,7 +29,7 @@ from transformers.image_transforms import (
to_channel_dimension_format,
to_numpy_array,
)
from transformers.image_utils import (
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,

@@ -38,7 +38,7 @@ from transformers.image_utils import (
make_list_of_images,
valid_images,
)
from transformers.utils import TensorType, is_torch_available, logging
from ...utils import TensorType, is_torch_available, logging


if is_torch_available():

@@ -21,10 +21,9 @@ from typing import List

import numpy as np

from transformers import is_flax_available, is_tf_available, is_torch_available

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):

@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (

@@ -34,7 +31,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():

@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union

import numpy as np

from transformers import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,

@@ -40,7 +37,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():

@@ -20,8 +20,7 @@ from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer

from ... import AutoTokenizer
from ...utils import logging

@@ -19,9 +19,6 @@ from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np

from transformers.utils import is_torch_available, is_torch_tensor, is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (

@@ -34,7 +31,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging


if is_vision_available():

@@ -24,9 +24,8 @@ import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from transformers.deepspeed import is_deepspeed_zero3_enabled

from ...activations import ACT2FN
from ...deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import torch_int_div

@@ -25,9 +25,8 @@ import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, LayerNorm

from transformers.deepspeed import is_deepspeed_zero3_enabled

from ...activations import ACT2FN
from ...deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import softmax_backward_data, torch_int_div

@@ -18,12 +18,10 @@ from typing import Optional, Union

import numpy as np

from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import logging
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

@@ -20,8 +20,8 @@ from typing import List, Optional, Union
import numpy as np
from numpy.fft import fft

from transformers.feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from transformers.utils import TensorType, logging
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

@@ -17,8 +17,6 @@ from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,

@@ -38,7 +36,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

@@ -20,8 +20,7 @@ import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from transformers import AutoBackbone

from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import BackboneMixin, PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings

@@ -18,9 +18,6 @@ from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,

@@ -40,7 +37,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():

@@ -19,9 +19,6 @@ from typing import Any, Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import PaddingMode, normalize, pad, rescale, resize, to_channel_dimension_format
from ...image_utils import (

@@ -36,7 +33,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():

@@ -18,8 +18,6 @@ from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (

@@ -32,7 +30,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

@@ -18,8 +18,6 @@ from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,

@@ -40,8 +38,7 @@ from ...image_utils import (
to_numpy_array,
valid_images,
)
from ...utils import logging
from ...utils.import_utils import is_vision_available
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)

@@ -20,9 +20,9 @@ from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Un

import numpy as np

from transformers.feature_extraction_utils import BatchFeature
from transformers.image_processing_utils import BaseImageProcessor, get_size_dict
from transformers.image_transforms import (
from ...feature_extraction_utils import BatchFeature
from ...image_processing_utils import BaseImageProcessor, get_size_dict
from ...image_transforms import (
PaddingMode,
center_to_corners_format,
corners_to_center_format,

@@ -34,7 +34,7 @@ from transformers.image_transforms import (
rgb_to_id,
to_channel_dimension_format,
)
from transformers.image_utils import (
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,

@@ -48,7 +48,9 @@ from transformers.image_utils import (
valid_coco_panoptic_annotations,
valid_images,
)
from transformers.utils import (
from ...utils import (
ExplicitEnum,
TensorType,
is_flax_available,
is_jax_tensor,
is_scipy_available,

@@ -57,8 +59,8 @@ from transformers.utils import (
is_torch_available,
is_torch_tensor,
is_vision_available,
logging,
)
from transformers.utils.generic import ExplicitEnum, TensorType


if is_torch_available():

@@ -74,6 +76,7 @@ if is_scipy_available():
import scipy.special
import scipy.stats

logger = logging.get_logger(__name__)

AnnotationType = Dict[str, Union[int, str, List[Dict]]]

@@ -2,7 +2,7 @@ import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from transformers.utils.generic import ModelOutput
from ..utils.generic import ModelOutput


class PipelineDataset(Dataset):

@@ -1,8 +1,7 @@
import enum
import warnings

from transformers import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline

@@ -20,8 +20,8 @@ from dataclasses import dataclass, field

import torch

from transformers.training_args import TrainingArguments
from transformers.utils import cached_property, is_sagemaker_dp_enabled, logging
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging


logger = logging.get_logger(__name__)

@@ -1,6 +1,6 @@
from copy import deepcopy

from transformers.utils import is_accelerate_available, is_bitsandbytes_available
from .import_utils import is_accelerate_available, is_bitsandbytes_available


if is_bitsandbytes_available():

@@ -49,8 +49,6 @@ from huggingface_hub.utils import (
)
from requests.exceptions import HTTPError

from transformers.utils.logging import tqdm

from . import __version__, logging
from .generic import working_or_temp_dir
from .import_utils import (

@@ -61,6 +59,7 @@ from .import_utils import (
is_torch_available,
is_training_run_on_sagemaker,
)
from .logging import tqdm


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

@@ -29,9 +29,8 @@ from typing import Any

from packaging import version

from transformers.utils.versions import importlib_metadata

from . import logging
from .versions import importlib_metadata


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
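
A small, self-contained sketch (not part of the commit) of how one might scan a checkout for remaining absolute self-imports of the kind this refactor removes; the default src/transformers path is an assumption about the repository layout:

import re
import sys
from pathlib import Path

# Matches "import transformers..." or "from transformers... import ..." at the start of a line.
PATTERN = re.compile(r"^\s*(?:from|import)\s+transformers(?:\.|\s|$)")

def scan(root: Path) -> int:
    hits = 0
    for path in sorted(root.rglob("*.py")):
        for lineno, line in enumerate(path.read_text(encoding="utf-8").splitlines(), start=1):
            if PATTERN.match(line):
                print(f"{path}:{lineno}: {line.strip()}")
                hits += 1
    return hits

if __name__ == "__main__":
    # The default directory is an assumption; pass another path as the first argument.
    target = Path(sys.argv[1]) if len(sys.argv) > 1 else Path("src/transformers")
    sys.exit(1 if scan(target) else 0)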