[tests] further fix Tester object has no attribute '_testMethodName' (#35781)

* bug fix

* update with more cases

* more entries

* Fix

---------

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
Fanli Lin 2025-01-29 23:05:33 +08:00 committed by GitHub
parent ec7790f0d3
commit f0ae65c198
57 changed files with 57 additions and 110 deletions
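
Why the inheritance had to go: these Tester helpers are instantiated directly by a real test case (passed in as "parent"), so unittest.TestCase.__init__ never runs the way it would under the unittest runner, and internals such as _testMethodName are never set; anything that later touches those internals (for example str() or repr() on the tester) raises the AttributeError in the title. A minimal sketch of the failure and of the fix applied uniformly to the 57 files below (HelperTester is a hypothetical stand-in, not a class from this diff):

    import unittest

    class HelperTester(unittest.TestCase):  # hypothetical reproduction, not code from this commit
        def __init__(self, parent):
            # TestCase.__init__ never runs, so the attributes it would set
            # (e.g. self._testMethodName) do not exist on this instance.
            self.parent = parent

    str(HelperTester(parent=None))
    # AttributeError: 'HelperTester' object has no attribute '_testMethodName'

    # The fix: the helper never needed TestCase at all, so drop the base
    # class. With it gone, the super().__init__() workaround calls become
    # dead weight, which is why both lines disappear in every file below.
    class HelperTester:
        def __init__(self, parent):
            self.parent = parent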


@@ -36,7 +36,7 @@ if is_flax_available():
     )
 
 
-class FlaxAlbertModelTester(unittest.TestCase):
+class FlaxAlbertModelTester:
     def __init__(
         self,
         parent,
@@ -80,7 +80,6 @@ class FlaxAlbertModelTester(unittest.TestCase):
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
-        super().__init__()
 
     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
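
For context on how these testers are driven (a simplified, hypothetical sketch; the mixin name follows the repo's test conventions and is illustrative): each tester is owned by an actual TestCase, which passes itself in as parent, so the helper never needed any unittest machinery of its own.

    class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
        def setUp(self):
            # The real TestCase owns the plain helper; "parent" lets the
            # helper call back into assertions on the test case.
            self.model_tester = FlaxAlbertModelTester(self)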


@@ -35,7 +35,7 @@ if is_torch_available():
     import torch
 
 
-class AriaImageProcessingTester(unittest.TestCase):
+class AriaImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -55,7 +55,6 @@ class AriaImageProcessingTester(unittest.TestCase):
         do_convert_rgb=True,
         resample=PILImageResampling.BICUBIC,
     ):
-        super().__init__()
         self.size = size if size is not None else {"longest_edge": max_resolution}
         self.parent = parent
         self.batch_size = batch_size


@@ -36,7 +36,7 @@ if is_vision_available():
     from transformers import BeitImageProcessor
 
 
-class FlaxBeitModelTester(unittest.TestCase):
+class FlaxBeitModelTester:
     def __init__(
         self,
         parent,
@@ -79,7 +79,6 @@ class FlaxBeitModelTester(unittest.TestCase):
         # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
         num_patches = (image_size // patch_size) ** 2
         self.seq_length = num_patches + 1
-        super().__init__()
 
     def prepare_config_and_inputs(self):
         pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])


@@ -35,7 +35,7 @@ if is_flax_available():
     )
 
 
-class FlaxBertModelTester(unittest.TestCase):
+class FlaxBertModelTester:
     def __init__(
         self,
         parent,
@@ -79,7 +79,6 @@ class FlaxBertModelTester(unittest.TestCase):
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
-        super().__init__()
 
     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)


@@ -35,7 +35,7 @@ if is_flax_available():
     )
 
 
-class FlaxBigBirdModelTester(unittest.TestCase):
+class FlaxBigBirdModelTester:
     def __init__(
         self,
         parent,
@@ -90,7 +90,6 @@ class FlaxBigBirdModelTester(unittest.TestCase):
         self.use_bias = use_bias
         self.block_size = block_size
         self.num_random_blocks = num_random_blocks
-        super().__init__()
 
     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)


@@ -26,7 +26,7 @@ if is_vision_available():
     from transformers import BlipImageProcessor
 
 
-class BlipImageProcessingTester(unittest.TestCase):
+class BlipImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -43,7 +43,6 @@ class BlipImageProcessingTester(unittest.TestCase):
         image_std=[0.26862954, 0.26130258, 0.27577711],
         do_convert_rgb=True,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 20, "width": 20}
         self.parent = parent
         self.batch_size = batch_size


@@ -31,7 +31,7 @@ if is_vision_available():
     from transformers import BridgeTowerImageProcessor
 
 
-class BridgeTowerImageProcessingTester(unittest.TestCase):
+class BridgeTowerImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -50,7 +50,6 @@ class BridgeTowerImageProcessingTester(unittest.TestCase):
         max_resolution=400,
         num_channels=3,
     ):
-        super().__init__()
         self.parent = parent
         self.do_resize = do_resize
         self.size = size if size is not None else {"shortest_edge": 288}


@@ -32,7 +32,7 @@ if is_vision_available():
     from transformers import ChameleonImageProcessor
 
 
-class ChameleonImageProcessingTester(unittest.TestCase):
+class ChameleonImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -50,7 +50,6 @@ class ChameleonImageProcessingTester(unittest.TestCase):
         image_std=[1.0, 1.0, 1.0],
         do_convert_rgb=True,
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 18}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
         self.parent = parent


@@ -26,7 +26,7 @@ if is_vision_available():
     from transformers import ChineseCLIPImageProcessor
 
 
-class ChineseCLIPImageProcessingTester(unittest.TestCase):
+class ChineseCLIPImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -44,7 +44,6 @@ class ChineseCLIPImageProcessingTester(unittest.TestCase):
         image_std=[0.26862954, 0.26130258, 0.27577711],
         do_convert_rgb=True,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 224, "width": 224}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
         self.parent = parent


@@ -26,7 +26,7 @@ if is_vision_available():
     from transformers import ConvNextImageProcessor
 
 
-class ConvNextImageProcessingTester(unittest.TestCase):
+class ConvNextImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -42,7 +42,6 @@ class ConvNextImageProcessingTester(unittest.TestCase):
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 20}
         self.parent = parent
         self.batch_size = batch_size


@@ -35,7 +35,7 @@ if is_vision_available():
     from transformers import DeformableDetrImageProcessor, DeformableDetrImageProcessorFast
 
 
-class DeformableDetrImageProcessingTester(unittest.TestCase):
+class DeformableDetrImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -52,7 +52,6 @@ class DeformableDetrImageProcessingTester(unittest.TestCase):
         rescale_factor=1 / 255,
         do_pad=True,
     ):
-        super().__init__()
         # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
         size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
         self.parent = parent


@@ -26,7 +26,7 @@ if is_vision_available():
     from transformers import DeiTImageProcessor
 
 
-class DeiTImageProcessingTester(unittest.TestCase):
+class DeiTImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -43,7 +43,6 @@ class DeiTImageProcessingTester(unittest.TestCase):
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"height": 20, "width": 20}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
 


@@ -37,7 +37,7 @@ if is_vision_available():
     from transformers import DetrImageProcessorFast
 
 
-class DetrImageProcessingTester(unittest.TestCase):
+class DetrImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -54,7 +54,6 @@ class DetrImageProcessingTester(unittest.TestCase):
         image_std=[0.5, 0.5, 0.5],
         do_pad=True,
     ):
-        super().__init__()
         # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
         size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
         self.parent = parent


@@ -35,7 +35,7 @@ if is_flax_available():
     )
 
 
-class FlaxDistilBertModelTester(unittest.TestCase):
+class FlaxDistilBertModelTester:
     def __init__(
         self,
         parent,
@@ -79,7 +79,6 @@ class FlaxDistilBertModelTester(unittest.TestCase):
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
-        super().__init__()
 
     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)


@@ -33,7 +33,7 @@ if is_vision_available():
     from transformers import DonutImageProcessor
 
 
-class DonutImageProcessingTester(unittest.TestCase):
+class DonutImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -51,7 +51,6 @@ class DonutImageProcessingTester(unittest.TestCase):
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         self.parent = parent
         self.batch_size = batch_size
         self.num_channels = num_channels


@@ -34,7 +34,7 @@ if is_vision_available():
     from transformers import DPTImageProcessor
 
 
-class DPTImageProcessingTester(unittest.TestCase):
+class DPTImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -50,7 +50,6 @@ class DPTImageProcessingTester(unittest.TestCase):
         image_std=[0.5, 0.5, 0.5],
         do_reduce_labels=False,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size


@@ -28,7 +28,7 @@ if is_vision_available():
     from transformers import EfficientNetImageProcessor
 
 
-class EfficientNetImageProcessorTester(unittest.TestCase):
+class EfficientNetImageProcessorTester:
     def __init__(
         self,
         parent,
@@ -43,7 +43,6 @@ class EfficientNetImageProcessorTester(unittest.TestCase):
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size


@@ -21,7 +21,7 @@ if is_flax_available():
     )
 
 
-class FlaxElectraModelTester(unittest.TestCase):
+class FlaxElectraModelTester:
     def __init__(
         self,
         parent,
@@ -67,7 +67,6 @@ class FlaxElectraModelTester(unittest.TestCase):
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
-        super().__init__()
 
     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)


@@ -42,7 +42,7 @@ else:
     FLAVA_IMAGE_MEAN = FLAVA_IMAGE_STD = FLAVA_CODEBOOK_MEAN = FLAVA_CODEBOOK_STD = None
 
 
-class FlavaImageProcessingTester(unittest.TestCase):
+class FlavaImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -76,7 +76,6 @@ class FlavaImageProcessingTester(unittest.TestCase):
         codebook_image_mean=FLAVA_CODEBOOK_MEAN,
         codebook_image_std=FLAVA_CODEBOOK_STD,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 224, "width": 224}
         crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
         codebook_size = codebook_size if codebook_size is not None else {"height": 112, "width": 112}


@@ -33,7 +33,7 @@ if is_vision_available():
     from transformers import GLPNImageProcessor
 
 
-class GLPNImageProcessingTester(unittest.TestCase):
+class GLPNImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -46,7 +46,6 @@ class GLPNImageProcessingTester(unittest.TestCase):
         size_divisor=32,
         do_rescale=True,
     ):
-        super().__init__()
         self.parent = parent
         self.batch_size = batch_size
         self.num_channels = num_channels


@@ -35,7 +35,7 @@ if is_torch_available():
     import torch
 
 
-class Idefics3ImageProcessingTester(unittest.TestCase):
+class Idefics3ImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -58,7 +58,6 @@ class Idefics3ImageProcessingTester(unittest.TestCase):
         do_image_splitting=True,
         resample=PILImageResampling.LANCZOS,
     ):
-        super().__init__()
         self.size = size if size is not None else {"longest_edge": max_resolution}
         self.parent = parent
         self.batch_size = batch_size


@@ -38,7 +38,7 @@ if is_vision_available():
     from transformers import ImageGPTImageProcessor
 
 
-class ImageGPTImageProcessingTester(unittest.TestCase):
+class ImageGPTImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -51,7 +51,6 @@ class ImageGPTImageProcessingTester(unittest.TestCase):
         size=None,
         do_normalize=True,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size


@@ -33,7 +33,7 @@ if is_vision_available():
     from transformers import InstructBlipVideoImageProcessor
 
 
-class InstructBlipVideoProcessingTester(unittest.TestCase):
+class InstructBlipVideoProcessingTester:
     def __init__(
         self,
         parent,
@@ -50,7 +50,6 @@ class InstructBlipVideoProcessingTester(unittest.TestCase):
         do_convert_rgb=True,
         frames=4,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size


@@ -28,7 +28,7 @@ if is_pytesseract_available():
     from transformers import LayoutLMv2ImageProcessor
 
 
-class LayoutLMv2ImageProcessingTester(unittest.TestCase):
+class LayoutLMv2ImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -41,7 +41,6 @@ class LayoutLMv2ImageProcessingTester(unittest.TestCase):
         size=None,
         apply_ocr=True,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size


@@ -28,7 +28,7 @@ if is_pytesseract_available():
     from transformers import LayoutLMv3ImageProcessor
 
 
-class LayoutLMv3ImageProcessingTester(unittest.TestCase):
+class LayoutLMv3ImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -41,7 +41,6 @@ class LayoutLMv3ImageProcessingTester(unittest.TestCase):
         size=None,
         apply_ocr=True,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size


@@ -26,7 +26,7 @@ if is_vision_available():
     from transformers import LevitImageProcessor
 
 
-class LevitImageProcessingTester(unittest.TestCase):
+class LevitImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -43,7 +43,6 @@ class LevitImageProcessingTester(unittest.TestCase):
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 18}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
         self.parent = parent


@@ -31,7 +31,7 @@ if is_vision_available():
     from transformers import LlavaImageProcessor
 
 
-class LlavaImageProcessingTester(unittest.TestCase):
+class LlavaImageProcessingTester:
     def __init__(
         self,
         parent,


@@ -74,7 +74,7 @@ def prepare_mbart_inputs_dict(
     }
 
 
-class FlaxMBartModelTester(unittest.TestCase):
+class FlaxMBartModelTester:
     def __init__(
         self,
         parent,
@@ -116,7 +116,6 @@ class FlaxMBartModelTester(unittest.TestCase):
         self.bos_token_id = bos_token_id
         self.decoder_start_token_id = decoder_start_token_id
         self.initializer_range = initializer_range
-        super().__init__()
 
     def prepare_config_and_inputs(self):
         input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)


@@ -34,7 +34,7 @@ if is_torch_available():
     import torch
 
 
-class MllamaImageProcessingTester(unittest.TestCase):
+class MllamaImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -55,7 +55,6 @@ class MllamaImageProcessingTester(unittest.TestCase):
         do_pad=True,
         max_image_tiles=4,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 224, "width": 224}
         self.parent = parent
         self.batch_size = batch_size


@@ -26,7 +26,7 @@ if is_vision_available():
     from transformers import MobileNetV1ImageProcessor
 
 
-class MobileNetV1ImageProcessingTester(unittest.TestCase):
+class MobileNetV1ImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -40,7 +40,6 @@ class MobileNetV1ImageProcessingTester(unittest.TestCase):
         do_center_crop=True,
         crop_size=None,
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 20}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
         self.parent = parent


@@ -26,7 +26,7 @@ if is_vision_available():
     from transformers import MobileNetV2ImageProcessor
 
 
-class MobileNetV2ImageProcessingTester(unittest.TestCase):
+class MobileNetV2ImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -40,7 +40,6 @@ class MobileNetV2ImageProcessingTester(unittest.TestCase):
         do_center_crop=True,
         crop_size=None,
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 20}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
         self.parent = parent


@@ -33,7 +33,7 @@ if is_vision_available():
     from transformers import MobileViTImageProcessor
 
 
-class MobileViTImageProcessingTester(unittest.TestCase):
+class MobileViTImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -48,7 +48,6 @@ class MobileViTImageProcessingTester(unittest.TestCase):
         crop_size=None,
         do_flip_channel_order=True,
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 20}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
         self.parent = parent


@@ -34,7 +34,7 @@ if is_vision_available():
     from transformers import NougatImageProcessor
 
 
-class NougatImageProcessingTester(unittest.TestCase):
+class NougatImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -53,7 +53,6 @@ class NougatImageProcessingTester(unittest.TestCase):
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"height": 20, "width": 20}
         self.parent = parent
         self.batch_size = batch_size


@@ -31,7 +31,7 @@ if is_torch_available():
     import torch
 
 
-class Owlv2ImageProcessingTester(unittest.TestCase):
+class Owlv2ImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -47,7 +47,6 @@ class Owlv2ImageProcessingTester(unittest.TestCase):
         image_std=[0.26862954, 0.26130258, 0.27577711],
         do_convert_rgb=True,
     ):
-        super().__init__()
         self.parent = parent
         self.batch_size = batch_size
         self.num_channels = num_channels


@@ -26,7 +26,7 @@ if is_vision_available():
     from transformers import OwlViTImageProcessor
 
 
-class OwlViTImageProcessingTester(unittest.TestCase):
+class OwlViTImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -44,7 +44,6 @@ class OwlViTImageProcessingTester(unittest.TestCase):
         image_std=[0.26862954, 0.26130258, 0.27577711],
         do_convert_rgb=True,
     ):
-        super().__init__()
         self.parent = parent
         self.batch_size = batch_size
         self.num_channels = num_channels


@@ -25,7 +25,7 @@ if is_vision_available():
     from transformers import PoolFormerImageProcessor
 
 
-class PoolFormerImageProcessingTester(unittest.TestCase):
+class PoolFormerImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -41,7 +41,6 @@ class PoolFormerImageProcessingTester(unittest.TestCase):
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 30}
         crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
         self.parent = parent


@@ -26,7 +26,7 @@ if is_vision_available():
     from transformers import PvtImageProcessor
 
 
-class PvtImageProcessingTester(unittest.TestCase):
+class PvtImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -41,7 +41,6 @@ class PvtImageProcessingTester(unittest.TestCase):
         image_mean=[0.485, 0.456, 0.406],
         image_std=[0.229, 0.224, 0.225],
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size


@@ -36,7 +36,7 @@ if is_vision_available():
     from transformers import AutoImageProcessor
 
 
-class FlaxRegNetModelTester(unittest.TestCase):
+class FlaxRegNetModelTester:
     def __init__(
         self,
         parent,
@@ -65,7 +65,6 @@ class FlaxRegNetModelTester(unittest.TestCase):
         self.num_labels = num_labels
         self.scope = scope
         self.num_stages = len(hidden_sizes)
-        super().__init__()
 
     def prepare_config_and_inputs(self):
         pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])


@@ -35,7 +35,7 @@ if is_vision_available():
     from transformers import AutoImageProcessor
 
 
-class FlaxResNetModelTester(unittest.TestCase):
+class FlaxResNetModelTester:
     def __init__(
         self,
         parent,
@@ -64,7 +64,6 @@ class FlaxResNetModelTester(unittest.TestCase):
         self.num_labels = num_labels
         self.scope = scope
         self.num_stages = len(hidden_sizes)
-        super().__init__()
 
     def prepare_config_and_inputs(self):
         pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])


@@ -34,7 +34,7 @@ if is_flax_available():
     )
 
 
-class FlaxRobertaModelTester(unittest.TestCase):
+class FlaxRobertaModelTester:
     def __init__(
         self,
         parent,
@@ -78,7 +78,6 @@ class FlaxRobertaModelTester(unittest.TestCase):
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
-        super().__init__()
 
     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)


@@ -37,7 +37,7 @@ if is_flax_available():
 
 
 # Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTester with Roberta->RobertaPreLayerNorm
-class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
+class FlaxRobertaPreLayerNormModelTester:
     def __init__(
         self,
         parent,
@@ -81,7 +81,6 @@ class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
-        super().__init__()
 
     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)


@@ -35,7 +35,7 @@ if is_flax_available():
     )
 
 
-class FlaxRoFormerModelTester(unittest.TestCase):
+class FlaxRoFormerModelTester:
     def __init__(
         self,
         parent,
@@ -79,7 +79,6 @@ class FlaxRoFormerModelTester(unittest.TestCase):
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
-        super().__init__()
 
     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)


@@ -31,7 +31,7 @@ if is_torch_available():
     import torch
 
 
-class RTDetrImageProcessingTester(unittest.TestCase):
+class RTDetrImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -45,7 +45,6 @@ class RTDetrImageProcessingTester(unittest.TestCase):
         do_pad=False,
         return_tensors="pt",
     ):
-        super().__init__()
         self.parent = parent
         self.batch_size = batch_size
         self.num_channels = num_channels


@@ -26,7 +26,7 @@ if is_vision_available():
     from transformers import SiglipImageProcessor
 
 
-class SiglipImageProcessingTester(unittest.TestCase):
+class SiglipImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -43,7 +43,6 @@ class SiglipImageProcessingTester(unittest.TestCase):
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size


@@ -42,7 +42,7 @@ def random_tensor(size):
     return torch.rand(size)
 
 
-class SuperGlueImageProcessingTester(unittest.TestCase):
+class SuperGlueImageProcessingTester:
     def __init__(
         self,
         parent,


@@ -34,7 +34,7 @@ if is_vision_available():
     from transformers.image_transforms import get_image_size
 
 
-class Swin2SRImageProcessingTester(unittest.TestCase):
+class Swin2SRImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -48,7 +48,6 @@ class Swin2SRImageProcessingTester(unittest.TestCase):
         do_pad=True,
         pad_size=8,
     ):
-        super().__init__()
         self.parent = parent
         self.batch_size = batch_size
         self.num_channels = num_channels


@@ -26,7 +26,7 @@ if is_vision_available():
     from transformers import TextNetImageProcessor
 
 
-class TextNetImageProcessingTester(unittest.TestCase):
+class TextNetImageProcessingTester:
     def __init__(
         self,
         parent,


@@ -35,7 +35,7 @@ if is_vision_available():
     from transformers import TvpImageProcessor
 
 
-class TvpImageProcessingTester(unittest.TestCase):
+class TvpImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -58,7 +58,6 @@ class TvpImageProcessingTester(unittest.TestCase):
         num_channels=3,
         num_frames=2,
     ):
-        super().__init__()
         self.do_resize = do_resize
         self.size = size
         self.do_center_crop = do_center_crop


@@ -34,7 +34,7 @@ if is_vision_available():
     from transformers import VideoLlavaImageProcessor
 
 
-class VideoLlavaImageProcessingTester(unittest.TestCase):
+class VideoLlavaImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -52,7 +52,6 @@ class VideoLlavaImageProcessingTester(unittest.TestCase):
         image_std=OPENAI_CLIP_STD,
         do_convert_rgb=True,
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 20}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
         self.parent = parent


@@ -33,7 +33,7 @@ if is_vision_available():
     from transformers import VideoMAEImageProcessor
 
 
-class VideoMAEImageProcessingTester(unittest.TestCase):
+class VideoMAEImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -50,7 +50,6 @@ class VideoMAEImageProcessingTester(unittest.TestCase):
         image_std=[0.5, 0.5, 0.5],
         crop_size=None,
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 18}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
 


@@ -30,7 +30,7 @@ if is_vision_available():
     from transformers import ViltImageProcessor
 
 
-class ViltImageProcessingTester(unittest.TestCase):
+class ViltImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -46,7 +46,6 @@ class ViltImageProcessingTester(unittest.TestCase):
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 30}
         self.parent = parent
         self.batch_size = batch_size


@@ -29,7 +29,7 @@ if is_torchvision_available():
     from transformers import ViTImageProcessorFast
 
 
-class ViTImageProcessingTester(unittest.TestCase):
+class ViTImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -44,7 +44,6 @@ class ViTImageProcessingTester(unittest.TestCase):
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size


@@ -30,7 +30,7 @@ if is_flax_available():
     from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
 
 
-class FlaxViTModelTester(unittest.TestCase):
+class FlaxViTModelTester:
     def __init__(
         self,
         parent,
@@ -72,7 +72,6 @@ class FlaxViTModelTester(unittest.TestCase):
         # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
         num_patches = (image_size // patch_size) ** 2
         self.seq_length = num_patches + 1
-        super().__init__()
 
     def prepare_config_and_inputs(self):
         pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])


@@ -35,7 +35,7 @@ if is_vision_available():
     from transformers import VitMatteImageProcessor
 
 
-class VitMatteImageProcessingTester(unittest.TestCase):
+class VitMatteImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -52,7 +52,6 @@ class VitMatteImageProcessingTester(unittest.TestCase):
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         self.parent = parent
         self.batch_size = batch_size
         self.num_channels = num_channels


@@ -34,7 +34,7 @@ if is_vision_available():
     from transformers import VitPoseImageProcessor
 
 
-class VitPoseImageProcessingTester(unittest.TestCase):
+class VitPoseImageProcessingTester:
     def __init__(
         self,
         parent,


@@ -33,7 +33,7 @@ if is_vision_available():
     from transformers import VivitImageProcessor
 
 
-class VivitImageProcessingTester(unittest.TestCase):
+class VivitImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -50,7 +50,6 @@ class VivitImageProcessingTester(unittest.TestCase):
         image_std=[0.5, 0.5, 0.5],
         crop_size=None,
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 18}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
 


@@ -28,7 +28,7 @@ if is_vision_available():
     from transformers import ZoeDepthImageProcessor
 
 
-class ZoeDepthImageProcessingTester(unittest.TestCase):
+class ZoeDepthImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -46,7 +46,6 @@ class ZoeDepthImageProcessingTester(unittest.TestCase):
         image_std=[0.5, 0.5, 0.5],
         do_pad=False,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size