[pytest collection] Fix flax test collection (#34004)

A bit weird, but to make the collection filtering work I had to use this.
Arthur 2024-10-07 18:11:13 +02:00 committed by GitHub
parent 55be7c4c48
commit 736c7cde51
13 changed files with 13 additions and 0 deletions
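The change is identical in every file: each Flax*ModelTester defines its own __init__, and the commit ends that __init__ with a call to super().__init__(). Since pytest always collects unittest.TestCase subclasses regardless of their name, these *Tester classes get picked up during collection, and the explicit call ensures the TestCase base class is actually initialized. A minimal, illustrative sketch of the pattern (the class name and arguments below are invented, not taken from the diff):

import unittest


class FlaxToyModelTester(unittest.TestCase):
    # Illustrative stand-in for the Flax*ModelTester classes touched by this
    # commit; the real testers take many more configuration arguments.
    def __init__(self, batch_size=13, seq_length=7):
        self.batch_size = batch_size
        self.seq_length = seq_length
        # The one-line addition from this commit: finish the custom __init__
        # by delegating to unittest.TestCase.__init__, which sets up the
        # base class state (default test method name, cleanup lists, etc.).
        super().__init__()

    def prepare_config_and_inputs(self):
        return {"batch_size": self.batch_size, "seq_length": self.seq_length}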

@@ -80,6 +80,7 @@ class FlaxAlbertModelTester(unittest.TestCase):
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
+        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

@@ -79,6 +79,7 @@ class FlaxBeitModelTester(unittest.TestCase):
         # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
         num_patches = (image_size // patch_size) ** 2
         self.seq_length = num_patches + 1
+        super().__init__()

     def prepare_config_and_inputs(self):
         pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

@@ -79,6 +79,7 @@ class FlaxBertModelTester(unittest.TestCase):
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
+        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

@@ -90,6 +90,7 @@ class FlaxBigBirdModelTester(unittest.TestCase):
         self.use_bias = use_bias
         self.block_size = block_size
         self.num_random_blocks = num_random_blocks
+        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

@@ -79,6 +79,7 @@ class FlaxDistilBertModelTester(unittest.TestCase):
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
+        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

@@ -67,6 +67,7 @@ class FlaxElectraModelTester(unittest.TestCase):
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
+        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

@@ -116,6 +116,7 @@ class FlaxMBartModelTester(unittest.TestCase):
         self.bos_token_id = bos_token_id
         self.decoder_start_token_id = decoder_start_token_id
         self.initializer_range = initializer_range
+        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)

@@ -65,6 +65,7 @@ class FlaxRegNetModelTester(unittest.TestCase):
         self.num_labels = num_labels
         self.scope = scope
         self.num_stages = len(hidden_sizes)
+        super().__init__()

     def prepare_config_and_inputs(self):
         pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

@@ -64,6 +64,7 @@ class FlaxResNetModelTester(unittest.TestCase):
         self.num_labels = num_labels
         self.scope = scope
         self.num_stages = len(hidden_sizes)
+        super().__init__()

     def prepare_config_and_inputs(self):
         pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

@@ -78,6 +78,7 @@ class FlaxRobertaModelTester(unittest.TestCase):
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
+        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

@@ -81,6 +81,7 @@ class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
+        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

@@ -79,6 +79,7 @@ class FlaxRoFormerModelTester(unittest.TestCase):
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
+        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

@@ -72,6 +72,7 @@ class FlaxViTModelTester(unittest.TestCase):
         # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
         num_patches = (image_size // patch_size) ** 2
         self.seq_length = num_patches + 1
+        super().__init__()

     def prepare_config_and_inputs(self):
         pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
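
For context, here is a hedged sketch of how a tester like the ones above is typically driven from a companion test class that pytest collects by name; the class names, the parent argument, and the check below are illustrative rather than copied from the repository:

import unittest


class ToyModelTester(unittest.TestCase):
    # Illustrative tester following the post-commit pattern.
    def __init__(self, parent=None, batch_size=13):
        self.parent = parent  # the driving test case (hypothetical here)
        self.batch_size = batch_size
        # Same one-line addition as in the diffs above.
        super().__init__()

    def create_and_check_model(self):
        # A real tester would build a Flax model and assert on its outputs
        # via self.parent; here we only check the stored config value.
        self.parent.assertEqual(self.batch_size, 13)


class ToyModelTest(unittest.TestCase):
    # The class pytest collects as a test; it instantiates the tester in
    # setUp and delegates the actual checks to it.
    def setUp(self):
        self.model_tester = ToyModelTester(parent=self)

    def test_model(self):
        self.model_tester.create_and_check_model()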