fix: Replace deprecated assertEquals with assertEqual (#30241)

Replace all remaining uses of the deprecated unittest alias assertEquals with assertEqual across the test suite.
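For context: assertEquals has been a deprecated alias of assertEqual since Python 3.2, and the alias was removed outright in Python 3.12, so the old spelling breaks on current interpreters. A minimal self-contained sketch of the behaviour (the AliasDemo test class is illustrative only, not part of this change):

    import unittest
    import warnings

    class AliasDemo(unittest.TestCase):
        def test_alias_warns(self):
            # On Python 3.2-3.11 assertEquals still works but emits a
            # DeprecationWarning; on 3.12+ the alias no longer exists and
            # this attribute access raises AttributeError instead.
            with warnings.catch_warnings(record=True) as caught:
                warnings.simplefilter("always")
                self.assertEquals(1, 1)
            self.assertTrue(any(issubclass(w.category, DeprecationWarning) for w in caught))

    if __name__ == "__main__":
        unittest.main()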
Sai-Suraj-27 2024-04-15 14:06:06 +05:30 committed by GitHub
parent 8fd2de933c
commit 06b1192768
12 changed files with 36 additions and 36 deletions
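The edit itself is mechanical, so a scripted pass is one way to catch every occurrence at once. A rough sketch, assuming the repository layout keeps these files under tests/ (the path and the in-place rewrite are assumptions, not part of this commit):

    from pathlib import Path

    # Rewrite the deprecated alias in every test module under tests/ (path assumed).
    for path in Path("tests").rglob("*.py"):
        source = path.read_text(encoding="utf-8")
        updated = source.replace("self.assertEquals(", "self.assertEqual(")
        if updated != source:
            path.write_text(updated, encoding="utf-8")
            print(f"rewrote {path}")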


@@ -173,7 +173,7 @@ class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
input_speech = self._load_datasamples(1)
feature_extractor = ASTFeatureExtractor()
input_values = feature_extractor(input_speech, return_tensors="pt").input_values
-self.assertEquals(input_values.shape, (1, 1024, 128))
+self.assertEqual(input_values.shape, (1, 1024, 128))
self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
def test_feat_extract_from_and_save_pretrained(self):


@@ -158,7 +158,7 @@ class EnCodecFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
input_audio = self._load_datasamples(1)
feature_extractor = EncodecFeatureExtractor()
input_values = feature_extractor(input_audio, return_tensors="pt").input_values
-self.assertEquals(input_values.shape, (1, 1, 93680))
+self.assertEqual(input_values.shape, (1, 1, 93680))
self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
def test_integration_stereo(self):
@@ -177,7 +177,7 @@ class EnCodecFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
input_audio[0][1] *= 0.5
feature_extractor = EncodecFeatureExtractor(feature_size=2)
input_values = feature_extractor(input_audio, return_tensors="pt").input_values
-self.assertEquals(input_values.shape, (1, 2, 93680))
+self.assertEqual(input_values.shape, (1, 2, 93680))
self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
self.assertTrue(torch.allclose(input_values[0, 1, :30], EXPECTED_INPUT_VALUES * 0.5, atol=1e-6))
@@ -197,27 +197,27 @@ class EnCodecFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
# truncate to chunk
truncated_outputs = feature_extractor(input_audio, truncation=True, return_tensors="pt").input_values
-self.assertEquals(truncated_outputs.shape, (2, 1, 71520)) # 2 chunks
+self.assertEqual(truncated_outputs.shape, (2, 1, 71520)) # 2 chunks
# force truncate to max_length
truncated_outputs = feature_extractor(
input_audio, truncation=True, max_length=48000, return_tensors="pt"
).input_values
-self.assertEquals(truncated_outputs.shape, (2, 1, 48000))
+self.assertEqual(truncated_outputs.shape, (2, 1, 48000))
# pad to chunk
padded_outputs = feature_extractor(input_audio, padding=True, return_tensors="pt").input_values
-self.assertEquals(padded_outputs.shape, (2, 1, 95280))
+self.assertEqual(padded_outputs.shape, (2, 1, 95280))
# pad to chunk
truncated_outputs = feature_extractor(input_audio, return_tensors="pt").input_values
-self.assertEquals(truncated_outputs.shape, (2, 1, 95280))
+self.assertEqual(truncated_outputs.shape, (2, 1, 95280))
# force pad to max length
truncated_outputs = feature_extractor(
input_audio, padding="max_length", max_length=100000, return_tensors="pt"
).input_values
-self.assertEquals(truncated_outputs.shape, (2, 1, 100000))
+self.assertEqual(truncated_outputs.shape, (2, 1, 100000))
# force no pad
with self.assertRaisesRegex(
@@ -227,7 +227,7 @@ class EnCodecFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
truncated_outputs = feature_extractor(input_audio, padding=False, return_tensors="pt").input_values
truncated_outputs = feature_extractor(input_audio[0], padding=False, return_tensors="pt").input_values
-self.assertEquals(truncated_outputs.shape, (1, 1, 93680))
+self.assertEqual(truncated_outputs.shape, (1, 1, 93680))
# no pad if no chunk_length_s
feature_extractor.chunk_length_s = None
@@ -238,7 +238,7 @@ class EnCodecFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
truncated_outputs = feature_extractor(input_audio, padding=False, return_tensors="pt").input_values
truncated_outputs = feature_extractor(input_audio[0], padding=False, return_tensors="pt").input_values
-self.assertEquals(truncated_outputs.shape, (1, 1, 93680))
+self.assertEqual(truncated_outputs.shape, (1, 1, 93680))
# no pad if no overlap
feature_extractor.chunk_length_s = 2
@@ -250,4 +250,4 @@ class EnCodecFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
truncated_outputs = feature_extractor(input_audio, padding=False, return_tensors="pt").input_values
truncated_outputs = feature_extractor(input_audio[0], padding=False, return_tensors="pt").input_values
-self.assertEquals(truncated_outputs.shape, (1, 1, 93680))
+self.assertEqual(truncated_outputs.shape, (1, 1, 93680))


@@ -510,7 +510,7 @@ class GitModelIntegrationTest(unittest.TestCase):
expected_shape = torch.Size((1, 9))
self.assertEqual(outputs.sequences.shape, expected_shape)
-self.assertEquals(generated_caption, "two cats laying on a pink blanket")
+self.assertEqual(generated_caption, "two cats laying on a pink blanket")
self.assertTrue(outputs.scores[-1].shape, expected_shape)
expected_slice = torch.tensor([[-0.8805, -0.8803, -0.8799]], device=torch_device)
self.assertTrue(torch.allclose(outputs.scores[-1][0, :3], expected_slice, atol=1e-4))
@@ -537,7 +537,7 @@ class GitModelIntegrationTest(unittest.TestCase):
expected_shape = torch.Size((1, 15))
self.assertEqual(generated_ids.shape, expected_shape)
-self.assertEquals(generated_caption, "what does the front of the bus say at the top? special")
+self.assertEqual(generated_caption, "what does the front of the bus say at the top? special")
def test_batched_generation(self):
processor = GitProcessor.from_pretrained("microsoft/git-base-coco")
@@ -555,4 +555,4 @@ class GitModelIntegrationTest(unittest.TestCase):
generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
generated_captions = processor.batch_decode(generated_ids, skip_special_tokens=True)
-self.assertEquals(generated_captions, ["two cats sleeping on a pink blanket next to remotes."] * 2)
+self.assertEqual(generated_captions, ["two cats sleeping on a pink blanket next to remotes."] * 2)


@@ -297,8 +297,8 @@ class Mask2FormerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (2, 512, 512))
self.assertEqual(inputs["mask_labels"][1].shape, (4, 512, 512))
-self.assertEquals(inputs["mask_labels"][0].sum().item(), 41527.0)
-self.assertEquals(inputs["mask_labels"][1].sum().item(), 26259.0)
+self.assertEqual(inputs["mask_labels"][0].sum().item(), 41527.0)
+self.assertEqual(inputs["mask_labels"][1].sum().item(), 26259.0)
def test_integration_semantic_segmentation(self):
# load 2 images and corresponding semantic annotations from the hub
@@ -339,8 +339,8 @@ class Mask2FormerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (3, 512, 512))
self.assertEqual(inputs["mask_labels"][1].shape, (8, 512, 512))
-self.assertEquals(inputs["mask_labels"][0].sum().item(), 170200.0)
-self.assertEquals(inputs["mask_labels"][1].sum().item(), 257036.0)
+self.assertEqual(inputs["mask_labels"][0].sum().item(), 170200.0)
+self.assertEqual(inputs["mask_labels"][1].sum().item(), 257036.0)
def test_integration_panoptic_segmentation(self):
# load 2 images and corresponding panoptic annotations from the hub
@@ -400,8 +400,8 @@ class Mask2FormerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (79, 512, 711))
self.assertEqual(inputs["mask_labels"][1].shape, (61, 512, 711))
-self.assertEquals(inputs["mask_labels"][0].sum().item(), 315193.0)
-self.assertEquals(inputs["mask_labels"][1].sum().item(), 350747.0)
+self.assertEqual(inputs["mask_labels"][0].sum().item(), 315193.0)
+self.assertEqual(inputs["mask_labels"][1].sum().item(), 350747.0)
def test_binary_mask_to_rle(self):
fake_binary_mask = np.zeros((20, 50))


@@ -297,8 +297,8 @@ class MaskFormerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (2, 512, 512))
self.assertEqual(inputs["mask_labels"][1].shape, (4, 512, 512))
-self.assertEquals(inputs["mask_labels"][0].sum().item(), 41527.0)
-self.assertEquals(inputs["mask_labels"][1].sum().item(), 26259.0)
+self.assertEqual(inputs["mask_labels"][0].sum().item(), 41527.0)
+self.assertEqual(inputs["mask_labels"][1].sum().item(), 26259.0)
def test_integration_semantic_segmentation(self):
# load 2 images and corresponding semantic annotations from the hub
@@ -339,8 +339,8 @@ class MaskFormerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (3, 512, 512))
self.assertEqual(inputs["mask_labels"][1].shape, (8, 512, 512))
-self.assertEquals(inputs["mask_labels"][0].sum().item(), 170200.0)
-self.assertEquals(inputs["mask_labels"][1].sum().item(), 257036.0)
+self.assertEqual(inputs["mask_labels"][0].sum().item(), 170200.0)
+self.assertEqual(inputs["mask_labels"][1].sum().item(), 257036.0)
def test_integration_panoptic_segmentation(self):
# load 2 images and corresponding panoptic annotations from the hub
@@ -400,8 +400,8 @@ class MaskFormerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (79, 512, 711))
self.assertEqual(inputs["mask_labels"][1].shape, (61, 512, 711))
-self.assertEquals(inputs["mask_labels"][0].sum().item(), 315193.0)
-self.assertEquals(inputs["mask_labels"][1].sum().item(), 350747.0)
+self.assertEqual(inputs["mask_labels"][0].sum().item(), 315193.0)
+self.assertEqual(inputs["mask_labels"][1].sum().item(), 350747.0)
def test_binary_mask_to_rle(self):
fake_binary_mask = np.zeros((20, 50))


@@ -88,13 +88,13 @@ class RemBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
encoded_string = tokenizer.encode(text)
self.assertListEqual(encoded_string, [1000, 7, 0, 1001])
decode_text = tokenizer.convert_tokens_to_string(tokens)
-self.assertEquals(decode_text, text)
+self.assertEqual(decode_text, text)
text = "That's awesome! 🤩 #HuggingFace, 🌟 Have a great day! 🌈"
tokens = tokenizer.tokenize(text)
self.assertListEqual( tokens, ['▁That', "'", 's', '▁a', 'w', 'es', 'ome', '!', '', '🤩', '', '#', 'H', 'u', 'g', 'g', 'ing', 'F', 'a', 'ce', ',', '', '🌟', '▁H', 'a', 've', '▁a', '▁great', '▁day', '!', '', '🌈']) # fmt: skip
decode_text = tokenizer.convert_tokens_to_string(tokens)
-self.assertEquals(decode_text, "That's awesome! 🤩 #HuggingFace, 🌟 Have a great day! 🌈")
+self.assertEqual(decode_text, "That's awesome! 🤩 #HuggingFace, 🌟 Have a great day! 🌈")
text = "In the sky up above"
tokens = tokenizer._tokenize(text)


@@ -277,7 +277,7 @@ class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
input_speech = self._load_datasamples(1)
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
input_features = feature_extractor(input_speech, return_tensors="pt").input_features
-self.assertEquals(input_features.shape, (1, 584, 24))
+self.assertEqual(input_features.shape, (1, 584, 24))
self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
def test_feat_extract_from_and_save_pretrained(self):


@@ -401,7 +401,7 @@ class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
input_speech = self._load_datasamples(1)
feature_extractor = SpeechT5FeatureExtractor()
input_values = feature_extractor(input_speech, return_tensors="pt").input_values
-self.assertEquals(input_values.shape, (1, 93680))
+self.assertEqual(input_values.shape, (1, 93680))
self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
def test_integration_target(self):
@@ -417,5 +417,5 @@ class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
input_speech = self._load_datasamples(1)
feature_extractor = SpeechT5FeatureExtractor()
input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
-self.assertEquals(input_values.shape, (1, 366, 80))
+self.assertEqual(input_values.shape, (1, 366, 80))
self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))


@@ -176,7 +176,7 @@ class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
feature_extractor = TvltFeatureExtractor()
audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
-self.assertEquals(audio_values.shape, (1, 1, 192, 128))
+self.assertEqual(audio_values.shape, (1, 1, 192, 128))
expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))


@@ -574,4 +574,4 @@ class UdopModelIntegrationTests(unittest.TestCase):
predicted_ids = model.generate(**encoding)
predicted_text = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
-self.assertEquals(predicted_text, "2013")
+self.assertEqual(predicted_text, "2013")


@@ -792,7 +792,7 @@ class FlaxModelTesterMixin:
types = flatten_dict(types)
for name, type_ in types.items():
-self.assertEquals(type_, jnp.float32, msg=f"param {name} is not initialized in fp32.")
+self.assertEqual(type_, jnp.float32, msg=f"param {name} is not initialized in fp32.")
def test_to_bf16(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()


@@ -1608,7 +1608,7 @@ class TokenizerTesterMixin:
with self.subTest(f"{(chunk/len(input_full_vocab_string))*100}%"):
slow_encode = slow_tokenizer.encode(string_to_check)
fast_encode = rust_tokenizer.encode(string_to_check)
-self.assertEquals(
+self.assertEqual(
slow_encode,
fast_encode,
"Hint: the following tokenization diff were obtained for slow vs fast:\n "
@@ -1620,7 +1620,7 @@ class TokenizerTesterMixin:
for chunk in range(0, len(input_full_vocab_ids) - 100, 100):
ids_to_decode = input_full_vocab_ids[chunk : chunk + 100]
with self.subTest(f"{(chunk/len(input_full_vocab_string))*100}%"):
-self.assertEquals(
+self.assertEqual(
slow_tokenizer.decode(
ids_to_decode,
space_between_special_tokens=False,