remove unhandled parameter (#38145)

Author: Ita Zaporozhets, 2025-06-02 15:57:32 +02:00 (committed by GitHub)
parent c72ba69441
commit 05ad826002
3 changed files with 3 additions and 6 deletions
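
All three test files passed max_target_length=10 to decoder-only tokenizers (CodeLlama, Gemma, Llama). That argument is a leftover from the legacy seq2seq prepare_seq2seq_batch API; a decoder-only tokenizer has no separate target sequence, so the parameter was never handled. A minimal sketch of the corrected call (the checkpoint name and truncation=True are illustrative assumptions, not part of this diff):

from transformers import AutoTokenizer

# Any Llama-family tokenizer behaves the same; this checkpoint is an assumption.
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")

text = ["A long paragraph for tokenization.", "Another paragraph."]

# max_length constrains the (only) input sequence; there is no target
# sequence for max_target_length to act on.
batch = tokenizer(text=text, max_length=3, truncation=True, return_tensors="pt")
assert batch.input_ids.shape[1] == 3
assert "decoder_input_ids" not in batch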

tests/models/code_llama/test_tokenization_code_llama.py

@@ -231,7 +231,6 @@ class CodeLlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             batch = tokenizer(
                 text=text,
                 max_length=3,
-                max_target_length=10,
                 return_tensors="pt",
             )
         except NotImplementedError:
@@ -241,7 +240,7 @@ class CodeLlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         batch = tokenizer(text, max_length=3, return_tensors="pt")
         self.assertEqual(batch.input_ids.shape[1], 3)
-        batch_encoder_only = tokenizer(text=text, max_length=3, max_target_length=10, return_tensors="pt")
+        batch_encoder_only = tokenizer(text=text, max_length=3, return_tensors="pt")
         self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
         self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
         self.assertNotIn("decoder_input_ids", batch_encoder_only)

tests/models/gemma/test_tokenization_gemma.py

@@ -79,7 +79,6 @@ class GemmaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             batch = tokenizer(
                 text=text,
                 max_length=3,
-                max_target_length=10,
                 return_tensors="pt",
             )
         except NotImplementedError:
@@ -89,7 +88,7 @@ class GemmaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         batch = tokenizer(text, max_length=3, return_tensors="pt")
         self.assertEqual(batch.input_ids.shape[1], 3)
-        batch_encoder_only = tokenizer(text=text, max_length=3, max_target_length=10, return_tensors="pt")
+        batch_encoder_only = tokenizer(text=text, max_length=3, return_tensors="pt")
         self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
         self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
         self.assertNotIn("decoder_input_ids", batch_encoder_only)

tests/models/llama/test_tokenization_llama.py

@@ -229,7 +229,6 @@ class LlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             batch = tokenizer(
                 text=text,
                 max_length=3,
-                max_target_length=10,
                 return_tensors="pt",
             )
         except NotImplementedError:
@@ -239,7 +238,7 @@ class LlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         batch = tokenizer(text, max_length=3, return_tensors="pt")
         self.assertEqual(batch.input_ids.shape[1], 3)
-        batch_encoder_only = tokenizer(text=text, max_length=3, max_target_length=10, return_tensors="pt")
+        batch_encoder_only = tokenizer(text=text, max_length=3, return_tensors="pt")
         self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
         self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
         self.assertNotIn("decoder_input_ids", batch_encoder_only)
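
For encoder-decoder models, where a target length is meaningful, the supported pattern is the text_target argument of the tokenizer's __call__ rather than the old max_target_length of prepare_seq2seq_batch. A sketch (the t5-small checkpoint is an assumption; max_length here applies to inputs and targets tokenized in the same call):

from transformers import AutoTokenizer

t5_tokenizer = AutoTokenizer.from_pretrained("t5-small")  # assumed checkpoint

enc = t5_tokenizer(
    text="translate English to German: How are you?",
    text_target="Wie geht es dir?",
    max_length=16,
    truncation=True,
)

# Target token ids come back under "labels"; no decoder_input_ids are created here.
assert "labels" in enc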