From 05ad826002f613a294169933510a043804a2a354 Mon Sep 17 00:00:00 2001
From: Ita Zaporozhets <31893021+itazap@users.noreply.github.com>
Date: Mon, 2 Jun 2025 15:57:32 +0200
Subject: [PATCH] remove unhandled parameter (#38145)

---
 tests/models/code_llama/test_tokenization_code_llama.py | 3 +--
 tests/models/gemma/test_tokenization_gemma.py           | 3 +--
 tests/models/llama/test_tokenization_llama.py           | 3 +--
 3 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/tests/models/code_llama/test_tokenization_code_llama.py b/tests/models/code_llama/test_tokenization_code_llama.py
index a12d2fc4c55..236ab21d2d2 100644
--- a/tests/models/code_llama/test_tokenization_code_llama.py
+++ b/tests/models/code_llama/test_tokenization_code_llama.py
@@ -231,7 +231,6 @@ class CodeLlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                     batch = tokenizer(
                         text=text,
                         max_length=3,
-                        max_target_length=10,
                         return_tensors="pt",
                     )
                 except NotImplementedError:
@@ -241,7 +240,7 @@ class CodeLlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                 batch = tokenizer(text, max_length=3, return_tensors="pt")
                 self.assertEqual(batch.input_ids.shape[1], 3)
 
-                batch_encoder_only = tokenizer(text=text, max_length=3, max_target_length=10, return_tensors="pt")
+                batch_encoder_only = tokenizer(text=text, max_length=3, return_tensors="pt")
                 self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                 self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                 self.assertNotIn("decoder_input_ids", batch_encoder_only)
diff --git a/tests/models/gemma/test_tokenization_gemma.py b/tests/models/gemma/test_tokenization_gemma.py
index b56a8e95914..91a5cebaed5 100644
--- a/tests/models/gemma/test_tokenization_gemma.py
+++ b/tests/models/gemma/test_tokenization_gemma.py
@@ -79,7 +79,6 @@ class GemmaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                     batch = tokenizer(
                         text=text,
                         max_length=3,
-                        max_target_length=10,
                         return_tensors="pt",
                     )
                 except NotImplementedError:
@@ -89,7 +88,7 @@ class GemmaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                 batch = tokenizer(text, max_length=3, return_tensors="pt")
                 self.assertEqual(batch.input_ids.shape[1], 3)
 
-                batch_encoder_only = tokenizer(text=text, max_length=3, max_target_length=10, return_tensors="pt")
+                batch_encoder_only = tokenizer(text=text, max_length=3, return_tensors="pt")
                 self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                 self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                 self.assertNotIn("decoder_input_ids", batch_encoder_only)
diff --git a/tests/models/llama/test_tokenization_llama.py b/tests/models/llama/test_tokenization_llama.py
index a69ea3948ef..aa2cf161036 100644
--- a/tests/models/llama/test_tokenization_llama.py
+++ b/tests/models/llama/test_tokenization_llama.py
@@ -229,7 +229,6 @@ class LlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                     batch = tokenizer(
                         text=text,
                         max_length=3,
-                        max_target_length=10,
                         return_tensors="pt",
                     )
                 except NotImplementedError:
@@ -239,7 +238,7 @@ class LlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                 batch = tokenizer(text, max_length=3, return_tensors="pt")
                 self.assertEqual(batch.input_ids.shape[1], 3)
 
-                batch_encoder_only = tokenizer(text=text, max_length=3, max_target_length=10, return_tensors="pt")
+                batch_encoder_only = tokenizer(text=text, max_length=3, return_tensors="pt")
                 self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                 self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                 self.assertNotIn("decoder_input_ids", batch_encoder_only)
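
For context on what was removed: per the commit title, max_target_length is not a
parameter these tokenizers handle, so passing it had no effect on the encoded batch.
Below is a minimal standalone sketch of the call pattern the updated tests exercise,
assuming the public PreTrainedTokenizer.__call__ API; the checkpoint name and sample
sentences are illustrative, not taken from the tests:

    # A minimal sketch, not part of the patch. Assumes the public
    # PreTrainedTokenizer.__call__ API; the checkpoint name and the
    # sentences are illustrative only.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")

    text = [
        "UN Chief says there is no military solution in Syria",
        "Another, much shorter sentence.",
    ]
    batch = tokenizer(
        text=text,
        max_length=3,
        truncation=True,       # explicitly cap each sequence at 3 tokens
        padding="max_length",  # pad shorter sequences up to 3 tokens
        return_tensors="pt",
    )
    assert batch.input_ids.shape == (2, 3)
    assert batch.attention_mask.shape == (2, 3)
    # Encoder-only tokenization produces no decoder inputs, matching the tests.
    assert "decoder_input_ids" not in batch

Passing truncation=True makes the length cap explicit; when only max_length is given,
current versions of transformers fall back to a legacy implicit-truncation path (with
a warning), which appears to be what the tests above rely on.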