Fix padding for IDEFICS (#26396)

* fix

* fixup

* tests

* fixup
This commit is contained in:
Shauray Singh 2023-09-27 14:26:07 +05:30 committed by GitHub
parent 408b2b3c50
commit abd2531034
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 30 additions and 10 deletions

View File

@@ -280,7 +280,7 @@ class IdeficsProcessor(ProcessorMixin):
else:
return fake_token + image_token + fake_token
all_texts = []
all_prompts = []
all_images = []
for sample in prompts:
# the model was trained on samples starting with <s>
@@ -321,17 +321,18 @@ class IdeficsProcessor(ProcessorMixin):
image_objects = self.image_processor(image_objects, transform=transform)
text_encoding = self.tokenizer(
text=full_text,
add_special_tokens=False,
padding=padding,
truncation=truncation,
max_length=max_length,
)
all_texts.append(text_encoding["input_ids"])
all_prompts.append(full_text)
all_images.append(image_objects)
text_encoding = self.tokenizer(
text=all_prompts,
add_special_tokens=False,
padding=padding,
truncation=truncation,
max_length=max_length,
)
all_texts = text_encoding["input_ids"]
max_seq_len = max(len(x) for x in all_texts)
# max_num_images has to be at least 1 even when there are no images

View File

@@ -141,6 +141,25 @@ class IdeficsProcessorTest(TestCasePlus):
self.assertListEqual(decoded_tok, decoded_processor)
def test_tokenizer_padding(self):
    """Check that `padding="max_length"` and `padding="longest"` both right-pad
    the batched prompts to the expected token sequences."""
    img_proc = self.get_image_processor()
    tok = self.get_tokenizer(padding_side="right")
    proc = IdeficsProcessor(tokenizer=tok, image_processor=img_proc)

    # Expected decodings: same prompt text, differing only in how many
    # <unk> pad tokens each padding strategy appends on the right.
    expected = [
        "<s>Describe this image.\nAssistant:<unk><unk><unk><unk><unk><unk><unk><unk><unk>",
        "<s>Describe this image.\nAssistant:<unk><unk><unk><unk><unk><unk><unk><unk><unk><unk>",
    ]
    batch = [[p] for p in self.prepare_prompts()[2]]

    # Run the processor once per padding strategy on the same batch.
    out_max_length = proc(batch, padding="max_length", truncation=True, max_length=20)
    out_longest = proc(batch, padding="longest", truncation=True, max_length=30)

    got_max_length = proc.tokenizer.decode(out_max_length["input_ids"][-1])
    got_longest = proc.tokenizer.decode(out_longest["input_ids"][-1])

    self.assertEqual(got_max_length, expected[1])
    self.assertEqual(got_longest, expected[0])
def test_model_input_names(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()