From ecfe9be7054e81f8841b8e97e6599e1a2d35ed7e Mon Sep 17 00:00:00 2001
From: NielsRogge <48327001+NielsRogge@users.noreply.github.com>
Date: Fri, 19 Apr 2024 09:06:01 +0200
Subject: [PATCH] [UDOP] Add special tokens to tokenizer (#29594)

* Add special tokens

* Add special tokens

* Use fmt

* Uncomment code

* Add test

* Remove scripts

* Address comments

* Improve tests

* Address comment

* Remove flag
---
 .../models/udop/convert_udop_to_hf.py       | 18 ++++++++++--
 tests/models/udop/test_tokenization_udop.py | 28 +++++++++++++++++++
 2 files changed, 43 insertions(+), 3 deletions(-)

diff --git a/src/transformers/models/udop/convert_udop_to_hf.py b/src/transformers/models/udop/convert_udop_to_hf.py
index f9cf07f1286..7cbb2f161d5 100644
--- a/src/transformers/models/udop/convert_udop_to_hf.py
+++ b/src/transformers/models/udop/convert_udop_to_hf.py
@@ -119,13 +119,25 @@ def convert_udop_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_h
     assert missing_keys == ["encoder.embed_patches.proj.weight", "encoder.embed_patches.proj.bias"]
     assert unexpected_keys == ["pos_embed"]
 
-    # prepare dummy inputs
-    tokenizer = UdopTokenizer.from_pretrained("t5-base", legacy=True)
+    # Add extra_ids to the special token list
+    # NOTE special tokens have a unique order
+    # see https://github.com/huggingface/transformers/issues/29591 for details
+    # fmt: off
+    additional_special_tokens = [...]  # full list of special token strings omitted here
+    # fmt: on
+
+    tokenizer = UdopTokenizer.from_pretrained(
+        "/Users/nielsrogge/Documents/UDOP/udop-unimodel-large-512",
+        legacy=True,
+        additional_special_tokens=additional_special_tokens,
+    )
     size = {"height": image_size, "width": image_size}
     image_processor = LayoutLMv3ImageProcessor(
         image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, size=size
     )
     processor = UdopProcessor(image_processor=image_processor, tokenizer=tokenizer)
+
+    # prepare dummy inputs
     input_ids, bbox, image = prepare_dummy_inputs(tokenizer, image_processor)
     prompt = "Question answering. In which year is the report made?"
     encoding = processor(images=get_image(), text=prompt, return_tensors="pt")
@@ -183,7 +195,7 @@ def convert_udop_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_h
 
     if pytorch_dump_folder_path is not None:
         model.save_pretrained(pytorch_dump_folder_path)
-        tokenizer.save_pretrained(pytorch_dump_folder_path)
+        processor.save_pretrained(pytorch_dump_folder_path)
 
     if push_to_hub:
         model.push_to_hub(f"microsoft/{model_name}")
diff --git a/tests/models/udop/test_tokenization_udop.py b/tests/models/udop/test_tokenization_udop.py
index 720eb099521..d022128ed14 100644
--- a/tests/models/udop/test_tokenization_udop.py
+++ b/tests/models/udop/test_tokenization_udop.py
@@ -1893,3 +1893,31 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         self.assertListEqual(encoding_p["attention_mask"], [1, 1, 1])
         self.assertDictEqual(dict(encoding_p), dict(encoding_r))
         self.assertEqual(tokenizer_p.decode(encoding_p["input_ids"]), expected_decoding)
+
+    def test_special_tokens(self):
+        tokenizer_p = UdopTokenizer.from_pretrained("microsoft/udop-large")
+        tokenizer_r = UdopTokenizerFast.from_pretrained("microsoft/udop-large")
+
+        # encode
+        text = "paragraph. Hey"
+        encoding_p = tokenizer_p.encode(text)
+        encoding_r = tokenizer_r.encode(text)
+
+        assert encoding_p == encoding_r == [8986, 32942, 3, 5, 9459, 1]
+
+        # decode
+        # this is different between the slow and fast tokenizer,
+        # due to the former having `spaces_between_special_tokens=True` by default
+        ids = [0, 8986, 32942, 32966, 32554, 32551, 1]
+
+        # test slow tokenizer
+        decoding = tokenizer_p.decode(ids, spaces_between_special_tokens=False)
+
+        expected_decoding = "paragraph"
+        assert decoding == expected_decoding
+
+        # test fast tokenizer
+        decoding = tokenizer_r.decode(ids)
+
+        expected_decoding = " paragraph"
+        assert decoding == expected_decoding
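
The conversion script now saves the processor rather than only the tokenizer, so the image processor, the tokenizer, and its added special tokens all travel together. A minimal sketch of how one might check that the tokens survive the round trip, assuming the converted checkpoint has been pushed to the Hub as microsoft/udop-large (the repo the new test relies on):

    from transformers import UdopProcessor, UdopTokenizer

    # Load the processor written by the conversion script; the tokenizer inside it
    # carries the additional special tokens registered during conversion.
    processor = UdopProcessor.from_pretrained("microsoft/udop-large")
    tokenizer = processor.tokenizer

    # Each added special token is exposed via additional_special_tokens and maps
    # to a single id, so it is never split into sub-word pieces.
    print(len(tokenizer.additional_special_tokens))
    token = tokenizer.additional_special_tokens[0]
    print(token, tokenizer.convert_tokens_to_ids(token))

    # The slow tokenizer loaded from the same repo should agree on that id,
    # which is what issue #29591 was about.
    slow_tokenizer = UdopTokenizer.from_pretrained("microsoft/udop-large")
    assert slow_tokenizer.convert_tokens_to_ids(token) == tokenizer.convert_tokens_to_ids(token)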
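The new test also pins down a decode asymmetry between the two implementations: the slow tokenizer defaults to spaces_between_special_tokens=True, while the fast one does not, so the slow side has to disable that flag to line up with the fast output. A small sketch of the same comparison outside the test harness, reusing the ids from the test above:

    from transformers import UdopTokenizer, UdopTokenizerFast

    slow = UdopTokenizer.from_pretrained("microsoft/udop-large")
    fast = UdopTokenizerFast.from_pretrained("microsoft/udop-large")

    # ids from the test above: pad token (0), text pieces, added special tokens, </s> (1)
    ids = [0, 8986, 32942, 32966, 32554, 32551, 1]

    # with the slow tokenizer's default, consecutive special tokens come back space-separated
    print(repr(slow.decode(ids)))

    # disabling the flag brings the slow output close to the fast tokenizer's;
    # the test above pins down the exact remaining difference (a leading space)
    print(repr(slow.decode(ids, spaces_between_special_tokens=False)))
    print(repr(fast.decode(ids)))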