Add UDOP (#22940)
* First draft
* More improvements
* More improvements
* More fixes
* Fix copies
* More improvements
* More fixes
* More improvements
* Convert checkpoint
* More improvements, set up tests
* Fix more tests
* Add UdopModel
* More improvements
* Fix equivalence test
* More fixes
* Redesign model
* Extend conversion script
* Use real inputs for conversion script
* Add image processor
* Improve conversion script
* Add UdopTokenizer
* Add fast tokenizer
* Add converter
* Update README's
* Add processor
* Add fully fledged tokenizer
* Add fast tokenizer
* Use processor in conversion script
* Add tokenizer tests
* Fix one more test
* Fix more tests
* Fix tokenizer tests
* Enable fast tokenizer tests
* Fix more tests
* Fix additional_special_tokens of fast tokenizer
* Fix tokenizer tests
* Fix more tests
* Fix equivalence test
* Rename image to pixel_values
* Rename seg_data to bbox
* More renamings
* Remove vis_special_token
* More improvements
* Add docs
* Fix copied from
* Update slow tokenizer
* Update fast tokenizer design
* Make text input optional
* Add first draft of processor tests
* Fix more processor tests
* Fix decoder_start_token_id
* Fix test_initialization
* Add integration test
* More improvements
* Improve processor, add test
* Add more copied from
* Add more copied from
* Add more copied from
* Add more copied from
* Remove print statement
* Update README and auto mapping
* Delete files
* Delete another file
* Remove code
* Fix test
* Fix docs
* Remove asserts
* Add doc tests
* Include UDOP in exotic model tests
* Add expected tesseract decodings
* Add sentencepiece
* Use same design as T5
* Add UdopEncoderModel
* Add UdopEncoderModel to tests
* More fixes
* Fix fast tokenizer
* Fix one more test
* Remove parallelisable attribute
* Fix copies
* Remove legacy file
* Copy from T5Tokenizer
* Fix rebase
* More fixes, copy from T5
* More fixes
* Fix init
* Use ArthurZ/udop for tests
* Make all model tests pass
* Remove UdopForConditionalGeneration from auto mapping
* Fix more tests
* fixups
* more fixups
* fix the tokenizers
* remove un-necessary changes
* nits
* nits
* replace truncate_sequences_boxes with truncate_sequences for fix-copies
* nit current path
* add a test for input ids
* ids that we should get taken from c9f7a32f57
* nits converting
* nits
* apply ruff
* nits
* nits
* style
* fix slow order of addition
* fix udop fast range as well
* fixup
* nits
* Add docstrings
* Fix gradient checkpointing
* Update code examples
* Skip tests
* Update integration test
* Address comment
* Make fixup
* Remove extra ids from tokenizer
* Skip test
* Apply suggestions from code review
Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com>
* Update year
* Address comment
* Address more comments
* Address comments
* Add copied from
* Update CI
* Rename script
* Update model id
* Add AddedToken, skip tests
* Update CI
* Fix doc tests
* Do not use Tesseract for the doc tests
* Remove kwargs
* Add original inputs
* Update casting
* Fix doc test
* Update question
* Update question
* Use LayoutLMv3ImageProcessor
* Update organization
* Improve docs
* Update forward signature
* Make images optional
* Remove deprecated device argument
* Add comment, add add_prefix_space
* More improvements
* Remove kwargs
---------
Co-authored-by: ArthurZucker <arthur.zucker@gmail.com>
Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com>
This commit is contained in: parent ed74d97871 · commit 836921fdeb
@@ -475,6 +475,7 @@ exotic_models_job = CircleCIJob(
        "pip install -U --upgrade-strategy eager 'git+https://github.com/facebookresearch/detectron2.git'",
        "sudo apt install tesseract-ocr",
        "pip install -U --upgrade-strategy eager pytesseract",
        "pip install --upgrade-strategy eager sentencepiece",
        "pip install -U --upgrade-strategy eager natten==0.15.1+torch210cpu -f https://shi-labs.com/natten/wheels",
        "pip install -U --upgrade-strategy eager python-Levenshtein",
        "pip install -U --upgrade-strategy eager opencv-python",
@@ -485,6 +486,7 @@ exotic_models_job = CircleCIJob(
        "tests/models/*layoutlmv*",
        "tests/models/*nat",
        "tests/models/deta",
        "tests/models/udop",
        "tests/models/nougat",
    ],
    pytest_num_workers=1,
@@ -511,6 +511,7 @@ Current number of checkpoints:
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (from Intel) released with the paper [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) by Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding.
1. **[UDOP](https://huggingface.co/docs/transformers/main/model_doc/udop)** (from Microsoft Research) released with the paper [Unifying Vision, Text, and Layout for Universal Document Processing](https://arxiv.org/abs/2212.02623) by Zineng Tang, Ziyi Yang, Guoxin Wang, Yuwei Fang, Yang Liu, Chenguang Zhu, Michael Zeng, Cha Zhang, Mohit Bansal.
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
@@ -484,6 +484,7 @@ Número actual de puntos de control:
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (from Intel) released with the paper [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) by Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding.
1. **[UDOP](https://huggingface.co/docs/transformers/main/model_doc/udop)** (from Microsoft Research) released with the paper [Unifying Vision, Text, and Layout for Universal Document Processing](https://arxiv.org/abs/2212.02623) by Zineng Tang, Ziyi Yang, Guoxin Wang, Yuwei Fang, Yang Liu, Chenguang Zhu, Michael Zeng, Cha Zhang, Mohit Bansal.
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
@@ -505,6 +505,7 @@ Nombre actuel de points de contrôle :
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (de Microsoft), publié dans l'article [TrOCR : Reconnaissance optique de caractères basée sur un transformateur avec des modèles pré-entraînés](https://arxiv.org/abs/2109.10282) par Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (de l'UNC Chapel Hill) a été publié dans l'article [TVLT : Transformer Vision-Language sans texte](https://arxiv.org/abs/2209.14156) par Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (d'Intel) a été publié dans l'article [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) par Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding.
1. **[UDOP](https://huggingface.co/docs/transformers/main/model_doc/udop)** (de Microsoft Research) publié dans l'article [Unifying Vision, Text, and Layout for Universal Document Processing](https://arxiv.org/abs/2212.02623) par Zineng Tang, Ziyi Yang, Guoxin Wang, Yuwei Fang, Yang Liu, Chenguang Zhu, Michael Zeng, Cha Zhang, Mohit Bansal.
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (de Google Research) a été publié dans l'article [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) par Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler.
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (de Google Research) a été publié dans l'article [UniMax : Échantillonnage linguistique plus équitable et plus efficace pour l'entraînement préalable multilingue à grande échelle](https://openreview.net/forum?id=kXwdL1cWOAi) par Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (de Microsoft Research) a été publié dans l'article [UniSpeech : Apprentissage unifié de la représentation de la parole avec des données étiquetées et non étiquetées](https://arxiv.org/abs/2101.07597) par Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
@@ -458,6 +458,7 @@ conda install conda-forge::transformers
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft) released with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (from Intel) released with the paper [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) by Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding.
1. **[UDOP](https://huggingface.co/docs/transformers/main/model_doc/udop)** (Microsoft Research से) Zineng Tang, Ziyi Yang, Guoxin Wang, Yuwei Fang, Yang Liu, Chenguang Zhu, Michael Zeng, Cha Zhang, Mohit Bansal. द्वाराअनुसंधान पत्र [Unifying Vision, Text, and Layout for Universal Document Processing](https://arxiv.org/abs/2212.02623) के साथ जारी किया गया
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (Google Research से) Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant. द्वाराअनुसंधान पत्र [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) के साथ जारी किया गया
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (माइक्रोसॉफ्ट रिसर्च से) साथ में दिया गया पेपर [UniSpeech: यूनिफाइड स्पीच रिप्रेजेंटेशन लर्निंग विद लेबलेड एंड अनलेबल्ड डेटा](https://arxiv.org/abs/2101.07597) चेंगई वांग, यू वू, याओ कियान, केनिची कुमातानी, शुजी लियू, फुरु वेई, माइकल ज़ेंग, ज़ुएदोंग हुआंग द्वारा।
@@ -518,6 +518,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (Microsoft から), Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei から公開された研究論文: [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282)
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill から), Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal から公開された研究論文: [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156)
1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (Intel から), Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding から公開された研究論文: [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995)
1. **[UDOP](https://huggingface.co/docs/transformers/main/model_doc/udop)** (Microsoft Research から) Zineng Tang, Ziyi Yang, Guoxin Wang, Yuwei Fang, Yang Liu, Chenguang Zhu, Michael Zeng, Cha Zhang, Mohit Bansal. から公開された研究論文 [Unifying Vision, Text, and Layout for Universal Document Processing](https://arxiv.org/abs/2212.02623)
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (Google Research から) Yi Tay, Mostafa Dehghani, Vinh Q から公開された研究論文: [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (Google Research から) Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant. から公開された研究論文 [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi)
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (Microsoft Research から) Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang から公開された研究論文: [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597)
@@ -433,6 +433,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (Microsoft 에서) Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei 의 [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) 논문과 함께 발표했습니다.
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill 에서) Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal 의 [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) 논문과 함께 발표했습니다.
1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (Intel 에서) Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding 의 [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) 논문과 함께 발표했습니다.
1. **[UDOP](https://huggingface.co/docs/transformers/main/model_doc/udop)** (Microsoft Research 에서 제공)은 Zineng Tang, Ziyi Yang, Guoxin Wang, Yuwei Fang, Yang Liu, Chenguang Zhu, Michael Zeng, Cha Zhang, Mohit Bansal.의 [Unifying Vision, Text, and Layout for Universal Document Processing](https://arxiv.org/abs/2212.02623)논문과 함께 발표했습니다.
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (Google Research 에서) Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzle 의 [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) 논문과 함께 발표했습니다.
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (Google Research 에서 제공)은 Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.의 [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi)논문과 함께 발표했습니다.
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (Microsoft Research 에서) Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang 의 [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) 논문과 함께 발표했습니다.
@@ -457,6 +457,7 @@ conda install conda-forge::transformers
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (来自 Microsoft) 伴随论文 [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) 由 Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei 发布。
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (来自 UNC Chapel Hill) 伴随论文 [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) 由 Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal 发布。
1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (来自 Intel) 伴随论文 [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) 由 Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding 发布.
1. **[UDOP](https://huggingface.co/docs/transformers/main/model_doc/udop)** (来自 Microsoft Research) 伴随论文 [Unifying Vision, Text, and Layout for Universal Document Processing](https://arxiv.org/abs/2212.02623) 由 Zineng Tang, Ziyi Yang, Guoxin Wang, Yuwei Fang, Yang Liu, Chenguang Zhu, Michael Zeng, Cha Zhang, Mohit Bansal 发布。
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (来自 Google Research) 伴随论文 [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) 由 Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant 发布。
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (来自 Microsoft Research) 伴随论文 [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) 由 Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang 发布。
@@ -469,6 +469,7 @@ conda install conda-forge::transformers
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft) released with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (from Intel) released with the paper [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) by Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding.
1. **[UDOP](https://huggingface.co/docs/transformers/main/model_doc/udop)** (from Microsoft Research) released with the paper [Unifying Vision, Text, and Layout for Universal Document Processing](https://arxiv.org/abs/2212.02623) by Zineng Tang, Ziyi Yang, Guoxin Wang, Yuwei Fang, Yang Liu, Chenguang Zhu, Michael Zeng, Cha Zhang, Mohit Bansal.
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
@@ -770,6 +770,8 @@
        title: TVLT
      - local: model_doc/tvp
        title: TVP
      - local: model_doc/udop
        title: UDOP
      - local: model_doc/vilt
        title: ViLT
      - local: model_doc/vipllava
@@ -279,6 +279,7 @@ Flax), PyTorch, and/or TensorFlow.
| [TrOCR](model_doc/trocr) | ✅ | ❌ | ❌ |
| [TVLT](model_doc/tvlt) | ✅ | ❌ | ❌ |
| [TVP](model_doc/tvp) | ✅ | ❌ | ❌ |
| [UDOP](model_doc/udop) | ✅ | ❌ | ❌ |
| [UL2](model_doc/ul2) | ✅ | ✅ | ✅ |
| [UMT5](model_doc/umt5) | ✅ | ❌ | ❌ |
| [UniSpeech](model_doc/unispeech) | ✅ | ❌ | ❌ |
docs/source/en/model_doc/udop.md (new file, 102 lines)
@@ -0,0 +1,102 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# UDOP

## Overview

The UDOP model was proposed in [Unifying Vision, Text, and Layout for Universal Document Processing](https://arxiv.org/abs/2212.02623) by Zineng Tang, Ziyi Yang, Guoxin Wang, Yuwei Fang, Yang Liu, Chenguang Zhu, Michael Zeng, Cha Zhang, Mohit Bansal.
UDOP adopts an encoder-decoder Transformer architecture based on [T5](t5) for document AI tasks like document image classification, document parsing and document visual question answering.

The abstract from the paper is the following:

*We propose Universal Document Processing (UDOP), a foundation Document AI model which unifies text, image, and layout modalities together with varied task formats, including document understanding and generation. UDOP leverages the spatial correlation between textual content and document image to model image, text, and layout modalities with one uniform representation. With a novel Vision-Text-Layout Transformer, UDOP unifies pretraining and multi-domain downstream tasks into a prompt-based sequence generation scheme. UDOP is pretrained on both large-scale unlabeled document corpora using innovative self-supervised objectives and diverse labeled data. UDOP also learns to generate document images from text and layout modalities via masked image reconstruction. To the best of our knowledge, this is the first time in the field of document AI that one model simultaneously achieves high-quality neural document editing and content customization. Our method sets the state-of-the-art on 9 Document AI tasks, e.g., document understanding and QA, across diverse data domains like finance reports, academic papers, and websites. UDOP ranks first on the leaderboard of the Document Understanding Benchmark (DUE).*

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/udop_architecture.jpg"
alt="drawing" width="600"/>

<small> UDOP architecture. Taken from the <a href="https://arxiv.org/abs/2212.02623">original paper.</a> </small>

## Usage tips

- In addition to *input_ids*, [`UdopForConditionalGeneration`] also expects the input `bbox`, which are
the bounding boxes (i.e. 2D positions) of the input tokens. These can be obtained using an external OCR engine such
as Google's [Tesseract](https://github.com/tesseract-ocr/tesseract) (there's a [Python wrapper](https://pypi.org/project/pytesseract/) available). Each bounding box should be in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner of the bounding box, and (x1, y1) represents the
position of the lower right corner. Note that one first needs to normalize the bounding boxes to be on a 0-1000
scale. To normalize, you can use the following function:

```python
def normalize_bbox(bbox, width, height):
    return [
        int(1000 * (bbox[0] / width)),
        int(1000 * (bbox[1] / height)),
        int(1000 * (bbox[2] / width)),
        int(1000 * (bbox[3] / height)),
    ]
```

Here, `width` and `height` correspond to the width and height of the original document in which the token
occurs. Those can be obtained using the Python Imaging Library (PIL), for example, as follows:

```python
from PIL import Image

# Document can be a png, jpg, etc. PDFs must be converted to images.
image = Image.open(name_of_your_document).convert("RGB")

width, height = image.size
```

- At inference time, it's recommended to use the `generate` method to autoregressively generate text given a document image.
- One can use [`UdopProcessor`] to prepare images and text for the model. By default, this class uses the Tesseract engine to extract a list of words
and boxes (coordinates) from a given document. Its functionality is equivalent to that of [`LayoutLMv3Processor`], hence it supports passing either
`apply_ocr=False` in case you prefer to use your own OCR engine or `apply_ocr=True` in case you want the default OCR engine to be used.
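A minimal sketch of how these two tips fit together, assuming the `microsoft/udop-large` checkpoint referenced elsewhere in this PR, a local `document.png`, and a working Tesseract install (illustrative only):

```python
from PIL import Image
from transformers import UdopProcessor, UdopForConditionalGeneration

processor = UdopProcessor.from_pretrained("microsoft/udop-large")
model = UdopForConditionalGeneration.from_pretrained("microsoft/udop-large")

# apply_ocr=True is the default, so the processor runs Tesseract to get words and boxes.
image = Image.open("document.png").convert("RGB")
encoding = processor(images=image, text="Question answering. In which year is the report made?", return_tensors="pt")

# bbox and pixel_values from the processor are forwarded to the model during generation.
outputs = model.generate(**encoding, max_new_tokens=20)
print(processor.batch_decode(outputs, skip_special_tokens=True)[0])
```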
This model was contributed by [nielsr](https://huggingface.co/nielsr).
The original code can be found [here](https://github.com/microsoft/UDOP).

## UdopConfig

[[autodoc]] UdopConfig

## UdopTokenizer

[[autodoc]] UdopTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary

## UdopTokenizerFast

[[autodoc]] UdopTokenizerFast

## UdopProcessor

[[autodoc]] UdopProcessor
    - __call__

## UdopModel

[[autodoc]] UdopModel
    - forward

## UdopForConditionalGeneration

[[autodoc]] UdopForConditionalGeneration
    - forward

## UdopEncoderModel

[[autodoc]] UdopEncoderModel
    - forward
@@ -856,6 +856,11 @@ _import_structure = {
        "TvpConfig",
        "TvpProcessor",
    ],
    "models.udop": [
        "UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "UdopConfig",
        "UdopProcessor",
    ],
    "models.umt5": ["UMT5Config"],
    "models.unispeech": [
        "UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP",
@@ -1135,6 +1140,7 @@ else:
    _import_structure["models.speech_to_text"].append("Speech2TextTokenizer")
    _import_structure["models.speecht5"].append("SpeechT5Tokenizer")
    _import_structure["models.t5"].append("T5Tokenizer")
    _import_structure["models.udop"].append("UdopTokenizer")
    _import_structure["models.xglm"].append("XGLMTokenizer")
    _import_structure["models.xlm_prophetnet"].append("XLMProphetNetTokenizer")
    _import_structure["models.xlm_roberta"].append("XLMRobertaTokenizer")
@@ -1214,6 +1220,7 @@ else:
    _import_structure["models.splinter"].append("SplinterTokenizerFast")
    _import_structure["models.squeezebert"].append("SqueezeBertTokenizerFast")
    _import_structure["models.t5"].append("T5TokenizerFast")
    _import_structure["models.udop"].append("UdopTokenizerFast")
    _import_structure["models.whisper"].append("WhisperTokenizerFast")
    _import_structure["models.xglm"].append("XGLMTokenizerFast")
    _import_structure["models.xlm_roberta"].append("XLMRobertaTokenizerFast")
@@ -3411,6 +3418,15 @@ else:
            "TvpPreTrainedModel",
        ]
    )
    _import_structure["models.udop"].extend(
        [
            "UDOP_PRETRAINED_MODEL_ARCHIVE_LIST",
            "UdopEncoderModel",
            "UdopForConditionalGeneration",
            "UdopModel",
            "UdopPreTrainedModel",
        ],
    )
    _import_structure["models.umt5"].extend(
        [
            "UMT5EncoderModel",
@@ -5640,6 +5656,7 @@ if TYPE_CHECKING:
        TvpConfig,
        TvpProcessor,
    )
    from .models.udop import UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP, UdopConfig, UdopProcessor
    from .models.umt5 import UMT5Config
    from .models.unispeech import (
        UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP,
@@ -5915,6 +5932,7 @@ if TYPE_CHECKING:
    from .models.speech_to_text import Speech2TextTokenizer
    from .models.speecht5 import SpeechT5Tokenizer
    from .models.t5 import T5Tokenizer
    from .models.udop import UdopTokenizer
    from .models.xglm import XGLMTokenizer
    from .models.xlm_prophetnet import XLMProphetNetTokenizer
    from .models.xlm_roberta import XLMRobertaTokenizer
@@ -5987,6 +6005,7 @@ if TYPE_CHECKING:
    from .models.splinter import SplinterTokenizerFast
    from .models.squeezebert import SqueezeBertTokenizerFast
    from .models.t5 import T5TokenizerFast
    from .models.udop import UdopTokenizerFast
    from .models.whisper import WhisperTokenizerFast
    from .models.xglm import XGLMTokenizerFast
    from .models.xlm_roberta import XLMRobertaTokenizerFast
@@ -7827,6 +7846,13 @@ if TYPE_CHECKING:
        TvpModel,
        TvpPreTrainedModel,
    )
    from .models.udop import (
        UDOP_PRETRAINED_MODEL_ARCHIVE_LIST,
        UdopEncoderModel,
        UdopForConditionalGeneration,
        UdopModel,
        UdopPreTrainedModel,
    )
    from .models.umt5 import (
        UMT5EncoderModel,
        UMT5ForConditionalGeneration,
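Together, the `_import_structure` entries and the mirrored `TYPE_CHECKING` imports above are what expose the new UDOP classes at the top level of the package. A quick sanity-check sketch (assuming torch, sentencepiece and tokenizers are all installed, so none of the optional-dependency branches are skipped):

```python
from transformers import (
    UdopConfig,
    UdopEncoderModel,
    UdopForConditionalGeneration,
    UdopModel,
    UdopProcessor,
    UdopTokenizer,
    UdopTokenizerFast,
)

print(UdopConfig().model_type)  # "udop"
```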
@@ -1039,6 +1039,17 @@ class T5Converter(SpmConverter):
        )


class UdopConverter(SpmConverter):
    def post_processor(self):
        return processors.TemplateProcessing(
            single=["$A", "</s>"],
            pair=["$A", "</s>", "$B", "</s>"],
            special_tokens=[
                ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
            ],
        )


class WhisperConverter(Converter):
    def converted(self) -> Tokenizer:
        vocab = self.original_tokenizer.encoder
@@ -1471,6 +1482,7 @@ SLOW_TO_FAST_CONVERTERS = {
    "SeamlessM4TTokenizer": SeamlessM4TConverter,
    "SqueezeBertTokenizer": BertConverter,
    "T5Tokenizer": T5Converter,
    "UdopTokenizer": UdopConverter,
    "WhisperTokenizer": WhisperConverter,
    "XLMRobertaTokenizer": XLMRobertaConverter,
    "XLNetTokenizer": XLNetConverter,
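`UdopConverter` only has to override the post-processor: like T5, the fast tokenizer appends `</s>` to every sequence. A standalone sketch of that `TemplateProcessing` template, using a toy word-level vocabulary rather than the real UDOP sentencepiece model:

```python
from tokenizers import Tokenizer, models, pre_tokenizers, processors

tok = Tokenizer(models.WordLevel({"hello": 0, "world": 1, "</s>": 2}, unk_token="</s>"))
tok.pre_tokenizer = pre_tokenizers.Whitespace()
tok.post_processor = processors.TemplateProcessing(
    single=["$A", "</s>"],
    pair=["$A", "</s>", "$B", "</s>"],
    special_tokens=[("</s>", 2)],
)

print(tok.encode("hello").tokens)           # ['hello', '</s>']
print(tok.encode("hello", "world").tokens)  # ['hello', '</s>', 'world', '</s>']
```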
@@ -220,6 +220,7 @@ from . import (
    trocr,
    tvlt,
    tvp,
    udop,
    umt5,
    unispeech,
    unispeech_sat,

@@ -231,6 +231,7 @@ CONFIG_MAPPING_NAMES = OrderedDict(
        ("trocr", "TrOCRConfig"),
        ("tvlt", "TvltConfig"),
        ("tvp", "TvpConfig"),
        ("udop", "UdopConfig"),
        ("umt5", "UMT5Config"),
        ("unispeech", "UniSpeechConfig"),
        ("unispeech-sat", "UniSpeechSatConfig"),
@@ -454,6 +455,7 @@ CONFIG_ARCHIVE_MAP_MAPPING_NAMES = OrderedDict(
        ("transfo-xl", "TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("tvlt", "TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("tvp", "TVP_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("udop", "UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("unispeech", "UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("unispeech-sat", "UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("univnet", "UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP"),
@@ -715,6 +717,7 @@ MODEL_NAMES_MAPPING = OrderedDict(
        ("trocr", "TrOCR"),
        ("tvlt", "TVLT"),
        ("tvp", "TVP"),
        ("udop", "UDOP"),
        ("ul2", "UL2"),
        ("umt5", "UMT5"),
        ("unispeech", "UniSpeech"),

@@ -108,6 +108,7 @@ IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
        ("timesformer", "VideoMAEImageProcessor"),
        ("tvlt", "TvltImageProcessor"),
        ("tvp", "TvpImageProcessor"),
        ("udop", "LayoutLMv3ImageProcessor"),
        ("upernet", "SegformerImageProcessor"),
        ("van", "ConvNextImageProcessor"),
        ("videomae", "VideoMAEImageProcessor"),

@@ -219,6 +219,7 @@ MODEL_MAPPING_NAMES = OrderedDict(
        ("transfo-xl", "TransfoXLModel"),
        ("tvlt", "TvltModel"),
        ("tvp", "TvpModel"),
        ("udop", "UdopModel"),
        ("umt5", "UMT5Model"),
        ("unispeech", "UniSpeechModel"),
        ("unispeech-sat", "UniSpeechSatModel"),

@@ -418,6 +418,13 @@ else:
        ("tapex", ("TapexTokenizer", None)),
        ("transfo-xl", ("TransfoXLTokenizer", None)),
        ("tvp", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
        (
            "udop",
            (
                "UdopTokenizer" if is_sentencepiece_available() else None,
                "UdopTokenizerFast" if is_tokenizers_available() else None,
            ),
        ),
        (
            "umt5",
            (
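With these auto mappings in place, the generic Auto classes resolve the `"udop"` model type without importing any UDOP module by hand. A small sketch (it builds a randomly initialized model from the default config, so no checkpoint is needed):

```python
from transformers import AutoConfig, AutoModel

config = AutoConfig.for_model("udop")  # CONFIG_MAPPING_NAMES -> UdopConfig
model = AutoModel.from_config(config)  # MODEL_MAPPING_NAMES  -> UdopModel
print(type(config).__name__, type(model).__name__)
```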
src/transformers/models/udop/__init__.py (new file, 98 lines)
@@ -0,0 +1,98 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_udop": ["UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP", "UdopConfig"],
    "processing_udop": ["UdopProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_udop"] = ["UdopTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_udop_fast"] = ["UdopTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_udop"] = [
        "UDOP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UdopForConditionalGeneration",
        "UdopPreTrainedModel",
        "UdopModel",
        "UdopEncoderModel",
    ]

if TYPE_CHECKING:
    from .configuration_udop import UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP, UdopConfig
    from .processing_udop import UdopProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_udop import UdopTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_udop_fast import UdopTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_udop import (
            UDOP_PRETRAINED_MODEL_ARCHIVE_LIST,
            UdopEncoderModel,
            UdopForConditionalGeneration,
            UdopModel,
            UdopPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
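The module follows the library's `_LazyModule` pattern: importing `transformers.models.udop` is cheap, and each submodule is only loaded when one of its attributes is first accessed. A sketch of the intended behaviour (assuming the optional dependencies are installed):

```python
import transformers.models.udop as udop

config_cls = udop.UdopConfig                   # triggers the import of configuration_udop
model_cls = udop.UdopForConditionalGeneration  # triggers the import of modeling_udop (needs torch)
print(config_cls.__module__, model_cls.__module__)
```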
src/transformers/models/udop/configuration_udop.py (new file, 162 lines)
@@ -0,0 +1,162 @@
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" UDOP model configuration"""


from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UDOP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/udop-large": "https://huggingface.co/microsoft/udop-large/resolve/main/config.json",
}


class UdopConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`UdopForConditionalGeneration`]. It is used to
    instantiate a UDOP model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the UDOP
    [microsoft/udop-large](https://huggingface.co/microsoft/udop-large) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Arguments:
        vocab_size (`int`, *optional*, defaults to 33201):
            Vocabulary size of the UDOP model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`UdopForConditionalGeneration`].
        d_model (`int`, *optional*, defaults to 1024):
            Size of the encoder layers and the pooler layer.
        d_kv (`int`, *optional*, defaults to 64):
            Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will
            be defined as `num_heads * d_kv`.
        d_ff (`int`, *optional*, defaults to 4096):
            Size of the intermediate feed forward layer in each `UdopBlock`.
        num_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer encoder and decoder.
        num_decoder_layers (`int`, *optional*):
            Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
        num_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder and decoder.
        relative_attention_num_buckets (`int`, *optional*, defaults to 32):
            The number of buckets to use for each attention layer.
        relative_attention_max_distance (`int`, *optional*, defaults to 128):
            The maximum distance of the longer sequences for the bucket separation.
        relative_bias_args (`List[dict]`, *optional*, defaults to `[{'type': '1d'}, {'type': 'horizontal'}, {'type': 'vertical'}]`):
            A list of dictionaries containing the arguments for the relative bias layers.
        dropout_rate (`float`, *optional*, defaults to 0.1):
            The ratio for all dropout layers.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        feed_forward_proj (`string`, *optional*, defaults to `"relu"`):
            Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. Udopv1.1 uses the
            `"gated-gelu"` feed forward projection. Original Udop uses `"relu"`.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether the model should behave as an encoder/decoder or not.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        pad_token_id (`int`, *optional*, defaults to 0):
            The id of the padding token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 1):
            The id of the end-of-sequence token in the vocabulary.
        max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum absolute position embeddings for relative position encoding.
        image_size (`int`, *optional*, defaults to 224):
            The size of the input images.
        patch_size (`int`, *optional*, defaults to 16):
            The patch size used by the vision encoder.
        num_channels (`int`, *optional*, defaults to 3):
            The number of channels in the input images.
    """

    model_type = "udop"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=33201,
        d_model=1024,
        d_kv=64,
        d_ff=4096,
        num_layers=24,
        num_decoder_layers=None,
        num_heads=16,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        relative_bias_args=[{"type": "1d"}, {"type": "horizontal"}, {"type": "vertical"}],
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        max_2d_position_embeddings=1024,
        image_size=224,
        patch_size=16,
        num_channels=3,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # UDOP attributes
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        if not isinstance(relative_bias_args, list):
            raise ValueError("`relative_bias_args` should be a list of dictionaries.")
        self.relative_bias_args = relative_bias_args

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
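As the docstring says, the defaults mirror the [microsoft/udop-large](https://huggingface.co/microsoft/udop-large) architecture; the conversion script below only overrides `decoder_start_token_id` and, for the 512-resolution checkpoints, `image_size`. A minimal sketch:

```python
from transformers import UdopConfig

config = UdopConfig(decoder_start_token_id=0, image_size=512)
# attribute_map in action: hidden_size and num_attention_heads alias d_model and num_heads.
print(config.hidden_size, config.num_attention_heads, config.dense_act_fn)  # 1024 16 relu
```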
src/transformers/models/udop/convert_udop_to_hf.py (new file, 213 lines)
@@ -0,0 +1,213 @@
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert UDOP checkpoints from the original repository. URL: https://github.com/microsoft/i-Code/tree/main/i-Code-Doc"""


import argparse

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms as T

from transformers import (
    LayoutLMv3ImageProcessor,
    UdopConfig,
    UdopForConditionalGeneration,
    UdopProcessor,
    UdopTokenizer,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


def original_transform(image, image_size=224):
    transform = T.Compose(
        [
            T.Resize([image_size, image_size]),
            T.ToTensor(),
            T.Normalize(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
        ]
    )

    image = transform(image)
    return image


def get_image():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_docvqa", filename="document_2.png", repo_type="dataset"
    )
    image = Image.open(filepath).convert("RGB")

    return image


def prepare_dummy_inputs(tokenizer, image_processor):
    prompt = "Question answering. What is the name of the company?"
    prompt = "Question answering. In which year is the report made?"
    prompt_ids = tokenizer.encode(prompt, add_special_tokens=False)

    image = get_image()
    # words, boxes = apply_tesseract(image, lang=None)
    # fmt: off
words = ['7', 'ITC', 'Limited', 'REPORT', 'AND', 'ACCOUNTS', '2013', 'ITC’s', 'Brands:', 'An', 'Asset', 'for', 'the', 'Nation', 'The', 'consumer', 'needs', 'and', 'aspirations', 'they', 'fulfil,', 'the', 'benefit', 'they', 'generate', 'for', 'millions', 'across', 'ITC’s', 'value', 'chains,', 'the', 'future-ready', 'capabilities', 'that', 'support', 'them,', 'and', 'the', 'value', 'that', 'they', 'create', 'for', 'the', 'country,', 'have', 'made', 'ITC’s', 'brands', 'national', 'assets,', 'adding', 'to', 'India’s', 'competitiveness.', 'It', 'is', 'ITC’s', 'aspiration', 'to', 'be', 'the', 'No', '1', 'FMCG', 'player', 'in', 'the', 'country,', 'driven', 'by', 'its', 'new', 'FMCG', 'businesses.', 'A', 'recent', 'Nielsen', 'report', 'has', 'highlighted', 'that', "ITC's", 'new', 'FMCG', 'businesses', 'are', 'the', 'fastest', 'growing', 'among', 'the', 'top', 'consumer', 'goods', 'companies', 'operating', 'in', 'India.', 'ITC', 'takes', 'justifiable', 'pride', 'that,', 'along', 'with', 'generating', 'economic', 'value,', 'these', 'celebrated', 'Indian', 'brands', 'also', 'drive', 'the', 'creation', 'of', 'larger', 'societal', 'capital', 'through', 'the', 'virtuous', 'cycle', 'of', 'sustainable', 'and', 'inclusive', 'growth.', 'DI', 'WILLS', '*', ';', 'LOVE', 'DELIGHTFULLY', 'SOFT', 'SKIN?', 'aia', 'Ans', 'Source:', 'https://www.industrydocuments.ucsf.edu/docs/snbx0223']
boxes = [[0, 45, 67, 80], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [372, 59, 407, 66], [74, 136, 161, 158], [175, 137, 306, 158], [318, 137, 363, 158], [374, 137, 472, 158], [483, 136, 529, 158], [540, 137, 593, 158], [608, 137, 717, 158], [73, 194, 100, 203], [106, 196, 177, 203], [183, 194, 227, 203], [233, 194, 259, 203], [265, 194, 344, 205], [74, 211, 104, 222], [109, 210, 141, 221], [147, 211, 169, 220], [175, 210, 223, 220], [229, 211, 259, 222], [265, 211, 329, 222], [334, 210, 352, 220], [74, 227, 127, 236], [133, 229, 180, 236], [187, 227, 221, 236], [226, 227, 264, 236], [270, 227, 320, 237], [327, 227, 349, 236], [74, 243, 161, 254], [166, 243, 249, 254], [254, 243, 281, 252], [286, 244, 342, 254], [74, 260, 112, 270], [119, 260, 145, 269], [151, 260, 174, 269], [179, 260, 217, 269], [222, 260, 249, 269], [254, 260, 285, 271], [290, 260, 335, 269], [340, 259, 359, 269], [74, 276, 95, 284], [101, 276, 156, 287], [164, 276, 198, 284], [203, 276, 244, 284], [251, 275, 285, 284], [291, 276, 340, 284], [74, 292, 129, 301], [135, 292, 185, 302], [192, 292, 242, 303], [248, 292, 261, 301], [267, 292, 312, 301], [74, 308, 195, 319], [75, 335, 82, 344], [88, 335, 98, 344], [105, 335, 138, 344], [144, 335, 214, 346], [220, 336, 233, 344], [239, 335, 256, 344], [262, 335, 283, 344], [290, 335, 309, 344], [316, 335, 320, 344], [74, 351, 119, 360], [126, 352, 170, 362], [176, 352, 186, 360], [192, 352, 214, 360], [220, 352, 276, 362], [282, 352, 326, 360], [333, 352, 349, 362], [74, 368, 89, 377], [95, 370, 124, 377], [129, 367, 175, 377], [181, 368, 266, 377], [272, 368, 283, 376], [289, 368, 333, 377], [74, 384, 126, 393], [134, 385, 175, 395], [181, 384, 206, 393], [212, 384, 292, 395], [298, 384, 325, 393], [330, 384, 366, 393], [74, 403, 103, 409], [109, 400, 154, 409], [161, 401, 241, 409], [247, 403, 269, 409], [275, 401, 296, 409], [302, 400, 349, 409], [74, 417, 131, 428], [137, 419, 186, 428], [192, 417, 214, 426], [219, 417, 242, 428], [248, 419, 319, 426], [74, 433, 119, 444], [125, 433, 204, 444], [210, 433, 278, 444], [285, 433, 295, 441], [302, 433, 340, 442], [75, 449, 98, 458], [104, 449, 142, 458], [146, 449, 215, 460], [221, 449, 258, 460], [263, 449, 293, 459], [300, 449, 339, 460], [74, 466, 101, 474], [108, 466, 185, 476], [191, 466, 261, 474], [267, 466, 309, 476], [315, 466, 354, 474], [74, 482, 151, 491], [158, 482, 201, 491], [208, 482, 258, 491], [263, 482, 292, 491], [298, 482, 333, 491], [338, 482, 360, 491], [74, 498, 131, 507], [137, 498, 150, 507], [156, 498, 197, 509], [202, 498, 257, 507], [263, 498, 310, 509], [74, 515, 128, 525], [134, 515, 156, 523], [161, 515, 218, 523], [223, 515, 261, 525], [267, 514, 280, 523], [74, 531, 156, 540], [162, 531, 188, 540], [195, 531, 257, 540], [263, 531, 315, 542], [871, 199, 878, 202], [883, 199, 908, 202], [894, 251, 904, 257], [841, 268, 841, 270], [784, 373, 811, 378], [816, 373, 896, 378], [784, 381, 811, 387], [815, 381, 847, 387], [645, 908, 670, 915], [692, 908, 712, 915], [220, 984, 285, 993], [293, 983, 779, 996]]
    # fmt: on
    text_list = []
    bbox_list = []
    for text, box in zip(words, boxes):
        if text == "":
            continue
        sub_tokens = tokenizer.tokenize(text)
        for sub_token in sub_tokens:
            text_list.append(sub_token)
            bbox_list.append(box)

    input_ids = tokenizer.convert_tokens_to_ids(text_list)

    input_ids = prompt_ids + input_ids
    bbox = [[0, 0, 0, 0]] * len(prompt_ids) + bbox_list

    pixel_values = image_processor(image, return_tensors="pt").pixel_values
    original_pixel_values = original_transform(image, image_size=image_processor.size["height"]).unsqueeze(0)
    # verify pixel values
    assert torch.allclose(original_pixel_values, pixel_values)
    print("Pixel values are ok!")

    return torch.tensor(input_ids).unsqueeze(0), torch.tensor(bbox).unsqueeze(0).float(), pixel_values


def convert_udop_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # model_name to checkpoint_path
    name_to_checkpoint_path = {
        "udop-large": "/Users/nielsrogge/Documents/UDOP/udop-unimodel-large-224/pytorch_model.bin",
        "udop-large-512": "/Users/nielsrogge/Documents/UDOP/udop-unimodel-large-512/pytorch_model.bin",
        "udop-large-512-300k": "/Users/nielsrogge/Documents/UDOP/udop-unimodel-large-512-300k-steps/pytorch_model.bin",
    }

    # load original state dict
    checkpoint_path = name_to_checkpoint_path[model_name]
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    print("Checkpoint path:", checkpoint_path)

    # create HF model
    image_size = 512 if "512" in model_name else 224
    config = UdopConfig(decoder_start_token_id=0, image_size=image_size)
    model = UdopForConditionalGeneration(config)
    model.eval()

    # rename keys
    state_dict = {k.replace("cell2dembedding", "cell_2d_embedding"): v for k, v in state_dict.items()}

    # load weights
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    print("Missing keys:", missing_keys)
    print("Unexpected keys:", unexpected_keys)
    assert missing_keys == ["encoder.embed_patches.proj.weight", "encoder.embed_patches.proj.bias"]
    assert unexpected_keys == ["pos_embed"]

    # prepare dummy inputs
    tokenizer = UdopTokenizer.from_pretrained("t5-base", legacy=True)
    size = {"height": image_size, "width": image_size}
    image_processor = LayoutLMv3ImageProcessor(
        image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, size=size
    )
    processor = UdopProcessor(image_processor=image_processor, tokenizer=tokenizer)
    input_ids, bbox, image = prepare_dummy_inputs(tokenizer, image_processor)
    prompt = "Question answering. In which year is the report made?"
    encoding = processor(images=get_image(), text=prompt, return_tensors="pt")

    input_ids = encoding.input_ids
    try:
EXPECTED_INPUT_IDS = torch.tensor([[11860, 18243, 5, 86, 84, 215, 19, 8, 934, 263, 58, 1, 489, 27, 3838, 7363, 4083, 14536, 3430, 5686, 5911, 17161, 134, 2038, 27, 3838, 22, 7, 4688, 7, 10, 389, 18202, 21, 8, 11046, 37, 3733, 523, 11, 38, 2388, 1628, 3, 13133, 23334, 6, 8, 1656, 79, 3806, 21, 4040, 640, 27, 3838, 22, 7, 701, 16534, 6, 8, 3, 76, 2693, 18, 23015, 5644, 24, 380, 3, 6015, 6, 11, 8, 701, 24, 79, 482, 21, 3, 88, 684, 6, 43, 263, 27, 3838, 22, 7, 3635, 1157, 4089, 6, 2651, 12, 1547, 22, 7, 3265, 655, 5, 19, 27, 3838, 22, 7, 38, 2388, 257, 12, 36, 8, 465, 209, 13409, 12150, 1959, 16, 8, 684, 6, 6737, 57, 165, 126, 13409, 12150, 1623, 5, 71, 1100, 30298, 934, 65, 12566, 24, 27, 3838, 31, 7, 126, 13409, 12150, 1623, 33, 8, 10391, 1710, 859, 8, 420, 3733, 4968, 688, 2699, 16, 1547, 5, 27, 3838, 1217, 131, 99, 23, 179, 6064, 24, 6, 590, 28, 3, 11600, 1456, 701, 6, 175, 9443, 2557, 3635, 92, 1262, 8, 3409, 13, 2186, 3, 27908, 1784, 190, 8, 3, 5771, 17, 13281, 4005, 13, 5086, 11, 13066, 1170, 5, 10826, 16309, 134, 3, 2, 276, 26, 3, 55, 391, 13570, 5, 10315, 309, 3577, 19114, 371, 4254, 5121, 5055, 6245, 3, 10047, 3162, 58, 3, 9, 61, 1713, 2703, 476, 667, 25158, 301, 6058, 6038, 476, 3765, 9149, 10, 4893, 1303, 1986, 5, 13580, 7, 8224, 28244, 7, 5, 76, 75, 7, 89, 5, 15, 1259, 87, 7171, 7, 87, 7, 29, 115, 226, 4305, 2773, 1]]) # fmt: skip
|
||||
torch.testing.assert_close(EXPECTED_INPUT_IDS, input_ids)
|
||||
bbox = encoding.bbox.float()
|
||||
pixel_values = encoding.pixel_values
|
||||
except Exception:
|
||||
print("Input_ids don't match, preparing dummy inputs")
|
||||
input_ids, bbox, pixel_values = prepare_dummy_inputs(tokenizer, image_processor)
|
||||
|
||||
# Verify single forward pass
|
||||
print("Testing single forward pass..")
|
||||
with torch.no_grad():
|
||||
decoder_input_ids = torch.tensor([[101]])
|
||||
outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
|
||||
print("Shape of logits:", outputs.logits.shape)
|
||||
print("First values of logits:", outputs.logits[0, :3, :3])
|
||||
|
||||
# tensor([[-18.5262, 1.5087, -15.7051]]) on linux
|
||||
# tensor([[-19.4976, 0.8515, -17.1873]]) on mac
|
||||
try:
|
||||
assert torch.allclose(outputs.logits[0, :3, :3], torch.tensor([[-18.5262, 1.5087, -15.7051]]), atol=1e-4)
|
||||
print("Looks ok!")
|
||||
except Exception:
|
||||
print("logits don't match let's try to generate")
|
||||
|
||||
# Verify autoregressive decoding
|
||||
print("Testing generation...")
|
||||
model_kwargs = {"bbox": bbox, "pixel_values": pixel_values}
|
||||
outputs = model.generate(input_ids=input_ids, **model_kwargs, max_new_tokens=20)
|
||||
|
||||
print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True))
|
||||
|
||||
# autoregressive decoding with original input data
|
||||
print("Testing generation with original inputs...")
|
||||
filepath = hf_hub_download(repo_id="nielsr/test-image", filename="input_ids_udop.pt", repo_type="dataset")
|
||||
input_ids = torch.load(filepath)
|
||||
filepath = hf_hub_download(repo_id="nielsr/test-image", filename="bbox_udop.pt", repo_type="dataset")
|
||||
bbox = torch.load(filepath)
|
||||
pixel_values_filename = "pixel_values_udop_512.pt" if "512" in model_name else "pixel_values_udop_224.pt"
|
||||
filepath = hf_hub_download(repo_id="nielsr/test-image", filename=pixel_values_filename, repo_type="dataset")
|
||||
pixel_values = torch.load(filepath)
|
||||
|
||||
print("Decoded input ids:", tokenizer.decode(input_ids[0], skip_special_tokens=True))
|
||||
print("Bbox shape:", bbox.shape)
|
||||
|
||||
model_kwargs = {"bbox": bbox, "pixel_values": pixel_values}
|
||||
outputs = model.generate(input_ids=input_ids, **model_kwargs, max_new_tokens=20)
|
||||
generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
|
||||
print("Generated:", generated_text)
|
||||
|
||||
if pytorch_dump_folder_path is not None:
|
||||
model.save_pretrained(pytorch_dump_folder_path)
|
||||
tokenizer.save_pretrained(pytorch_dump_folder_path)
|
||||
|
||||
if push_to_hub:
|
||||
model.push_to_hub(f"microsoft/{model_name}")
|
||||
processor.push_to_hub(f"microsoft/{model_name}")
|
||||
# BIG note here: to save the fast tokenizer files in the repo on the hub, you need to do the following:
|
||||
# see https://discuss.huggingface.co/t/convert-slow-xlmrobertatokenizer-to-fast-one/20876
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
# Required parameters
|
||||
parser.add_argument(
|
||||
"--model_name",
|
||||
default="udop-large",
|
||||
type=str,
|
||||
choices=["udop-large", "udop-large-512", "udop-large-512-300k"],
|
||||
help=("Name of the UDOP model you'd like to convert."),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
convert_udop_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
|
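For context, a minimal usage sketch of a converted checkpoint once it has been pushed (model id assumed to be `microsoft/udop-large`, mirroring the integration test further below; the image path is hypothetical):

from PIL import Image
from transformers import UdopProcessor, UdopForConditionalGeneration

processor = UdopProcessor.from_pretrained("microsoft/udop-large")
model = UdopForConditionalGeneration.from_pretrained("microsoft/udop-large")

image = Image.open("document.png").convert("RGB")  # hypothetical local document image
encoding = processor(images=image, text="Question answering. In which year is the report made?", return_tensors="pt")

predicted_ids = model.generate(**encoding, max_new_tokens=20)
print(processor.batch_decode(predicted_ids, skip_special_tokens=True)[0])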
src/transformers/models/udop/modeling_udop.py (new file, 2030 lines; diff suppressed because it is too large)
src/transformers/models/udop/processing_udop.py (new file, 204 lines)
@@ -0,0 +1,204 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2024 The HuggingFace Inc. team.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Processor class for UDOP.
|
||||
"""
|
||||
|
||||
from typing import List, Optional, Union
|
||||
|
||||
from ...image_utils import ImageInput
|
||||
from ...processing_utils import ProcessorMixin
|
||||
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
|
||||
from ...utils import TensorType
|
||||
|
||||
|
||||
class UdopProcessor(ProcessorMixin):
|
||||
r"""
|
||||
Constructs a UDOP processor which combines a LayoutLMv3 image processor and a UDOP tokenizer into a single processor.
|
||||
|
||||
[`UdopProcessor`] offers all the functionalities you need to prepare data for the model.
|
||||
|
||||
It first uses [`LayoutLMv3ImageProcessor`] to resize, rescale and normalize document images, and optionally applies OCR
|
||||
to get words and normalized bounding boxes. These are then provided to [`UdopTokenizer`] or [`UdopTokenizerFast`],
|
||||
which turns the words and bounding boxes into token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`.
|
||||
Optionally, one can provide integer `word_labels`, which are turned into token-level `labels` for token
|
||||
classification tasks (such as FUNSD, CORD).
|
||||
|
||||
Additionally, it also supports passing `text_target` and `text_pair_target` to the tokenizer, which can be used to
|
||||
prepare labels for language modeling tasks.
|
||||
|
||||
Args:
|
||||
image_processor (`LayoutLMv3ImageProcessor`):
|
||||
An instance of [`LayoutLMv3ImageProcessor`]. The image processor is a required input.
|
||||
tokenizer (`UdopTokenizer` or `UdopTokenizerFast`):
|
||||
An instance of [`UdopTokenizer`] or [`UdopTokenizerFast`]. The tokenizer is a required input.
|
||||
"""
|
||||
|
||||
attributes = ["image_processor", "tokenizer"]
|
||||
image_processor_class = "LayoutLMv3ImageProcessor"
|
||||
tokenizer_class = ("UdopTokenizer", "UdopTokenizerFast")
|
||||
|
||||
def __init__(self, image_processor, tokenizer):
|
||||
super().__init__(image_processor, tokenizer)
|
||||
|
||||
def __call__(
|
||||
self,
|
||||
images: Optional[ImageInput] = None,
|
||||
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
|
||||
text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
|
||||
boxes: Union[List[List[int]], List[List[List[int]]]] = None,
|
||||
word_labels: Optional[Union[List[int], List[List[int]]]] = None,
|
||||
text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
|
||||
text_pair_target: Optional[
|
||||
Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]
|
||||
] = None,
|
||||
add_special_tokens: bool = True,
|
||||
padding: Union[bool, str, PaddingStrategy] = False,
|
||||
truncation: Union[bool, str, TruncationStrategy] = False,
|
||||
max_length: Optional[int] = None,
|
||||
stride: int = 0,
|
||||
pad_to_multiple_of: Optional[int] = None,
|
||||
return_token_type_ids: Optional[bool] = None,
|
||||
return_attention_mask: Optional[bool] = None,
|
||||
return_overflowing_tokens: bool = False,
|
||||
return_special_tokens_mask: bool = False,
|
||||
return_offsets_mapping: bool = False,
|
||||
return_length: bool = False,
|
||||
verbose: bool = True,
|
||||
return_tensors: Optional[Union[str, TensorType]] = None,
|
||||
) -> BatchEncoding:
|
||||
"""
|
||||
This method first forwards the `images` argument to [`~LayoutLMv3ImageProcessor.__call__`]. In case
[`LayoutLMv3ImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and
bounding boxes along with the additional arguments to [`~UdopTokenizer.__call__`] and returns the output,
together with the prepared `pixel_values`. In case [`LayoutLMv3ImageProcessor`] was initialized with `apply_ocr` set
to `False`, it passes the words (`text`/`text_pair`) and `boxes` specified by the user along with the
additional arguments to [`~UdopTokenizer.__call__`] and returns the output, together with the prepared
`pixel_values`.
|
||||
|
||||
Alternatively, one can pass `text_target` and `text_pair_target` to prepare the targets of UDOP.
|
||||
|
||||
Please refer to the docstring of the above two methods for more information.
|
||||
"""
|
||||
# verify input
|
||||
if self.image_processor.apply_ocr and (boxes is not None):
|
||||
raise ValueError(
|
||||
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
|
||||
)
|
||||
|
||||
if self.image_processor.apply_ocr and (word_labels is not None):
|
||||
raise ValueError(
|
||||
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
|
||||
)
|
||||
|
||||
if return_overflowing_tokens is True and return_offsets_mapping is False:
|
||||
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
|
||||
|
||||
if text_target is not None:
|
||||
# use the processor to prepare the targets of UDOP
|
||||
return self.tokenizer(
|
||||
text_target=text_target,
|
||||
text_pair_target=text_pair_target,
|
||||
add_special_tokens=add_special_tokens,
|
||||
padding=padding,
|
||||
truncation=truncation,
|
||||
max_length=max_length,
|
||||
stride=stride,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_token_type_ids=return_token_type_ids,
|
||||
return_attention_mask=return_attention_mask,
|
||||
return_overflowing_tokens=return_overflowing_tokens,
|
||||
return_special_tokens_mask=return_special_tokens_mask,
|
||||
return_offsets_mapping=return_offsets_mapping,
|
||||
return_length=return_length,
|
||||
verbose=verbose,
|
||||
return_tensors=return_tensors,
|
||||
)
|
||||
|
||||
else:
|
||||
# use the processor to prepare the inputs of UDOP
|
||||
# first, apply the image processor
|
||||
features = self.image_processor(images=images, return_tensors=return_tensors)
|
||||
|
||||
# second, apply the tokenizer
|
||||
if text is not None and self.image_processor.apply_ocr and text_pair is None:
|
||||
if isinstance(text, str):
|
||||
text = [text] # add batch dimension (as the image processor always adds a batch dimension)
|
||||
text_pair = features["words"]
|
||||
|
||||
encoded_inputs = self.tokenizer(
|
||||
text=text if text is not None else features["words"],
|
||||
text_pair=text_pair if text_pair is not None else None,
|
||||
boxes=boxes if boxes is not None else features["boxes"],
|
||||
word_labels=word_labels,
|
||||
add_special_tokens=add_special_tokens,
|
||||
padding=padding,
|
||||
truncation=truncation,
|
||||
max_length=max_length,
|
||||
stride=stride,
|
||||
pad_to_multiple_of=pad_to_multiple_of,
|
||||
return_token_type_ids=return_token_type_ids,
|
||||
return_attention_mask=return_attention_mask,
|
||||
return_overflowing_tokens=return_overflowing_tokens,
|
||||
return_special_tokens_mask=return_special_tokens_mask,
|
||||
return_offsets_mapping=return_offsets_mapping,
|
||||
return_length=return_length,
|
||||
verbose=verbose,
|
||||
return_tensors=return_tensors,
|
||||
)
|
||||
|
||||
# add pixel values
|
||||
pixel_values = features.pop("pixel_values")
|
||||
if return_overflowing_tokens is True:
|
||||
pixel_values = self.get_overflowing_images(pixel_values, encoded_inputs["overflow_to_sample_mapping"])
|
||||
encoded_inputs["pixel_values"] = pixel_values
|
||||
|
||||
return encoded_inputs
|
||||
|
||||
# Copied from transformers.models.layoutlmv3.processing_layoutlmv3.LayoutLMv3Processor.get_overflowing_images
|
||||
def get_overflowing_images(self, images, overflow_to_sample_mapping):
|
||||
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
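# e.g. (illustrative) overflow_to_sample_mapping == [0, 0, 1] means the first two overflowing
# sequences came from image 0 and the third from image 1, so image 0 is duplicated here to
# preserve a 1-to-1 pairing between pixel_values and input_ids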
|
||||
images_with_overflow = []
|
||||
for sample_idx in overflow_to_sample_mapping:
|
||||
images_with_overflow.append(images[sample_idx])
|
||||
|
||||
if len(images_with_overflow) != len(overflow_to_sample_mapping):
|
||||
raise ValueError(
|
||||
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
|
||||
f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
|
||||
)
|
||||
|
||||
return images_with_overflow
|
||||
|
||||
# Copied from transformers.models.layoutlmv3.processing_layoutlmv3.LayoutLMv3Processor.batch_decode
|
||||
def batch_decode(self, *args, **kwargs):
|
||||
"""
|
||||
This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
|
||||
refer to the docstring of this method for more information.
|
||||
"""
|
||||
return self.tokenizer.batch_decode(*args, **kwargs)
|
||||
|
||||
# Copied from transformers.models.layoutlmv3.processing_layoutlmv3.LayoutLMv3Processor.decode
|
||||
def decode(self, *args, **kwargs):
|
||||
"""
|
||||
This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer
|
||||
to the docstring of this method for more information.
|
||||
"""
|
||||
return self.tokenizer.decode(*args, **kwargs)
|
||||
|
||||
@property
|
||||
# Copied from transformers.models.layoutlmv3.processing_layoutlmv3.LayoutLMv3Processor.model_input_names
|
||||
def model_input_names(self):
|
||||
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
|
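A minimal sketch of the two code paths described in the docstring above, assuming the checkpoint id `microsoft/udop-large` and a hypothetical local image:

from PIL import Image
from transformers import UdopProcessor

image = Image.open("document.png").convert("RGB")  # hypothetical path

# apply_ocr=True (default): the image processor runs Tesseract to get words and boxes
processor = UdopProcessor.from_pretrained("microsoft/udop-large")
encoding = processor(images=image, text="In which year is the report made?", return_tensors="pt")

# apply_ocr=False: the caller supplies the words and their normalized bounding boxes
processor = UdopProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False)
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
encoding = processor(images=image, text=words, boxes=boxes, return_tensors="pt")
print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']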
src/transformers/models/udop/tokenization_udop.py (new file, 1483 lines; diff suppressed because it is too large)
src/transformers/models/udop/tokenization_udop_fast.py (new file, 1012 lines; diff suppressed because it is too large)
@@ -8341,6 +8341,37 @@ class TvpPreTrainedModel(metaclass=DummyObject):
|
||||
requires_backends(self, ["torch"])
|
||||
|
||||
|
||||
UDOP_PRETRAINED_MODEL_ARCHIVE_LIST = None
|
||||
|
||||
|
||||
class UdopEncoderModel(metaclass=DummyObject):
|
||||
_backends = ["torch"]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
requires_backends(self, ["torch"])
|
||||
|
||||
|
||||
class UdopForConditionalGeneration(metaclass=DummyObject):
|
||||
_backends = ["torch"]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
requires_backends(self, ["torch"])
|
||||
|
||||
|
||||
class UdopModel(metaclass=DummyObject):
|
||||
_backends = ["torch"]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
requires_backends(self, ["torch"])
|
||||
|
||||
|
||||
class UdopPreTrainedModel(metaclass=DummyObject):
|
||||
_backends = ["torch"]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
requires_backends(self, ["torch"])
|
||||
|
||||
|
||||
class UMT5EncoderModel(metaclass=DummyObject):
|
||||
_backends = ["torch"]
|
||||
|
||||
|
@@ -219,6 +219,13 @@ class T5Tokenizer(metaclass=DummyObject):
|
||||
requires_backends(self, ["sentencepiece"])
|
||||
|
||||
|
||||
class UdopTokenizer(metaclass=DummyObject):
|
||||
_backends = ["sentencepiece"]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
requires_backends(self, ["sentencepiece"])
|
||||
|
||||
|
||||
class XGLMTokenizer(metaclass=DummyObject):
|
||||
_backends = ["sentencepiece"]
|
||||
|
||||
|
@@ -408,6 +408,13 @@ class T5TokenizerFast(metaclass=DummyObject):
|
||||
requires_backends(self, ["tokenizers"])
|
||||
|
||||
|
||||
class UdopTokenizerFast(metaclass=DummyObject):
|
||||
_backends = ["tokenizers"]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
requires_backends(self, ["tokenizers"])
|
||||
|
||||
|
||||
class WhisperTokenizerFast(metaclass=DummyObject):
|
||||
_backends = ["tokenizers"]
|
||||
|
||||
|
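These additions to the dummy-object files make the UDOP classes importable even when the optional backend is missing; a rough sketch of the resulting behavior (assuming the `tokenizers` backend is not installed):

from transformers import UdopTokenizerFast  # import succeeds, a DummyObject stub is returned

UdopTokenizerFast.from_pretrained("microsoft/udop-large")
# raises an error asking to install the `tokenizers` backend instead of an obscure AttributeError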
tests/models/udop/__init__.py (new, empty file)
tests/models/udop/test_modeling_udop.py (new file, 567 lines)
@@ -0,0 +1,567 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2024 The HuggingFace Inc. team.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import copy
|
||||
import inspect
|
||||
import unittest
|
||||
|
||||
from huggingface_hub import hf_hub_download
|
||||
|
||||
from transformers import UdopConfig, is_torch_available, is_vision_available
|
||||
from transformers.testing_utils import (
|
||||
require_sentencepiece,
|
||||
require_tokenizers,
|
||||
require_torch,
|
||||
require_vision,
|
||||
slow,
|
||||
torch_device,
|
||||
)
|
||||
from transformers.utils import cached_property
|
||||
|
||||
from ...test_configuration_common import ConfigTester
|
||||
from ...test_modeling_common import ModelTesterMixin, ids_tensor
|
||||
from ...test_pipeline_mixin import PipelineTesterMixin
|
||||
|
||||
|
||||
if is_torch_available():
|
||||
import torch
|
||||
|
||||
from transformers import UdopEncoderModel, UdopForConditionalGeneration, UdopModel, UdopProcessor
|
||||
from transformers.models.udop.modeling_udop import UDOP_PRETRAINED_MODEL_ARCHIVE_LIST
|
||||
|
||||
|
||||
if is_vision_available():
|
||||
from PIL import Image
|
||||
|
||||
|
||||
class UdopModelTester:
|
||||
def __init__(
|
||||
self,
|
||||
parent,
|
||||
vocab_size=99,
|
||||
batch_size=13,
|
||||
encoder_seq_length=7,
|
||||
decoder_seq_length=9,
|
||||
# For common tests
|
||||
is_training=True,
|
||||
use_attention_mask=True,
|
||||
use_labels=True,
|
||||
hidden_size=32,
|
||||
num_hidden_layers=5,
|
||||
num_attention_heads=4,
|
||||
d_ff=37,
|
||||
relative_attention_num_buckets=32,
|
||||
dropout_rate=0.1,
|
||||
initializer_factor=0.002,
|
||||
eos_token_id=1,
|
||||
pad_token_id=0,
|
||||
scope=None,
|
||||
decoder_layers=None,
|
||||
range_bbox=1000,
|
||||
decoder_start_token_id=0,
|
||||
):
|
||||
self.parent = parent
|
||||
self.batch_size = batch_size
|
||||
self.encoder_seq_length = encoder_seq_length
|
||||
self.decoder_seq_length = decoder_seq_length
|
||||
# For common tests
|
||||
self.seq_length = self.decoder_seq_length
|
||||
self.is_training = is_training
|
||||
self.use_attention_mask = use_attention_mask
|
||||
self.use_labels = use_labels
|
||||
self.vocab_size = vocab_size
|
||||
self.hidden_size = hidden_size
|
||||
self.num_hidden_layers = num_hidden_layers
|
||||
self.num_attention_heads = num_attention_heads
|
||||
self.d_ff = d_ff
|
||||
self.relative_attention_num_buckets = relative_attention_num_buckets
|
||||
self.dropout_rate = dropout_rate
|
||||
self.initializer_factor = initializer_factor
|
||||
self.eos_token_id = eos_token_id
|
||||
self.pad_token_id = pad_token_id
|
||||
self.scope = None
|
||||
self.decoder_layers = decoder_layers
|
||||
self.range_bbox = range_bbox
|
||||
self.decoder_start_token_id = decoder_start_token_id
|
||||
|
||||
def prepare_config_and_inputs(self):
|
||||
input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
|
||||
bbox = ids_tensor([self.batch_size, self.encoder_seq_length, 4], self.range_bbox).float()
|
||||
# Ensure that bbox is legal
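# (i.e. swap coordinates where needed so that each box satisfies x0 <= x1 and y0 <= y1)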
|
||||
for i in range(bbox.shape[0]):
|
||||
for j in range(bbox.shape[1]):
|
||||
if bbox[i, j, 3] < bbox[i, j, 1]:
|
||||
t = bbox[i, j, 3]
|
||||
bbox[i, j, 3] = bbox[i, j, 1]
|
||||
bbox[i, j, 1] = t
|
||||
if bbox[i, j, 2] < bbox[i, j, 0]:
|
||||
t = bbox[i, j, 2]
|
||||
bbox[i, j, 2] = bbox[i, j, 0]
|
||||
bbox[i, j, 0] = t
|
||||
decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
|
||||
|
||||
attention_mask = None
|
||||
decoder_attention_mask = None
|
||||
if self.use_attention_mask:
|
||||
attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
|
||||
decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
|
||||
|
||||
lm_labels = None
|
||||
if self.use_labels:
|
||||
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
|
||||
|
||||
config = self.get_config()
|
||||
|
||||
return (
|
||||
config,
|
||||
input_ids,
|
||||
bbox,
|
||||
decoder_input_ids,
|
||||
attention_mask,
|
||||
decoder_attention_mask,
|
||||
lm_labels,
|
||||
)
|
||||
|
||||
def get_config(self):
|
||||
return UdopConfig(
|
||||
vocab_size=self.vocab_size,
|
||||
d_model=self.hidden_size,
|
||||
d_ff=self.d_ff,
|
||||
d_kv=self.hidden_size // self.num_attention_heads,
|
||||
num_layers=self.num_hidden_layers,
|
||||
num_decoder_layers=self.decoder_layers,
|
||||
num_heads=self.num_attention_heads,
|
||||
relative_attention_num_buckets=self.relative_attention_num_buckets,
|
||||
dropout_rate=self.dropout_rate,
|
||||
initializer_factor=self.initializer_factor,
|
||||
eos_token_id=self.eos_token_id,
|
||||
bos_token_id=self.pad_token_id,
|
||||
pad_token_id=self.pad_token_id,
|
||||
decoder_start_token_id=self.decoder_start_token_id,
|
||||
)
|
||||
|
||||
def create_and_check_model(
|
||||
self,
|
||||
config,
|
||||
input_ids,
|
||||
bbox,
|
||||
decoder_input_ids,
|
||||
attention_mask,
|
||||
decoder_attention_mask,
|
||||
lm_labels,
|
||||
):
|
||||
model = UdopModel(config=config)
|
||||
model.to(torch_device)
|
||||
model.eval()
|
||||
result = model(
|
||||
input_ids=input_ids,
|
||||
bbox=bbox,
|
||||
decoder_input_ids=decoder_input_ids,
|
||||
attention_mask=attention_mask,
|
||||
decoder_attention_mask=decoder_attention_mask,
|
||||
)
|
||||
result = model(input_ids=input_ids, bbox=bbox, decoder_input_ids=decoder_input_ids)
|
||||
decoder_output = result.last_hidden_state
|
||||
decoder_past = result.past_key_values
|
||||
encoder_output = result.encoder_last_hidden_state
|
||||
|
||||
self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
|
||||
self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
|
||||
# There should be `num_layers` key value embeddings stored in decoder_past
|
||||
self.parent.assertEqual(len(decoder_past), config.num_layers)
|
||||
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
|
||||
self.parent.assertEqual(len(decoder_past[0]), 4)
|
||||
|
||||
def create_and_check_with_lm_head(
|
||||
self,
|
||||
config,
|
||||
input_ids,
|
||||
bbox,
|
||||
decoder_input_ids,
|
||||
attention_mask,
|
||||
decoder_attention_mask,
|
||||
lm_labels,
|
||||
):
|
||||
model = UdopForConditionalGeneration(config=config).to(torch_device).eval()
|
||||
outputs = model(
|
||||
input_ids=input_ids,
|
||||
bbox=bbox,
|
||||
decoder_input_ids=decoder_input_ids,
|
||||
decoder_attention_mask=decoder_attention_mask,
|
||||
labels=lm_labels,
|
||||
)
|
||||
self.parent.assertEqual(len(outputs), 4)
|
||||
self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size))
|
||||
self.parent.assertEqual(outputs["loss"].size(), ())
|
||||
|
||||
def create_and_check_generate_with_past_key_values(
|
||||
self,
|
||||
config,
|
||||
input_ids,
|
||||
bbox,
|
||||
decoder_input_ids,
|
||||
attention_mask,
|
||||
decoder_attention_mask,
|
||||
lm_labels,
|
||||
):
|
||||
model = UdopForConditionalGeneration(config=config).to(torch_device).eval()
|
||||
torch.manual_seed(0)
|
||||
output_without_past_cache = model.generate(
|
||||
input_ids[:1], bbox=bbox[:1, :, :], num_beams=2, max_length=5, do_sample=True, use_cache=False
|
||||
)
|
||||
torch.manual_seed(0)
|
||||
output_with_past_cache = model.generate(
|
||||
input_ids[:1], bbox=bbox[:1, :, :], num_beams=2, max_length=5, do_sample=True
|
||||
)
|
||||
self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache))
|
||||
|
||||
def prepare_config_and_inputs_for_common(self):
|
||||
config_and_inputs = self.prepare_config_and_inputs()
|
||||
(
|
||||
config,
|
||||
input_ids,
|
||||
bbox,
|
||||
decoder_input_ids,
|
||||
attention_mask,
|
||||
decoder_attention_mask,
|
||||
lm_labels,
|
||||
) = config_and_inputs
|
||||
|
||||
inputs_dict = {
|
||||
"input_ids": input_ids,
|
||||
"attention_mask": attention_mask,
|
||||
"bbox": bbox,
|
||||
"decoder_input_ids": decoder_input_ids,
|
||||
"decoder_attention_mask": decoder_attention_mask,
|
||||
"use_cache": False,
|
||||
}
|
||||
return config, inputs_dict
|
||||
|
||||
|
||||
@require_torch
|
||||
class UdopModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
|
||||
all_model_classes = (
|
||||
(
|
||||
UdopModel,
|
||||
UdopForConditionalGeneration,
|
||||
)
|
||||
if is_torch_available()
|
||||
else ()
|
||||
)
|
||||
all_generative_model_classes = (UdopForConditionalGeneration,) if is_torch_available() else ()
|
||||
pipeline_model_mapping = {"feature-extraction": UdopModel} if is_torch_available() else {}
|
||||
fx_compatible = False
|
||||
test_pruning = False
|
||||
test_torchscript = False
|
||||
test_head_masking = False
|
||||
test_resize_embeddings = True
|
||||
test_model_parallel = False
|
||||
is_encoder_decoder = True
|
||||
# The small UDOP model needs higher percentages for CPU/MP tests
|
||||
model_split_percents = [0.8, 0.9]
|
||||
|
||||
def setUp(self):
|
||||
self.model_tester = UdopModelTester(self)
|
||||
self.config_tester = ConfigTester(self, config_class=UdopConfig, d_model=37)
|
||||
|
||||
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
|
||||
inputs_dict = copy.deepcopy(inputs_dict)
|
||||
if model_class.__name__ == "UdopForConditionalGeneration":
|
||||
if return_labels:
|
||||
inputs_dict["labels"] = torch.zeros(
|
||||
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
|
||||
)
|
||||
|
||||
return inputs_dict
|
||||
|
||||
def test_config(self):
|
||||
self.config_tester.run_common_tests()
|
||||
|
||||
def test_model(self):
|
||||
config_and_inputs = self.model_tester.prepare_config_and_inputs()
|
||||
self.model_tester.create_and_check_model(*config_and_inputs)
|
||||
|
||||
def test_with_lm_head(self):
|
||||
config_and_inputs = self.model_tester.prepare_config_and_inputs()
|
||||
self.model_tester.create_and_check_with_lm_head(*config_and_inputs)
|
||||
|
||||
def test_generate_with_past_key_values(self):
|
||||
config_and_inputs = self.model_tester.prepare_config_and_inputs()
|
||||
self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs)
|
||||
|
||||
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
|
||||
def test_model_fp16_forward(self):
|
||||
config_and_inputs = self.model_tester.prepare_config_and_inputs()
|
||||
self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
|
||||
|
||||
@unittest.skip("Gradient checkpointing is not supported by this model")
|
||||
def test_training_gradient_checkpointing(self):
|
||||
pass
|
||||
|
||||
@unittest.skip(
|
||||
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
|
||||
)
|
||||
def test_training_gradient_checkpointing_use_reentrant(self):
|
||||
pass
|
||||
|
||||
@unittest.skip(
|
||||
reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
|
||||
)
|
||||
def test_training_gradient_checkpointing_use_reentrant_false(self):
|
||||
pass
|
||||
|
||||
def test_forward_signature(self):
|
||||
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
|
||||
|
||||
for model_class in self.all_model_classes:
|
||||
model = model_class(config)
|
||||
signature = inspect.signature(model.forward)
|
||||
# signature.parameters is an OrderedDict => so arg_names order is deterministic
|
||||
arg_names = sorted([*signature.parameters.keys()])
|
||||
|
||||
expected_arg_names = [
|
||||
"attention_mask",
|
||||
"bbox",
|
||||
"cross_attn_head_mask",
|
||||
"decoder_attention_mask",
|
||||
"decoder_head_mask",
|
||||
"decoder_input_ids",
|
||||
"decoder_inputs_embeds",
|
||||
"encoder_outputs",
|
||||
"head_mask",
|
||||
"input_ids",
|
||||
"inputs_embeds",
|
||||
]
|
||||
if model_class in self.all_generative_model_classes:
|
||||
expected_arg_names.append(
|
||||
"labels",
|
||||
)
|
||||
expected_arg_names = sorted(expected_arg_names)
|
||||
self.assertListEqual(sorted(arg_names[: len(expected_arg_names)]), expected_arg_names)
|
||||
|
||||
@unittest.skip(
|
||||
"Not currently compatible. Fails with - NotImplementedError: Cannot copy out of meta tensor; no data!"
|
||||
)
|
||||
def test_save_load_low_cpu_mem_usage(self):
|
||||
pass
|
||||
|
||||
@slow
|
||||
def test_model_from_pretrained(self):
|
||||
for model_name in UDOP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
|
||||
model = UdopForConditionalGeneration.from_pretrained(model_name)
|
||||
self.assertIsNotNone(model)
|
||||
|
||||
|
||||
class UdopEncoderOnlyModelTester:
|
||||
def __init__(
|
||||
self,
|
||||
parent,
|
||||
vocab_size=99,
|
||||
batch_size=13,
|
||||
seq_length=7,
|
||||
# For common tests
|
||||
is_training=False,
|
||||
use_attention_mask=True,
|
||||
hidden_size=32,
|
||||
num_hidden_layers=5,
|
||||
decoder_layers=2,
|
||||
num_attention_heads=4,
|
||||
d_ff=37,
|
||||
relative_attention_num_buckets=32,
|
||||
dropout_rate=0.1,
|
||||
initializer_factor=0.002,
|
||||
eos_token_id=1,
|
||||
pad_token_id=0,
|
||||
scope=None,
|
||||
range_bbox=1000,
|
||||
):
|
||||
self.parent = parent
|
||||
self.batch_size = batch_size
|
||||
# For common tests
|
||||
self.seq_length = seq_length
|
||||
self.is_training = is_training
|
||||
self.use_attention_mask = use_attention_mask
|
||||
self.vocab_size = vocab_size
|
||||
self.hidden_size = hidden_size
|
||||
self.num_hidden_layers = num_hidden_layers
|
||||
self.decoder_layers = decoder_layers
|
||||
self.num_attention_heads = num_attention_heads
|
||||
self.d_ff = d_ff
|
||||
self.relative_attention_num_buckets = relative_attention_num_buckets
|
||||
self.dropout_rate = dropout_rate
|
||||
self.initializer_factor = initializer_factor
|
||||
self.eos_token_id = eos_token_id
|
||||
self.pad_token_id = pad_token_id
|
||||
self.scope = None
|
||||
self.range_bbox = range_bbox
|
||||
|
||||
def get_config(self):
|
||||
return UdopConfig(
|
||||
vocab_size=self.vocab_size,
|
||||
d_model=self.hidden_size,
|
||||
d_ff=self.d_ff,
|
||||
d_kv=self.hidden_size // self.num_attention_heads,
|
||||
num_layers=self.num_hidden_layers,
|
||||
num_decoder_layers=self.decoder_layers,
|
||||
num_heads=self.num_attention_heads,
|
||||
relative_attention_num_buckets=self.relative_attention_num_buckets,
|
||||
dropout_rate=self.dropout_rate,
|
||||
initializer_factor=self.initializer_factor,
|
||||
eos_token_id=self.eos_token_id,
|
||||
bos_token_id=self.pad_token_id,
|
||||
pad_token_id=self.pad_token_id,
|
||||
is_encoder_decoder=False,
|
||||
)
|
||||
|
||||
def prepare_config_and_inputs(self):
|
||||
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
|
||||
bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).float()
|
||||
# Ensure that bbox is legal
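# (i.e. swap coordinates where needed so that each box satisfies x0 <= x1 and y0 <= y1)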
|
||||
for i in range(bbox.shape[0]):
|
||||
for j in range(bbox.shape[1]):
|
||||
if bbox[i, j, 3] < bbox[i, j, 1]:
|
||||
t = bbox[i, j, 3]
|
||||
bbox[i, j, 3] = bbox[i, j, 1]
|
||||
bbox[i, j, 1] = t
|
||||
if bbox[i, j, 2] < bbox[i, j, 0]:
|
||||
t = bbox[i, j, 2]
|
||||
bbox[i, j, 2] = bbox[i, j, 0]
|
||||
bbox[i, j, 0] = t
|
||||
|
||||
attention_mask = None
|
||||
if self.use_attention_mask:
|
||||
attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
|
||||
|
||||
config = self.get_config()
|
||||
|
||||
return (
|
||||
config,
|
||||
input_ids,
|
||||
bbox,
|
||||
attention_mask,
|
||||
)
|
||||
|
||||
def prepare_config_and_inputs_for_common(self):
|
||||
config_and_inputs = self.prepare_config_and_inputs()
|
||||
(
|
||||
config,
|
||||
input_ids,
|
||||
bbox,
|
||||
attention_mask,
|
||||
) = config_and_inputs
|
||||
|
||||
inputs_dict = {
|
||||
"input_ids": input_ids,
|
||||
"bbox": bbox,
|
||||
"attention_mask": attention_mask,
|
||||
}
|
||||
return config, inputs_dict
|
||||
|
||||
def create_and_check_model(
|
||||
self,
|
||||
config,
|
||||
input_ids,
|
||||
bbox,
|
||||
attention_mask,
|
||||
):
|
||||
model = UdopEncoderModel(config=config)
|
||||
model.to(torch_device)
|
||||
model.eval()
|
||||
result = model(
|
||||
input_ids=input_ids,
|
||||
bbox=bbox,
|
||||
attention_mask=attention_mask,
|
||||
)
|
||||
encoder_output = result.last_hidden_state
|
||||
|
||||
self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.seq_length, self.hidden_size))
|
||||
|
||||
def create_and_check_model_fp16_forward(
|
||||
self,
|
||||
config,
|
||||
input_ids,
|
||||
attention_mask,
|
||||
):
|
||||
model = UdopEncoderModel(config=config).to(torch_device).half().eval()
|
||||
output = model(input_ids, attention_mask=attention_mask)["last_hidden_state"]
|
||||
self.parent.assertFalse(torch.isnan(output).any().item())
|
||||
|
||||
|
||||
class UdopEncoderOnlyModelTest(ModelTesterMixin, unittest.TestCase):
|
||||
all_model_classes = (UdopEncoderModel,) if is_torch_available() else ()
|
||||
test_pruning = False
|
||||
test_torchscript = False
|
||||
test_head_masking = False
|
||||
test_resize_embeddings = False
|
||||
test_model_parallel = True
|
||||
all_parallelizable_model_classes = (UdopEncoderModel,) if is_torch_available() else ()
|
||||
|
||||
def setUp(self):
|
||||
self.model_tester = UdopEncoderOnlyModelTester(self)
|
||||
self.config_tester = ConfigTester(self, config_class=UdopConfig, d_model=37)
|
||||
|
||||
def test_config(self):
|
||||
self.config_tester.run_common_tests()
|
||||
|
||||
def test_model(self):
|
||||
config_and_inputs = self.model_tester.prepare_config_and_inputs()
|
||||
self.model_tester.create_and_check_model(*config_and_inputs)
|
||||
|
||||
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
|
||||
def test_model_fp16_forward(self):
|
||||
config_and_inputs = self.model_tester.prepare_config_and_inputs()
|
||||
self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
|
||||
|
||||
@unittest.skip(
|
||||
"Not currently compatible. Fails with - NotImplementedError: Cannot copy out of meta tensor; no data!"
|
||||
)
|
||||
def test_save_load_low_cpu_mem_usage(self):
|
||||
pass
|
||||
|
||||
|
||||
@require_torch
|
||||
@require_sentencepiece
|
||||
@require_tokenizers
|
||||
@require_vision
|
||||
@slow
|
||||
class UdopModelIntegrationTests(unittest.TestCase):
|
||||
@cached_property
|
||||
def image(self):
|
||||
filepath = hf_hub_download(
|
||||
repo_id="hf-internal-testing/fixtures_docvqa", filename="document_2.png", repo_type="dataset"
|
||||
)
|
||||
image = Image.open(filepath).convert("RGB")
|
||||
|
||||
return image
|
||||
|
||||
@cached_property
|
||||
def processor(self):
|
||||
return UdopProcessor.from_pretrained("microsoft/udop-large")
|
||||
|
||||
@cached_property
|
||||
def model(self):
|
||||
return UdopForConditionalGeneration.from_pretrained("microsoft/udop-large").to(torch_device)
|
||||
|
||||
def test_conditional_generation(self):
|
||||
processor = self.processor
|
||||
model = self.model
|
||||
|
||||
prompt = "Question answering. In which year is the report made?"
|
||||
encoding = processor(images=self.image, text=prompt, return_tensors="pt")
|
||||
|
||||
predicted_ids = model.generate(**encoding)
|
||||
|
||||
predicted_text = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
|
||||
self.assertEqual(predicted_text, "2013")
|
tests/models/udop/test_processor_udop.py (new file, 508 lines)
@@ -0,0 +1,508 @@
|
||||
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
import unittest
|
||||
from typing import List
|
||||
|
||||
import numpy as np
|
||||
|
||||
from transformers import PreTrainedTokenizer, PreTrainedTokenizerBase, PreTrainedTokenizerFast
|
||||
from transformers.models.udop import UdopTokenizer, UdopTokenizerFast
|
||||
from transformers.testing_utils import (
|
||||
require_pytesseract,
|
||||
require_sentencepiece,
|
||||
require_tokenizers,
|
||||
require_torch,
|
||||
slow,
|
||||
)
|
||||
from transformers.utils import FEATURE_EXTRACTOR_NAME, cached_property, is_pytesseract_available, is_torch_available
|
||||
|
||||
|
||||
if is_torch_available():
|
||||
import torch
|
||||
|
||||
|
||||
if is_pytesseract_available():
|
||||
from PIL import Image
|
||||
|
||||
from transformers import LayoutLMv3ImageProcessor, UdopProcessor
|
||||
|
||||
|
||||
@require_pytesseract
|
||||
@require_sentencepiece
|
||||
@require_tokenizers
|
||||
class UdopProcessorTest(unittest.TestCase):
|
||||
tokenizer_class = UdopTokenizer
|
||||
rust_tokenizer_class = UdopTokenizerFast
|
||||
maxDiff = None
|
||||
|
||||
def setUp(self):
|
||||
image_processor_map = {
|
||||
"do_resize": True,
|
||||
"size": 224,
|
||||
"apply_ocr": True,
|
||||
}
|
||||
|
||||
self.tmpdirname = tempfile.mkdtemp()
|
||||
self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
|
||||
with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
|
||||
fp.write(json.dumps(image_processor_map) + "\n")
|
||||
|
||||
self.tokenizer_pretrained_name = "microsoft/udop-large"
|
||||
|
||||
def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer:
|
||||
return self.tokenizer_class.from_pretrained(self.tokenizer_pretrained_name, **kwargs)
|
||||
|
||||
def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast:
|
||||
return self.rust_tokenizer_class.from_pretrained(self.tokenizer_pretrained_name, **kwargs)
|
||||
|
||||
def get_tokenizers(self, **kwargs) -> List[PreTrainedTokenizerBase]:
|
||||
return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)]
|
||||
|
||||
def get_image_processor(self, **kwargs):
|
||||
return LayoutLMv3ImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
|
||||
|
||||
def tearDown(self):
|
||||
shutil.rmtree(self.tmpdirname)
|
||||
|
||||
def prepare_image_inputs(self):
|
||||
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
|
||||
or a list of PyTorch tensors if one specifies torchify=True.
|
||||
"""
|
||||
|
||||
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
|
||||
|
||||
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
|
||||
|
||||
return image_inputs
|
||||
|
||||
def test_save_load_pretrained_default(self):
|
||||
image_processor = self.get_image_processor()
|
||||
tokenizers = self.get_tokenizers()
|
||||
for tokenizer in tokenizers:
|
||||
processor = UdopProcessor(image_processor=image_processor, tokenizer=tokenizer)
|
||||
|
||||
processor.save_pretrained(self.tmpdirname)
|
||||
processor = UdopProcessor.from_pretrained(self.tmpdirname)
|
||||
|
||||
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
|
||||
self.assertIsInstance(processor.tokenizer, (UdopTokenizer, UdopTokenizerFast))
|
||||
|
||||
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
|
||||
self.assertIsInstance(processor.image_processor, LayoutLMv3ImageProcessor)
|
||||
|
||||
def test_save_load_pretrained_additional_features(self):
|
||||
processor = UdopProcessor(image_processor=self.get_image_processor(), tokenizer=self.get_tokenizer())
|
||||
processor.save_pretrained(self.tmpdirname)
|
||||
|
||||
# slow tokenizer
|
||||
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
|
||||
image_processor_add_kwargs = self.get_image_processor(do_resize=False, size=30)
|
||||
|
||||
processor = UdopProcessor.from_pretrained(
|
||||
self.tmpdirname,
|
||||
use_fast=False,
|
||||
bos_token="(BOS)",
|
||||
eos_token="(EOS)",
|
||||
do_resize=False,
|
||||
size=30,
|
||||
)
|
||||
|
||||
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
|
||||
self.assertIsInstance(processor.tokenizer, UdopTokenizer)
|
||||
|
||||
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
|
||||
self.assertIsInstance(processor.image_processor, LayoutLMv3ImageProcessor)
|
||||
|
||||
# fast tokenizer
|
||||
tokenizer_add_kwargs = self.get_rust_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
|
||||
image_processor_add_kwargs = self.get_image_processor(do_resize=False, size=30)
|
||||
|
||||
processor = UdopProcessor.from_pretrained(
|
||||
self.tmpdirname, use_xlm=True, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30
|
||||
)
|
||||
|
||||
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
|
||||
self.assertIsInstance(processor.tokenizer, UdopTokenizerFast)
|
||||
|
||||
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
|
||||
self.assertIsInstance(processor.image_processor, LayoutLMv3ImageProcessor)
|
||||
|
||||
def test_model_input_names(self):
|
||||
image_processor = self.get_image_processor()
|
||||
tokenizer = self.get_tokenizer()
|
||||
|
||||
processor = UdopProcessor(tokenizer=tokenizer, image_processor=image_processor)
|
||||
|
||||
input_str = "lower newer"
|
||||
image_input = self.prepare_image_inputs()
|
||||
|
||||
inputs = processor(text=input_str, images=image_input)
|
||||
|
||||
self.assertListEqual(list(inputs.keys()), processor.model_input_names)
|
||||
|
||||
def test_text_target(self):
|
||||
image_processor = self.get_image_processor()
|
||||
tokenizer = self.get_tokenizer()
|
||||
|
||||
processor = UdopProcessor(tokenizer=tokenizer, image_processor=image_processor)
|
||||
|
||||
text = "hello world"
|
||||
expected_decoding = "hello world</s>"
|
||||
|
||||
encoding_processor = processor(text_target=text)
|
||||
encoding_tokenizer = tokenizer(text_target=text)
|
||||
|
||||
self.assertListEqual(encoding_processor["input_ids"], [21820, 296, 1])
|
||||
self.assertListEqual(encoding_processor["attention_mask"], [1, 1, 1])
|
||||
self.assertDictEqual(dict(encoding_processor), dict(encoding_tokenizer))
|
||||
self.assertEqual(tokenizer.decode(encoding_processor["input_ids"]), expected_decoding)
|
||||
|
||||
@slow
|
||||
def test_overflowing_tokens(self):
|
||||
# In the case of overflowing tokens, test that we still have 1-to-1 mapping between the images and input_ids (sequences that are too long are broken down into multiple sequences).
|
||||
|
||||
from datasets import load_dataset
|
||||
|
||||
# set up
|
||||
datasets = load_dataset("nielsr/funsd")
|
||||
processor = UdopProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False)
|
||||
|
||||
def preprocess_data(examples):
|
||||
images = [Image.open(path).convert("RGB") for path in examples["image_path"]]
|
||||
words = examples["words"]
|
||||
boxes = examples["bboxes"]
|
||||
word_labels = examples["ner_tags"]
|
||||
encoded_inputs = processor(
|
||||
images,
|
||||
words,
|
||||
boxes=boxes,
|
||||
word_labels=word_labels,
|
||||
max_length=512,
|
||||
padding="max_length",
|
||||
truncation=True,
|
||||
return_overflowing_tokens=True,
|
||||
stride=50,
|
||||
return_offsets_mapping=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
return encoded_inputs
|
||||
|
||||
train_data = preprocess_data(datasets["train"])
|
||||
|
||||
self.assertEqual(len(train_data["pixel_values"]), len(train_data["input_ids"]))
|
||||
|
||||
|
||||
# different use cases tests
|
||||
@require_sentencepiece
|
||||
@require_torch
|
||||
@require_pytesseract
|
||||
class UdopProcessorIntegrationTests(unittest.TestCase):
|
||||
@cached_property
|
||||
def get_images(self):
|
||||
# we verify our implementation on 2 document images from the DocVQA dataset
|
||||
from datasets import load_dataset
|
||||
|
||||
ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
|
||||
|
||||
image_1 = Image.open(ds[0]["file"]).convert("RGB")
|
||||
image_2 = Image.open(ds[1]["file"]).convert("RGB")
|
||||
|
||||
return image_1, image_2
|
||||
|
||||
@cached_property
|
||||
def get_tokenizers(self):
|
||||
slow_tokenizer = UdopTokenizer.from_pretrained("microsoft/udop-large")
|
||||
fast_tokenizer = UdopTokenizerFast.from_pretrained("microsoft/udop-large")
|
||||
return [slow_tokenizer, fast_tokenizer]
|
||||
|
||||
@slow
|
||||
def test_processor_case_1(self):
|
||||
# case 1: document image classification (training, inference) + token classification (inference), apply_ocr = True
|
||||
|
||||
image_processor = LayoutLMv3ImageProcessor()
|
||||
tokenizers = self.get_tokenizers
|
||||
images = self.get_images
|
||||
|
||||
for tokenizer in tokenizers:
|
||||
processor = UdopProcessor(image_processor=image_processor, tokenizer=tokenizer)
|
||||
|
||||
# not batched
|
||||
input_image_processor = image_processor(images[0], return_tensors="pt")
|
||||
input_processor = processor(images[0], return_tensors="pt")
|
||||
|
||||
# verify keys
|
||||
expected_keys = ["attention_mask", "bbox", "input_ids", "pixel_values"]
|
||||
actual_keys = sorted(input_processor.keys())
|
||||
self.assertListEqual(actual_keys, expected_keys)
|
||||
|
||||
# verify pixel_values
|
||||
self.assertTrue(
|
||||
torch.allclose(input_image_processor["pixel_values"], input_processor["pixel_values"], atol=1e-2)
|
||||
)
|
||||
|
||||
# verify input_ids
|
||||
# this was obtained with Tesseract 4.1.1
|
||||
# fmt: off
|
||||
expected_decoding = "11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President “Introductory Remarks” Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer</s>" # noqa: E231
|
||||
# fmt: on
|
||||
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
|
||||
self.assertSequenceEqual(decoding, expected_decoding)
|
||||
|
||||
# batched
|
||||
input_image_processor = image_processor(images, return_tensors="pt")
|
||||
input_processor = processor(images, padding=True, return_tensors="pt")
|
||||
|
||||
# verify keys
|
||||
expected_keys = ["attention_mask", "bbox", "input_ids", "pixel_values"]
|
||||
actual_keys = sorted(input_processor.keys())
|
||||
self.assertListEqual(actual_keys, expected_keys)
|
||||
|
||||
# verify pixel_values
|
||||
self.assertTrue(
|
||||
torch.allclose(input_image_processor["pixel_values"], input_processor["pixel_values"], atol=1e-2)
|
||||
)
|
||||
|
||||
# verify input_ids
|
||||
# this was obtained with Tesseract 4.1.1
|
||||
# fmt: off
|
||||
expected_decoding = "7 ITC Limited REPORT AND ACCOUNTS 2013 ITC’s Brands: An Asset for the Nation The consumer needs and aspirations they fulfil, the benefit they generate for millions across ITC’s value chains, the future-ready capabilities that support them, and the value that they create for the country, have made ITC’s brands national assets, adding to India’s competitiveness. It is ITC’s aspiration to be the No 1 FMCG player in the country, driven by its new FMCG businesses. A recent Nielsen report has highlighted that ITC's new FMCG businesses are the fastest growing among the top consumer goods companies operating in India. ITC takes justifiable pride that, along with generating economic value, these celebrated Indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. DI WILLS * ; LOVE DELIGHTFULLY SOFT SKIN? aia Ans Source: https://www.industrydocuments.ucsf.edu/docs/snbx0223</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>" # noqa: E231
|
||||
# fmt: on
|
||||
decoding = processor.decode(input_processor.input_ids[1].tolist())
|
||||
self.assertSequenceEqual(decoding, expected_decoding)
|
||||
|
||||
@slow
|
||||
def test_processor_case_2(self):
|
||||
# case 2: document image classification (training, inference) + token classification (inference), apply_ocr=False
|
||||
|
||||
image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
|
||||
tokenizers = self.get_tokenizers
|
||||
images = self.get_images
|
||||
|
||||
for tokenizer in tokenizers:
|
||||
processor = UdopProcessor(image_processor=image_processor, tokenizer=tokenizer)
|
||||
|
||||
# not batched
|
||||
words = ["hello", "world"]
|
||||
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
|
||||
input_processor = processor(images[0], words, boxes=boxes, return_tensors="pt")
|
||||
|
||||
# verify keys
|
||||
expected_keys = ["attention_mask", "bbox", "input_ids", "pixel_values"]
|
||||
actual_keys = list(input_processor.keys())
|
||||
for key in expected_keys:
|
||||
self.assertIn(key, actual_keys)
|
||||
|
||||
# verify input_ids
|
||||
expected_decoding = "hello world</s>"
|
||||
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
|
||||
self.assertSequenceEqual(decoding, expected_decoding)
|
||||
|
||||
# batched
|
||||
words = [["hello", "world"], ["my", "name", "is", "niels"]]
|
||||
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
|
||||
input_processor = processor(images, words, boxes=boxes, padding=True, return_tensors="pt")
|
||||
|
||||
# verify keys
|
||||
expected_keys = ["attention_mask", "bbox", "input_ids", "pixel_values"]
|
||||
actual_keys = sorted(input_processor.keys())
|
||||
self.assertListEqual(actual_keys, expected_keys)
|
||||
|
||||
# verify input_ids
|
||||
expected_decoding = "hello world</s><pad><pad><pad><pad>"
|
||||
decoding = processor.decode(input_processor.input_ids[0].tolist())
|
||||
self.assertSequenceEqual(decoding, expected_decoding)
|
||||
|
||||
# verify bbox
|
||||
expected_bbox = [
|
||||
[3, 2, 5, 1],
|
||||
[6, 7, 4, 2],
|
||||
[3, 9, 2, 4],
|
||||
[1, 1, 2, 3],
|
||||
[1, 1, 2, 3],
|
||||
[1, 1, 2, 3],
|
||||
[1000, 1000, 1000, 1000],
|
||||
]
|
||||
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
|
||||
|
||||
@slow
|
||||
def test_processor_case_3(self):
|
||||
# case 3: token classification (training), apply_ocr=False
|
||||
|
||||
image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
|
||||
tokenizers = self.get_tokenizers
|
||||
images = self.get_images
|
||||
|
||||
for tokenizer in tokenizers:
|
||||
processor = UdopProcessor(image_processor=image_processor, tokenizer=tokenizer)
|
||||
|
||||
# not batched
|
||||
words = ["weirdly", "world"]
|
||||
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
|
||||
word_labels = [1, 2]
|
||||
input_processor = processor(images[0], words, boxes=boxes, word_labels=word_labels, return_tensors="pt")
|
||||
|
||||
# verify keys
|
||||
expected_keys = ["attention_mask", "bbox", "input_ids", "labels", "pixel_values"]
|
||||
actual_keys = sorted(input_processor.keys())
|
||||
self.assertListEqual(actual_keys, expected_keys)
|
||||
|
||||
# verify input_ids
|
||||
expected_decoding = "weirdly world</s>"
|
||||
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
|
||||
self.assertSequenceEqual(decoding, expected_decoding)
|
||||
|
||||
# verify labels
|
||||
expected_labels = [1, -100, 2, -100]
|
||||
self.assertListEqual(input_processor.labels.squeeze().tolist(), expected_labels)
|
||||
|
||||
# batched
|
||||
words = [["hello", "world"], ["my", "name", "is", "niels"]]
|
||||
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
|
||||
word_labels = [[1, 2], [6, 3, 10, 2]]
|
||||
input_processor = processor(
|
||||
images, words, boxes=boxes, word_labels=word_labels, padding=True, return_tensors="pt"
|
||||
)
|
||||
|
||||
# verify keys
|
||||
expected_keys = ["attention_mask", "bbox", "input_ids", "labels", "pixel_values"]
|
||||
actual_keys = sorted(input_processor.keys())
|
||||
self.assertListEqual(actual_keys, expected_keys)
|
||||
|
||||
            # verify input_ids
            expected_decoding = "my name is niels</s>"
            decoding = processor.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # verify bbox
            expected_bbox = [
                [3, 2, 5, 1],
                [6, 7, 4, 2],
                [3, 9, 2, 4],
                [1, 1, 2, 3],
                [1, 1, 2, 3],
                [1, 1, 2, 3],
                [1000, 1000, 1000, 1000],
            ]
            self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)

            # verify labels
            expected_labels = [6, 3, 10, 2, -100, -100, -100]
            self.assertListEqual(input_processor.labels[1].tolist(), expected_labels)

    @slow
    def test_processor_case_4(self):
        # case 4: visual question answering (inference), apply_ocr=True

        image_processor = LayoutLMv3ImageProcessor()
        tokenizers = self.get_tokenizers
        images = self.get_images

        for tokenizer in tokenizers:
            processor = UdopProcessor(image_processor=image_processor, tokenizer=tokenizer)

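            # with apply_ocr=True only the image and the question are passed;
            # the document words and their boxes are produced by Tesseract inside the image processor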
            # not batched
            question = "What's his name?"
            input_processor = processor(images[0], question, return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "input_ids", "pixel_values"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            # this was obtained with Tesseract 4.1.1
            # fmt: off
            expected_decoding = "What's his name?</s> 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President “Introductory Remarks” Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer</s>"  # noqa: E231
            # fmt: on
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # batched
            questions = ["How old is he?", "what's the time"]
            input_processor = processor(
                images, questions, padding="max_length", max_length=20, truncation=True, return_tensors="pt"
            )

            # verify keys
            expected_keys = ["attention_mask", "bbox", "input_ids", "pixel_values"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            # this was obtained with Tesseract 4.1.1
            expected_decoding = "what's the time</s> 7 ITC Limited REPORT AND ACCOUNTS 2013 I</s>"
            decoding = processor.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # verify bbox
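            # question tokens appear to get a [0, 0, 0, 0] box, each </s> gets [1000, 1000, 1000, 1000],
            # OCR'd words keep their Tesseract boxes, and the sequence is padded/truncated to max_length=20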
            # fmt: off
            expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [289, 59, 365, 66], [289, 59, 365, 66], [289, 59, 365, 66], [372, 59, 407, 66], [74, 136, 161, 158], [1000, 1000, 1000, 1000]]  # noqa: E231
            # fmt: on
            self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)

    @slow
    def test_processor_case_5(self):
        # case 5: visual question answering (inference), apply_ocr=False

        image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
        tokenizers = self.get_tokenizers
        images = self.get_images

        for tokenizer in tokenizers:
            processor = UdopProcessor(image_processor=image_processor, tokenizer=tokenizer)

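            # with apply_ocr=False the caller has to supply the question as well as the document words and boxes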
            # not batched
            question = "What's his name?"
            words = ["hello", "world"]
            boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
            input_processor = processor(images[0], question, words, boxes, return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "input_ids", "pixel_values"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            expected_decoding = "What's his name?</s> hello world</s>"
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

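            # as the expected decoding above shows, the question is encoded first, followed by </s> and then the word tokens
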
            # batched
            questions = ["How old is he?", "what's the time"]
            words = [["hello", "world"], ["my", "name", "is", "niels"]]
            boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
            input_processor = processor(images, questions, words, boxes, padding=True, return_tensors="pt")

            # verify keys
            expected_keys = ["attention_mask", "bbox", "input_ids", "pixel_values"]
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)

            # verify input_ids
            expected_decoding = "How old is he?</s> hello world</s><pad><pad><pad>"
            decoding = processor.decode(input_processor.input_ids[0].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            expected_decoding = "what's the time</s> my name is niels</s>"
            decoding = processor.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

            # verify bbox
            expected_bbox = [[3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000]]
            self.assertListEqual(input_processor.bbox[1].tolist()[-5:], expected_bbox)
1886  tests/models/udop/test_tokenization_udop.py  (Normal file)
File diff suppressed because it is too large
@@ -84,6 +84,8 @@ SPECIAL_CASES_TO_ALLOW = {
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
    # used internally in the configuration class file
    "UdopConfig": ["feed_forward_proj"],
    # Actually used in the config or generation config, in that case necessary for the sub-components generation
    "SeamlessM4TConfig": [
        "max_new_tokens",
@ -61,6 +61,7 @@ PATH_TO_DOC = "docs/source/en"
|
||||
PRIVATE_MODELS = [
|
||||
"AltRobertaModel",
|
||||
"DPRSpanPredictor",
|
||||
"UdopStack",
|
||||
"LongT5Stack",
|
||||
"RealmBertModel",
|
||||
"T5Stack",
|
||||
@@ -304,6 +305,7 @@ IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [
    "SeamlessM4TCodeHifiGan",
    "SeamlessM4TForSpeechToSpeech",  # no auto class for speech-to-speech
    "TvpForVideoGrounding",
    "UdopForConditionalGeneration",
    "SeamlessM4Tv2NARTextToUnitModel",
    "SeamlessM4Tv2NARTextToUnitForConditionalGeneration",
    "SeamlessM4Tv2CodeHifiGan",