Mirror of https://github.com/huggingface/transformers.git (synced 2025-07-03 12:50:06 +06:00)
Add support for conditional detr (#18948)
* added conditional_detr files
* checked copies
* fixed style and copies
* fixed hub
* fixed style
* Update README.md
* Update README_ko.md
* Update docs/source/en/_toctree.yml
* Update docs/source/en/index.mdx
* Update docs/source/en/model_doc/conditional_detr.mdx
* Update src/transformers/models/conditional_detr/configuration_conditional_detr.py
* Update src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py
* Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py
* Update src/transformers/models/conditional_detr/modeling_conditional_detr.py
* fixed some doc issues
* changed prefix to ConditionalDetr
* fixed docs
* added spatial_model_name
* fixed fix-copies
* added some "copied from" comments
* fixed use_pretrained issue
* changed post-process
* fix style, quality and copies
* add more fix-copies
* fixed some variable names & added more fix-copies
* added more copied-from and fixed the issue in feature_extraction_auto
* fixed quality
* changed pretrained config
* rebased

Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>
Co-authored-by: Depu Meng <depumeng@Depus-MacBook-Pro.local>
This commit is contained in:
parent c7fd28999f
commit 126a739058
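For orientation, a minimal inference sketch with the classes this commit adds, using the `microsoft/conditional-detr-resnet-50` checkpoint referenced in the new configuration file; the post-processing call assumes `post_process` follows the usual DETR convention (a list of dicts with `scores`, `labels` and `boxes`) listed in the new model doc below:

```python
import requests
import torch
from PIL import Image

from transformers import ConditionalDetrFeatureExtractor, ConditionalDetrForObjectDetection

# COCO sample image commonly used in the transformers docs
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

feature_extractor = ConditionalDetrFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50")
model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50")

inputs = feature_extractor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Rescale the predicted boxes to the original image size (height, width)
target_sizes = torch.tensor([image.size[::-1]])
results = feature_extractor.post_process(outputs, target_sizes=target_sizes)[0]
print(results["scores"].shape, results["labels"].shape, results["boxes"].shape)
```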
@@ -278,6 +278,7 @@ Current number of checkpoints:
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
@@ -228,6 +228,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
@@ -252,6 +252,7 @@ conda install -c huggingface transformers
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (来自 Google Research) 伴随论文 [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) 由 Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting 发布。
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (来自 OpenAI) 伴随论文 [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) 由 Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever 发布。
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (来自 Salesforce) 伴随论文 [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) 由 Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong 发布。
1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (来自 Microsoft Research Asia) 伴随论文 [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) 由 Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang 发布。
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (来自 YituTech) 伴随论文 [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) 由 Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan 发布。
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (来自 Facebook AI) 伴随论文 [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) 由 Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie 发布。
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (来自 Tsinghua University) 伴随论文 [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) 由 Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun 发布。
@@ -264,6 +264,7 @@ conda install -c huggingface transformers
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
@@ -362,6 +362,8 @@
    sections:
    - local: model_doc/beit
      title: BEiT
    - local: model_doc/conditional_detr
      title: Conditional DETR
    - local: model_doc/convnext
      title: ConvNeXT
    - local: model_doc/cvt
@@ -68,6 +68,7 @@ The documentation is organized into five sections:
1. **[CANINE](model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
1. **[CLIP](model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
1. **[CodeGen](model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
1. **[Conditional DETR](model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[ConvBERT](model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
1. **[ConvNeXT](model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
1. **[CPM](model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
@@ -215,6 +216,7 @@ Flax), PyTorch, and/or TensorFlow.
| CANINE | ✅ | ❌ | ✅ | ❌ | ❌ |
| CLIP | ✅ | ✅ | ✅ | ✅ | ✅ |
| CodeGen | ✅ | ✅ | ✅ | ❌ | ❌ |
| Conditional DETR | ❌ | ❌ | ✅ | ❌ | ❌ |
| ConvBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
| ConvNeXT | ❌ | ❌ | ✅ | ✅ | ❌ |
| CTRL | ✅ | ❌ | ✅ | ✅ | ❌ |
docs/source/en/model_doc/conditional_detr.mdx (new file, 53 lines)
@@ -0,0 +1,53 @@
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Conditional DETR

## Overview

The Conditional DETR model was proposed in [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang. Conditional DETR presents a conditional cross-attention mechanism for fast DETR training. Conditional DETR converges 6.7× to 10× faster than DETR.

The abstract from the paper is the following:

*The recently-developed DETR approach applies the transformer encoder and decoder architecture to object detection and achieves promising performance. In this paper, we handle the critical issue, slow training convergence, and present a conditional cross-attention mechanism for fast DETR training. Our approach is motivated by that the cross-attention in DETR relies highly on the content embeddings for localizing the four extremities and predicting the box, which increases the need for high-quality content embeddings and thus the training difficulty. Our approach, named conditional DETR, learns a conditional spatial query from the decoder embedding for decoder multi-head cross-attention. The benefit is that through the conditional spatial query, each cross-attention head is able to attend to a band containing a distinct region, e.g., one object extremity or a region inside the object box. This narrows down the spatial range for localizing the distinct regions for object classification and box regression, thus relaxing the dependence on the content embeddings and easing the training. Empirical results show that conditional DETR converges 6.7× faster for the backbones R50 and R101 and 10× faster for stronger backbones DC5-R50 and DC5-R101. Code is available at https://github.com/Atten4Vis/ConditionalDETR.*

This model was contributed by [DepuMeng](https://huggingface.co/DepuMeng). The original code can be found [here](https://github.com/Atten4Vis/ConditionalDETR).

## ConditionalDetrConfig

[[autodoc]] ConditionalDetrConfig

## ConditionalDetrFeatureExtractor

[[autodoc]] ConditionalDetrFeatureExtractor
    - __call__
    - pad_and_create_pixel_mask
    - post_process
    - post_process_segmentation
    - post_process_panoptic

## ConditionalDetrModel

[[autodoc]] ConditionalDetrModel
    - forward

## ConditionalDetrForObjectDetection

[[autodoc]] ConditionalDetrForObjectDetection
    - forward

## ConditionalDetrForSegmentation

[[autodoc]] ConditionalDetrForSegmentation
    - forward
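The feature extractor documented above also exposes `pad_and_create_pixel_mask` for batching images of different sizes; a small sketch, assuming the method keeps the signature of the DETR feature extractor it is copied from (file names are placeholders):

```python
from PIL import Image

from transformers import ConditionalDetrFeatureExtractor

feature_extractor = ConditionalDetrFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50")

# Two images of different sizes (hypothetical local files, for illustration only)
images = [Image.open("cat.jpg"), Image.open("street.jpg")]

# Resize + normalize each image individually, then pad to the largest one in the batch
pixel_values_list = [feature_extractor(images=img, return_tensors="pt")["pixel_values"][0] for img in images]
batch = feature_extractor.pad_and_create_pixel_mask(pixel_values_list, return_tensors="pt")
print(batch["pixel_values"].shape, batch["pixel_mask"].shape)
```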
@@ -57,6 +57,7 @@ Ready-made configurations include the following architectures:
- CamemBERT
- CLIP
- CodeGen
- Conditional DETR
- ConvBERT
- ConvNeXT
- Data2VecText
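With Conditional DETR in this list, an ONNX export can go through the `ConditionalDetrOnnxConfig` added later in this commit. A rough sketch, assuming the generic `transformers.onnx.export` helper with its usual `(preprocessor, model, config, opset, output)` signature:

```python
from pathlib import Path

from transformers import ConditionalDetrFeatureExtractor, ConditionalDetrModel
from transformers.models.conditional_detr import ConditionalDetrOnnxConfig
from transformers.onnx import export

model_name = "microsoft/conditional-detr-resnet-50"
feature_extractor = ConditionalDetrFeatureExtractor.from_pretrained(model_name)
model = ConditionalDetrModel.from_pretrained(model_name)

# default_onnx_opset is 12 for this architecture (see the ONNX config below)
onnx_config = ConditionalDetrOnnxConfig(model.config)
onnx_inputs, onnx_outputs = export(
    feature_extractor, model, onnx_config, onnx_config.default_onnx_opset, Path("conditional_detr.onnx")
)
print(onnx_inputs, onnx_outputs)
```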
@@ -172,6 +172,7 @@ _import_structure = {
        "CLIPVisionConfig",
    ],
    "models.codegen": ["CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP", "CodeGenConfig", "CodeGenTokenizer"],
    "models.conditional_detr": ["CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig"],
    "models.convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertTokenizer"],
    "models.convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig"],
    "models.cpm": [],
@@ -660,6 +661,7 @@ else:
    _import_structure["models.convnext"].append("ConvNextFeatureExtractor")
    _import_structure["models.deit"].append("DeiTFeatureExtractor")
    _import_structure["models.detr"].append("DetrFeatureExtractor")
    _import_structure["models.conditional_detr"].append("ConditionalDetrFeatureExtractor")
    _import_structure["models.donut"].append("DonutFeatureExtractor")
    _import_structure["models.dpt"].append("DPTFeatureExtractor")
    _import_structure["models.flava"].extend(["FlavaFeatureExtractor", "FlavaProcessor"])
@@ -708,6 +710,15 @@ else:
            "DetrPreTrainedModel",
        ]
    )
    _import_structure["models.conditional_detr"].extend(
        [
            "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
            "ConditionalDetrForObjectDetection",
            "ConditionalDetrForSegmentation",
            "ConditionalDetrModel",
            "ConditionalDetrPreTrainedModel",
        ]
    )

try:
    if not is_scatter_available():
@@ -3075,6 +3086,7 @@ if TYPE_CHECKING:
        CLIPVisionConfig,
    )
    from .models.codegen import CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP, CodeGenConfig, CodeGenTokenizer
    from .models.conditional_detr import CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig
    from .models.convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertTokenizer
    from .models.convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig
    from .models.ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig, CTRLTokenizer
@@ -3498,6 +3510,7 @@ if TYPE_CHECKING:
        from .image_utils import ImageFeatureExtractionMixin
        from .models.beit import BeitFeatureExtractor
        from .models.clip import CLIPFeatureExtractor
        from .models.conditional_detr import ConditionalDetrFeatureExtractor
        from .models.convnext import ConvNextFeatureExtractor
        from .models.deit import DeiTFeatureExtractor
        from .models.detr import DetrFeatureExtractor
@@ -3527,6 +3540,13 @@ if TYPE_CHECKING:
    except OptionalDependencyNotAvailable:
        from .utils.dummy_timm_and_vision_objects import *
    else:
        from .models.conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )
        from .models.deformable_detr import (
            DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeformableDetrForObjectDetection,
@@ -38,6 +38,7 @@ from . import (
    canine,
    clip,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    cpm,
@@ -43,6 +43,7 @@ CONFIG_MAPPING_NAMES = OrderedDict(
        ("canine", "CanineConfig"),
        ("clip", "CLIPConfig"),
        ("codegen", "CodeGenConfig"),
        ("conditional_detr", "ConditionalDetrConfig"),
        ("convbert", "ConvBertConfig"),
        ("convnext", "ConvNextConfig"),
        ("ctrl", "CTRLConfig"),
@@ -175,6 +176,7 @@ CONFIG_ARCHIVE_MAP_MAPPING_NAMES = OrderedDict(
        ("canine", "CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("clip", "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("codegen", "CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("conditional_detr", "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("convbert", "CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("convnext", "CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("ctrl", "CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP"),
@@ -300,6 +302,7 @@ MODEL_NAMES_MAPPING = OrderedDict(
        ("canine", "CANINE"),
        ("clip", "CLIP"),
        ("codegen", "CodeGen"),
        ("conditional_detr", "Conditional DETR"),
        ("convbert", "ConvBERT"),
        ("convnext", "ConvNeXT"),
        ("cpm", "CPM"),
@@ -39,6 +39,7 @@ FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("beit", "BeitFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
@@ -46,7 +47,6 @@ FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
        ("deformable_detr", "DetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("donut", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
@@ -42,6 +42,7 @@ MODEL_MAPPING_NAMES = OrderedDict(
        ("canine", "CanineModel"),
        ("clip", "CLIPModel"),
        ("codegen", "CodeGenModel"),
        ("conditional_detr", "ConditionalDetrModel"),
        ("convbert", "ConvBertModel"),
        ("convnext", "ConvNextModel"),
        ("ctrl", "CTRLModel"),
@@ -455,6 +456,7 @@ MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Object Detection mapping
        ("conditional_detr", "ConditionalDetrForObjectDetection"),
        ("deformable_detr", "DeformableDetrForObjectDetection"),
        ("detr", "DetrForObjectDetection"),
        ("yolos", "YolosForObjectDetection"),
src/transformers/models/conditional_detr/__init__.py (new file, 87 lines)
@@ -0,0 +1,87 @@
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_timm_available, is_vision_available


_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]

try:
    if not is_timm_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor

    try:
        if not is_timm_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
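The lazy-import structure above only registers the feature extractor when vision support (Pillow) is available and the modeling classes when `timm` is installed; a short sketch of guarding for that at a call site, using the same utility functions the module imports:

```python
from transformers.utils import is_timm_available, is_vision_available

if is_vision_available():
    from transformers import ConditionalDetrFeatureExtractor  # needs Pillow

if is_timm_available():
    from transformers import ConditionalDetrForObjectDetection  # backbone is built through timm
else:
    print("Install `timm` to use the Conditional DETR modeling classes.")
```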
@@ -0,0 +1,240 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional DETR model configuration"""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ConditionalDetrModel`]. It is used to instantiate
    a Conditional DETR model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Conditional DETR
    [microsoft/conditional-detr-resnet-50](https://huggingface.co/microsoft/conditional-detr-resnet-50) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        num_queries (`int`, *optional*, defaults to 300):
            Number of object queries, i.e. detection slots. This is the maximal number of objects
            [`ConditionalDetrModel`] can detect in a single image. For COCO, we recommend 100 queries.
        d_model (`int`, *optional*, defaults to 256):
            Dimension of the layers.
        encoder_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in the decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in the encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        init_xavier_std (`float`, *optional*, defaults to 1):
            The scaling factor used for the Xavier initialization gain in the HM Attention map module.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        auxiliary_loss (`bool`, *optional*, defaults to `False`):
            Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
        position_embedding_type (`str`, *optional*, defaults to `"sine"`):
            Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
        backbone (`str`, *optional*, defaults to `"resnet50"`):
            Name of convolutional backbone to use. Supports any convolutional backbone from the timm package. For a
            list of all available models, see [this
            page](https://rwightman.github.io/pytorch-image-models/#load-a-pretrained-model).
        use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
            Whether to use pretrained weights for the backbone.
        dilation (`bool`, *optional*, defaults to `False`):
            Whether to replace stride with dilation in the last convolutional block (DC5).
        class_cost (`float`, *optional*, defaults to 2):
            Relative weight of the classification error in the Hungarian matching cost.
        bbox_cost (`float`, *optional*, defaults to 5):
            Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
        giou_cost (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
        mask_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the Focal loss in the panoptic segmentation loss.
        dice_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
        bbox_loss_coefficient (`float`, *optional*, defaults to 5):
            Relative weight of the L1 bounding box loss in the object detection loss.
        giou_loss_coefficient (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss in the object detection loss.
        eos_coefficient (`float`, *optional*, defaults to 0.1):
            Relative classification weight of the 'no-object' class in the object detection loss.

    Examples:

    ```python
    >>> from transformers import ConditionalDetrModel, ConditionalDetrConfig

    >>> # Initializing a Conditional DETR microsoft/conditional-detr-resnet-50 style configuration
    >>> configuration = ConditionalDetrConfig()

    >>> # Initializing a model from the microsoft/conditional-detr-resnet-50 style configuration
    >>> model = ConditionalDetrModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        classifier_dropout=0.0,
        scale_embedding=False,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs
    ):
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class ConditionalDetrOnnxConfig(OnnxConfig):

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
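A small sketch of customizing the configuration defined above, e.g. switching to a dilated ResNet-101 backbone from timm (argument names as in the class above; the resulting model is randomly initialized):

```python
from transformers import ConditionalDetrConfig, ConditionalDetrForObjectDetection

config = ConditionalDetrConfig(
    backbone="resnet101",  # any convolutional timm backbone name is accepted
    dilation=True,         # DC5 variant: replace stride with dilation in the last block
    auxiliary_loss=True,   # add a loss at every decoder layer
)
model = ConditionalDetrForObjectDetection(config)
print(config.backbone, config.d_model, config.num_queries)
```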
@@ -0,0 +1,325 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Conditional DETR checkpoints."""


import argparse
import json
from collections import OrderedDict
from pathlib import Path

import torch
from PIL import Image

import requests
from huggingface_hub import hf_hub_download
from transformers import (
    ConditionalDetrConfig,
    ConditionalDetrFeatureExtractor,
    ConditionalDetrForObjectDetection,
    ConditionalDetrForSegmentation,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
|
||||
)
|
||||
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
|
||||
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
|
||||
|
||||
# q, k, v projections in self/cross-attention in decoder for conditional DETR
|
||||
rename_keys.append(
|
||||
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
|
||||
)
|
||||
rename_keys.append(
|
||||
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
|
||||
)
|
||||
rename_keys.append(
|
||||
(f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
|
||||
)
|
||||
rename_keys.append(
|
||||
(f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
|
||||
)
|
||||
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
|
||||
rename_keys.append(
|
||||
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
|
||||
)
|
||||
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
|
||||
rename_keys.append(
|
||||
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
|
||||
)
|
||||
rename_keys.append(
|
||||
(f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
|
||||
)
|
||||
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
|
||||
rename_keys.append(
|
||||
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
|
||||
)
|
||||
|
||||
rename_keys.append(
|
||||
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
|
||||
)
|
||||
rename_keys.append(
|
||||
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
|
||||
)
|
||||
rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
|
||||
rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
|
||||
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
|
||||
rename_keys.append(
|
||||
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
|
||||
)
|
||||
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
|
||||
rename_keys.append(
|
||||
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
|
||||
)
|
||||
rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
|
||||
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
|
||||
rename_keys.append(
|
||||
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
|
||||
)
|
||||
|
||||
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
|
||||
# for conditional DETR, also convert reference point head and query scale MLP
|
||||
rename_keys.extend(
|
||||
[
|
||||
("input_proj.weight", "input_projection.weight"),
|
||||
("input_proj.bias", "input_projection.bias"),
|
||||
("query_embed.weight", "query_position_embeddings.weight"),
|
||||
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
|
||||
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
|
||||
("class_embed.weight", "class_labels_classifier.weight"),
|
||||
("class_embed.bias", "class_labels_classifier.bias"),
|
||||
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
|
||||
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
|
||||
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
|
||||
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
|
||||
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
|
||||
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
|
||||
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
|
||||
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
|
||||
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
|
||||
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
|
||||
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
|
||||
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
|
||||
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
|
||||
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
|
||||
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
|
||||
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
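# Quick sanity sketch (the key names are made up) of the renaming above: only the
# "backbone.0.body" prefix is rewritten, everything else is copied through unchanged.
example = OrderedDict({"backbone.0.body.conv1.weight": None, "query_embed.weight": None})
renamed = rename_backbone_keys(example)
assert "backbone.conv_encoder.model.conv1.weight" in renamed and "query_embed.weight" in renamed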
|
||||
|
||||
|
||||
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
|
||||
|
||||
|
||||
# We will verify our results on an image of cute cats
|
||||
def prepare_img():
|
||||
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
||||
im = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
return im
|
||||
|
||||
|
||||
@torch.no_grad()
|
||||
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
|
||||
"""
|
||||
Copy/paste/tweak model's weights to our CONDITIONAL_DETR structure.
|
||||
"""
|
||||
|
||||
# load default config
|
||||
config = ConditionalDetrConfig()
|
||||
# set backbone and dilation attributes
|
||||
if "resnet101" in model_name:
|
||||
config.backbone = "resnet101"
|
||||
if "dc5" in model_name:
|
||||
config.dilation = True
|
||||
is_panoptic = "panoptic" in model_name
|
||||
if is_panoptic:
|
||||
config.num_labels = 250
|
||||
else:
|
||||
config.num_labels = 91
|
||||
repo_id = "datasets/huggingface/label-files"
|
||||
filename = "coco-detection-id2label.json"
|
||||
id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
|
||||
id2label = {int(k): v for k, v in id2label.items()}
|
||||
config.id2label = id2label
|
||||
config.label2id = {v: k for k, v in id2label.items()}
|
||||
|
||||
# load feature extractor
|
||||
format = "coco_panoptic" if is_panoptic else "coco_detection"
|
||||
feature_extractor = ConditionalDetrFeatureExtractor(format=format)
|
||||
|
||||
# prepare image
|
||||
img = prepare_img()
|
||||
encoding = feature_extractor(images=img, return_tensors="pt")
|
||||
pixel_values = encoding["pixel_values"]
|
||||
|
||||
logger.info(f"Converting model {model_name}...")
|
||||
|
||||
# load original model from torch hub
|
||||
conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
|
||||
state_dict = conditional_detr.state_dict()
|
||||
# rename keys
|
||||
for src, dest in rename_keys:
|
||||
if is_panoptic:
|
||||
src = "conditional_detr." + src
|
||||
rename_key(state_dict, src, dest)
|
||||
state_dict = rename_backbone_keys(state_dict)
|
||||
# query, key and value matrices need special treatment
|
||||
read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
|
||||
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
|
||||
prefix = "conditional_detr.model." if is_panoptic else "model."
|
||||
for key in state_dict.copy().keys():
|
||||
if is_panoptic:
|
||||
if (
|
||||
key.startswith("conditional_detr")
|
||||
and not key.startswith("class_labels_classifier")
|
||||
and not key.startswith("bbox_predictor")
|
||||
):
|
||||
val = state_dict.pop(key)
|
||||
state_dict["conditional_detr.model" + key[4:]] = val
|
||||
elif "class_labels_classifier" in key or "bbox_predictor" in key:
|
||||
val = state_dict.pop(key)
|
||||
state_dict["conditional_detr." + key] = val
|
||||
elif key.startswith("bbox_attention") or key.startswith("mask_head"):
|
||||
continue
|
||||
else:
|
||||
val = state_dict.pop(key)
|
||||
state_dict[prefix + key] = val
|
||||
else:
|
||||
if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
|
||||
val = state_dict.pop(key)
|
||||
state_dict[prefix + key] = val
|
||||
# finally, create HuggingFace model and load state dict
|
||||
model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
|
||||
model.load_state_dict(state_dict)
|
||||
model.eval()
|
||||
model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
|
||||
# verify our conversion
|
||||
original_outputs = conditional_detr(pixel_values)
|
||||
outputs = model(pixel_values)
|
||||
assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
|
||||
assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
|
||||
if is_panoptic:
|
||||
assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
|
||||
|
||||
# Save model and feature extractor
|
||||
logger.info(f"Saving PyTorch model and feature extractor to {pytorch_dump_folder_path}...")
|
||||
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
|
||||
model.save_pretrained(pytorch_dump_folder_path)
|
||||
feature_extractor.save_pretrained(pytorch_dump_folder_path)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
|
||||
parser.add_argument(
|
||||
"--model_name",
|
||||
default="conditional_detr_resnet50",
|
||||
type=str,
|
||||
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
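# Example invocation of this conversion script (the dump folder is a placeholder path;
# model_name must be one of the torch-hub names exposed by DeppMeng/ConditionalDETR):
#
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50_converted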
|
@ -0,0 +1,949 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""Feature extractor class for Conditional DETR."""
|
||||
|
||||
import io
|
||||
import pathlib
|
||||
from collections import defaultdict
|
||||
from typing import Dict, List, Optional, Union
|
||||
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
|
||||
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
|
||||
from ...image_utils import ImageFeatureExtractionMixin, is_torch_tensor
|
||||
from ...utils import TensorType, is_torch_available, logging
|
||||
|
||||
|
||||
if is_torch_available():
|
||||
import torch
|
||||
from torch import nn
|
||||
|
||||
logger = logging.get_logger(__name__)
|
||||
|
||||
|
||||
ImageInput = Union[Image.Image, np.ndarray, "torch.Tensor", List[Image.Image], List[np.ndarray], List["torch.Tensor"]]
|
||||
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.center_to_corners_format
|
||||
def center_to_corners_format(x):
|
||||
"""
|
||||
Converts a PyTorch tensor of bounding boxes of center format (center_x, center_y, width, height) to corners format
|
||||
(x_0, y_0, x_1, y_1).
|
||||
"""
|
||||
x_c, y_c, w, h = x.unbind(-1)
|
||||
b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
|
||||
return torch.stack(b, dim=-1)
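# Hedged numeric example for the helper above: a unit square centered at (0.5, 0.5) in
# (center_x, center_y, width, height) format maps to the corners (0, 0, 1, 1).
# center_to_corners_format(torch.tensor([[0.5, 0.5, 1.0, 1.0]]))
# -> tensor([[0., 0., 1., 1.]])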
|
||||
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.corners_to_center_format
|
||||
def corners_to_center_format(x):
|
||||
"""
|
||||
Converts a NumPy array of bounding boxes of shape (number of bounding boxes, 4) of corners format (x_0, y_0, x_1,
|
||||
y_1) to center format (center_x, center_y, width, height).
|
||||
"""
|
||||
x_transposed = x.T
|
||||
x0, y0, x1, y1 = x_transposed[0], x_transposed[1], x_transposed[2], x_transposed[3]
|
||||
b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]
|
||||
return np.stack(b, axis=-1)
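# Worked example for corners_to_center_format: the corners box (0, 0, 1, 1) maps back to
# center format (0.5, 0.5, 1, 1); the two helpers above are inverses of each other.
# corners_to_center_format(np.array([[0.0, 0.0, 1.0, 1.0]])) -> array([[0.5, 0.5, 1., 1.]])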
|
||||
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.masks_to_boxes
|
||||
def masks_to_boxes(masks):
|
||||
"""
|
||||
Compute the bounding boxes around the provided panoptic segmentation masks.
|
||||
|
||||
The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
|
||||
|
||||
Returns a [N, 4] tensor, with the boxes in corner (xyxy) format.
|
||||
"""
|
||||
if masks.size == 0:
|
||||
return np.zeros((0, 4))
|
||||
|
||||
h, w = masks.shape[-2:]
|
||||
|
||||
y = np.arange(0, h, dtype=np.float32)
|
||||
x = np.arange(0, w, dtype=np.float32)
|
||||
# see https://github.com/pytorch/pytorch/issues/50276
|
||||
y, x = np.meshgrid(y, x, indexing="ij")
|
||||
|
||||
x_mask = masks * np.expand_dims(x, axis=0)
|
||||
x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)
|
||||
x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool)))
|
||||
x_min = x.filled(fill_value=1e8)
|
||||
x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
|
||||
|
||||
y_mask = masks * np.expand_dims(y, axis=0)
|
||||
y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)
|
||||
y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool)))
|
||||
y_min = y.filled(fill_value=1e8)
|
||||
y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
|
||||
|
||||
return np.stack([x_min, y_min, x_max, y_max], 1)
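# Small worked example for masks_to_boxes (illustrative, not part of the diff): one 4x4 mask
# whose "on" pixels cover rows 1-2 and columns 1-2 yields the xyxy box (1, 1, 2, 2).
example_mask = np.zeros((1, 4, 4))
example_mask[0, 1:3, 1:3] = 1
assert np.allclose(masks_to_boxes(example_mask), [[1.0, 1.0, 2.0, 2.0]])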
|
||||
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.rgb_to_id
|
||||
def rgb_to_id(color):
|
||||
if isinstance(color, np.ndarray) and len(color.shape) == 3:
|
||||
if color.dtype == np.uint8:
|
||||
color = color.astype(np.int32)
|
||||
return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
|
||||
return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
|
||||
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.id_to_rgb
|
||||
def id_to_rgb(id_map):
|
||||
if isinstance(id_map, np.ndarray):
|
||||
id_map_copy = id_map.copy()
|
||||
rgb_shape = tuple(list(id_map.shape) + [3])
|
||||
rgb_map = np.zeros(rgb_shape, dtype=np.uint8)
|
||||
for i in range(3):
|
||||
rgb_map[..., i] = id_map_copy % 256
|
||||
id_map_copy //= 256
|
||||
return rgb_map
|
||||
color = []
|
||||
for _ in range(3):
|
||||
color.append(id_map % 256)
|
||||
id_map //= 256
|
||||
return color
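# Round-trip sketch for the two helpers above: panoptic id 257 encodes to RGB [1, 1, 0]
# and decodes back; array inputs follow the same base-256 arithmetic channel-wise.
# id_to_rgb(257)       -> [1, 1, 0]
# rgb_to_id([1, 1, 0]) -> 257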
|
||||
|
||||
|
||||
class ConditionalDetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
|
||||
r"""
|
||||
Constructs a Conditional DETR feature extractor.
|
||||
|
||||
This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users
|
||||
should refer to this superclass for more information regarding those methods.
|
||||
|
||||
|
||||
Args:
|
||||
format (`str`, *optional*, defaults to `"coco_detection"`):
|
||||
Data format of the annotations. One of "coco_detection" or "coco_panoptic".
|
||||
do_resize (`bool`, *optional*, defaults to `True`):
|
||||
Whether to resize the input to a certain `size`.
|
||||
size (`int`, *optional*, defaults to 800):
|
||||
Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a
|
||||
sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of
|
||||
the image will be matched to this number, i.e. if `height > width`, then the image will be rescaled to `(size *
|
||||
height / width, size)`.
|
||||
max_size (`int`, *optional*, defaults to `1333`):
|
||||
The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is
|
||||
set to `True`.
|
||||
do_normalize (`bool`, *optional*, defaults to `True`):
|
||||
Whether or not to normalize the input with mean and standard deviation.
|
||||
image_mean (`List[float]`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
|
||||
The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean.
|
||||
image_std (`List[float]`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
|
||||
The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the
|
||||
ImageNet std.
|
||||
"""
|
||||
|
||||
model_input_names = ["pixel_values", "pixel_mask"]
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.__init__
|
||||
def __init__(
|
||||
self,
|
||||
format="coco_detection",
|
||||
do_resize=True,
|
||||
size=800,
|
||||
max_size=1333,
|
||||
do_normalize=True,
|
||||
image_mean=None,
|
||||
image_std=None,
|
||||
**kwargs
|
||||
):
|
||||
super().__init__(**kwargs)
|
||||
self.format = self._is_valid_format(format)
|
||||
self.do_resize = do_resize
|
||||
self.size = size
|
||||
self.max_size = max_size
|
||||
self.do_normalize = do_normalize
|
||||
self.image_mean = image_mean if image_mean is not None else [0.485, 0.456, 0.406] # ImageNet mean
|
||||
self.image_std = image_std if image_std is not None else [0.229, 0.224, 0.225] # ImageNet std
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._is_valid_format
|
||||
def _is_valid_format(self, format):
|
||||
if format not in ["coco_detection", "coco_panoptic"]:
|
||||
raise ValueError(f"Format {format} not supported")
|
||||
return format
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare
|
||||
def prepare(self, image, target, return_segmentation_masks=False, masks_path=None):
|
||||
if self.format == "coco_detection":
|
||||
image, target = self.prepare_coco_detection(image, target, return_segmentation_masks)
|
||||
return image, target
|
||||
elif self.format == "coco_panoptic":
|
||||
image, target = self.prepare_coco_panoptic(image, target, masks_path)
|
||||
return image, target
|
||||
else:
|
||||
raise ValueError(f"Format {self.format} not supported")
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.convert_coco_poly_to_mask
|
||||
def convert_coco_poly_to_mask(self, segmentations, height, width):
|
||||
|
||||
try:
|
||||
from pycocotools import mask as coco_mask
|
||||
except ImportError:
|
||||
raise ImportError("Pycocotools is not installed in your environment.")
|
||||
|
||||
masks = []
|
||||
for polygons in segmentations:
|
||||
rles = coco_mask.frPyObjects(polygons, height, width)
|
||||
mask = coco_mask.decode(rles)
|
||||
if len(mask.shape) < 3:
|
||||
mask = mask[..., None]
|
||||
mask = np.asarray(mask, dtype=np.uint8)
|
||||
mask = np.any(mask, axis=2)
|
||||
masks.append(mask)
|
||||
if masks:
|
||||
masks = np.stack(masks, axis=0)
|
||||
else:
|
||||
masks = np.zeros((0, height, width), dtype=np.uint8)
|
||||
|
||||
return masks
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_detection with DETR->ConditionalDETR
|
||||
def prepare_coco_detection(self, image, target, return_segmentation_masks=False):
|
||||
"""
|
||||
Convert the target in COCO format into the format expected by ConditionalDETR.
|
||||
"""
|
||||
w, h = image.size
|
||||
|
||||
image_id = target["image_id"]
|
||||
image_id = np.asarray([image_id], dtype=np.int64)
|
||||
|
||||
# get all COCO annotations for the given image
|
||||
anno = target["annotations"]
|
||||
|
||||
anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0]
|
||||
|
||||
boxes = [obj["bbox"] for obj in anno]
|
||||
# guard against no boxes via resizing
|
||||
boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
|
||||
boxes[:, 2:] += boxes[:, :2]
|
||||
boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=w)
|
||||
boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=h)
|
||||
|
||||
classes = [obj["category_id"] for obj in anno]
|
||||
classes = np.asarray(classes, dtype=np.int64)
|
||||
|
||||
if return_segmentation_masks:
|
||||
segmentations = [obj["segmentation"] for obj in anno]
|
||||
masks = self.convert_coco_poly_to_mask(segmentations, h, w)
|
||||
|
||||
keypoints = None
|
||||
if anno and "keypoints" in anno[0]:
|
||||
keypoints = [obj["keypoints"] for obj in anno]
|
||||
keypoints = np.asarray(keypoints, dtype=np.float32)
|
||||
num_keypoints = keypoints.shape[0]
|
||||
if num_keypoints:
|
||||
keypoints = keypoints.reshape((-1, 3))
|
||||
|
||||
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
|
||||
boxes = boxes[keep]
|
||||
classes = classes[keep]
|
||||
if return_segmentation_masks:
|
||||
masks = masks[keep]
|
||||
if keypoints is not None:
|
||||
keypoints = keypoints[keep]
|
||||
|
||||
target = {}
|
||||
target["boxes"] = boxes
|
||||
target["class_labels"] = classes
|
||||
if return_segmentation_masks:
|
||||
target["masks"] = masks
|
||||
target["image_id"] = image_id
|
||||
if keypoints is not None:
|
||||
target["keypoints"] = keypoints
|
||||
|
||||
# for conversion to coco api
|
||||
area = np.asarray([obj["area"] for obj in anno], dtype=np.float32)
|
||||
iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno], dtype=np.int64)
|
||||
target["area"] = area[keep]
|
||||
target["iscrowd"] = iscrowd[keep]
|
||||
|
||||
target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64)
|
||||
target["size"] = np.asarray([int(h), int(w)], dtype=np.int64)
|
||||
|
||||
return image, target
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_panoptic
|
||||
def prepare_coco_panoptic(self, image, target, masks_path, return_masks=True):
|
||||
w, h = image.size
|
||||
ann_info = target.copy()
|
||||
ann_path = pathlib.Path(masks_path) / ann_info["file_name"]
|
||||
|
||||
if "segments_info" in ann_info:
|
||||
masks = np.asarray(Image.open(ann_path), dtype=np.uint32)
|
||||
masks = rgb_to_id(masks)
|
||||
|
||||
ids = np.array([ann["id"] for ann in ann_info["segments_info"]])
|
||||
masks = masks == ids[:, None, None]
|
||||
masks = np.asarray(masks, dtype=np.uint8)
|
||||
|
||||
labels = np.asarray([ann["category_id"] for ann in ann_info["segments_info"]], dtype=np.int64)
|
||||
|
||||
target = {}
|
||||
target["image_id"] = np.asarray(
|
||||
[ann_info["image_id"] if "image_id" in ann_info else ann_info["id"]], dtype=np.int64
|
||||
)
|
||||
if return_masks:
|
||||
target["masks"] = masks
|
||||
target["class_labels"] = labels
|
||||
|
||||
target["boxes"] = masks_to_boxes(masks)
|
||||
|
||||
target["size"] = np.asarray([int(h), int(w)], dtype=np.int64)
|
||||
target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64)
|
||||
if "segments_info" in ann_info:
|
||||
target["iscrowd"] = np.asarray([ann["iscrowd"] for ann in ann_info["segments_info"]], dtype=np.int64)
|
||||
target["area"] = np.asarray([ann["area"] for ann in ann_info["segments_info"]], dtype=np.float32)
|
||||
|
||||
return image, target
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._resize
|
||||
def _resize(self, image, size, target=None, max_size=None):
|
||||
"""
|
||||
Resize the image to the given size. Size can be min_size (scalar) or (w, h) tuple. If size is an int, smaller
|
||||
edge of the image will be matched to this number.
|
||||
|
||||
If given, also resize the target accordingly.
|
||||
"""
|
||||
if not isinstance(image, Image.Image):
|
||||
image = self.to_pil_image(image)
|
||||
|
||||
def get_size_with_aspect_ratio(image_size, size, max_size=None):
|
||||
w, h = image_size
|
||||
if max_size is not None:
|
||||
min_original_size = float(min((w, h)))
|
||||
max_original_size = float(max((w, h)))
|
||||
if max_original_size / min_original_size * size > max_size:
|
||||
size = int(round(max_size * min_original_size / max_original_size))
|
||||
|
||||
if (w <= h and w == size) or (h <= w and h == size):
|
||||
return (h, w)
|
||||
|
||||
if w < h:
|
||||
ow = size
|
||||
oh = int(size * h / w)
|
||||
else:
|
||||
oh = size
|
||||
ow = int(size * w / h)
|
||||
|
||||
return (oh, ow)
|
||||
|
||||
def get_size(image_size, size, max_size=None):
|
||||
if isinstance(size, (list, tuple)):
|
||||
return size
|
||||
else:
|
||||
# size returned must be (w, h) since we use PIL to resize images
|
||||
# so we revert the tuple
|
||||
return get_size_with_aspect_ratio(image_size, size, max_size)[::-1]
|
||||
|
||||
size = get_size(image.size, size, max_size)
|
||||
rescaled_image = self.resize(image, size=size)
|
||||
|
||||
if target is None:
|
||||
return rescaled_image, None
|
||||
|
||||
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
|
||||
ratio_width, ratio_height = ratios
|
||||
|
||||
target = target.copy()
|
||||
if "boxes" in target:
|
||||
boxes = target["boxes"]
|
||||
scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
|
||||
target["boxes"] = scaled_boxes
|
||||
|
||||
if "area" in target:
|
||||
area = target["area"]
|
||||
scaled_area = area * (ratio_width * ratio_height)
|
||||
target["area"] = scaled_area
|
||||
|
||||
w, h = size
|
||||
target["size"] = np.asarray([h, w], dtype=np.int64)
|
||||
|
||||
if "masks" in target:
|
||||
# use PyTorch as current workaround
|
||||
# TODO replace by self.resize
|
||||
masks = torch.from_numpy(target["masks"][:, None]).float()
|
||||
interpolated_masks = nn.functional.interpolate(masks, size=(h, w), mode="nearest")[:, 0] > 0.5
|
||||
target["masks"] = interpolated_masks.numpy()
|
||||
|
||||
return rescaled_image, target
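# Worked example of the resizing rule above with the defaults size=800, max_size=1333:
# a 640x480 (w x h) image has its shorter edge scaled to 800, giving (w, h) = (1066, 800),
# since 800 * 640 / 480 = 1066 stays below max_size. If the scaled longer edge would exceed
# max_size, the target size is first reduced so the longer edge lands exactly on max_size.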
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._normalize
|
||||
def _normalize(self, image, mean, std, target=None):
|
||||
"""
|
||||
Normalize the image with a certain mean and std.
|
||||
|
||||
If given, also normalize the target bounding boxes based on the size of the image.
|
||||
"""
|
||||
|
||||
image = self.normalize(image, mean=mean, std=std)
|
||||
if target is None:
|
||||
return image, None
|
||||
|
||||
target = target.copy()
|
||||
h, w = image.shape[-2:]
|
||||
|
||||
if "boxes" in target:
|
||||
boxes = target["boxes"]
|
||||
boxes = corners_to_center_format(boxes)
|
||||
boxes = boxes / np.asarray([w, h, w, h], dtype=np.float32)
|
||||
target["boxes"] = boxes
|
||||
|
||||
return image, target
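# Worked example for the box normalization above: on an 800x1066 (h x w) image, the
# corners-format box (533, 400, 1066, 800) first becomes center format (799.5, 600, 533, 400)
# and then, after dividing by [w, h, w, h], the relative box (0.75, 0.75, 0.5, 0.5).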
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.__call__ with Detr->ConditionalDetr,DETR->ConditionalDETR
|
||||
def __call__(
|
||||
self,
|
||||
images: ImageInput,
|
||||
annotations: Union[List[Dict], List[List[Dict]]] = None,
|
||||
return_segmentation_masks: Optional[bool] = False,
|
||||
masks_path: Optional[pathlib.Path] = None,
|
||||
pad_and_return_pixel_mask: Optional[bool] = True,
|
||||
return_tensors: Optional[Union[str, TensorType]] = None,
|
||||
**kwargs,
|
||||
) -> BatchFeature:
|
||||
"""
|
||||
Main method to prepare for the model one or several image(s) and optional annotations. Images are by default
|
||||
padded up to the largest image in a batch, and a pixel mask is created that indicates which pixels are
|
||||
real/which are padding.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass
|
||||
PIL images.
|
||||
|
||||
</Tip>
|
||||
|
||||
Args:
|
||||
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
|
||||
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
|
||||
tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
|
||||
number of channels, H and W are image height and width.
|
||||
|
||||
annotations (`Dict`, `List[Dict]`, *optional*):
|
||||
The corresponding annotations in COCO format.
|
||||
|
||||
In case [`ConditionalDetrFeatureExtractor`] was initialized with `format = "coco_detection"`, the
|
||||
annotations for each image should have the following format: {'image_id': int, 'annotations':
|
||||
[annotation]}, with the annotations being a list of COCO object annotations.
|
||||
|
||||
In case [`ConditionalDetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`, the
|
||||
annotations for each image should have the following format: {'image_id': int, 'file_name': str,
|
||||
'segments_info': [segment_info]} with segments_info being a list of COCO panoptic annotations.
|
||||
|
||||
return_segmentation_masks (`bool`, *optional*, defaults to `False`):
|
||||
Whether to also include instance segmentation masks as part of the labels in case `format =
|
||||
"coco_detection"`.
|
||||
|
||||
masks_path (`pathlib.Path`, *optional*):
|
||||
Path to the directory containing the PNG files that store the class-agnostic image segmentations. Only
|
||||
relevant in case [`ConditionalDetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`.
|
||||
|
||||
pad_and_return_pixel_mask (`bool`, *optional*, defaults to `True`):
|
||||
Whether or not to pad images up to the largest image in a batch and create a pixel mask.
|
||||
|
||||
If left to the default, will return a pixel mask that is:
|
||||
|
||||
- 1 for pixels that are real (i.e. **not masked**),
|
||||
- 0 for pixels that are padding (i.e. **masked**).
|
||||
|
||||
return_tensors (`str` or [`~utils.TensorType`], *optional*):
|
||||
If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor`
|
||||
objects.
|
||||
|
||||
Returns:
|
||||
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
|
||||
|
||||
- **pixel_values** -- Pixel values to be fed to a model.
|
||||
- **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if
|
||||
*"pixel_mask"* is in `self.model_input_names`).
|
||||
- **labels** -- Optional labels to be fed to a model (when `annotations` are provided)
|
||||
"""
|
||||
# Input type checking for clearer error
|
||||
|
||||
valid_images = False
|
||||
valid_annotations = False
|
||||
valid_masks_path = False
|
||||
|
||||
# Check that images has a valid type
|
||||
if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images):
|
||||
valid_images = True
|
||||
elif isinstance(images, (list, tuple)):
|
||||
if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]):
|
||||
valid_images = True
|
||||
|
||||
if not valid_images:
|
||||
raise ValueError(
|
||||
"Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), "
|
||||
"`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)."
|
||||
)
|
||||
|
||||
is_batched = bool(
|
||||
isinstance(images, (list, tuple))
|
||||
and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]))
|
||||
)
|
||||
|
||||
# Check that annotations has a valid type
|
||||
if annotations is not None:
|
||||
if not is_batched:
|
||||
if self.format == "coco_detection":
|
||||
if isinstance(annotations, dict) and "image_id" in annotations and "annotations" in annotations:
|
||||
if isinstance(annotations["annotations"], (list, tuple)):
|
||||
# an image can have no annotations
|
||||
if len(annotations["annotations"]) == 0 or isinstance(annotations["annotations"][0], dict):
|
||||
valid_annotations = True
|
||||
elif self.format == "coco_panoptic":
|
||||
if isinstance(annotations, dict) and "image_id" in annotations and "segments_info" in annotations:
|
||||
if isinstance(annotations["segments_info"], (list, tuple)):
|
||||
# an image can have no segments (?)
|
||||
if len(annotations["segments_info"]) == 0 or isinstance(
|
||||
annotations["segments_info"][0], dict
|
||||
):
|
||||
valid_annotations = True
|
||||
else:
|
||||
if isinstance(annotations, (list, tuple)):
|
||||
if len(images) != len(annotations):
|
||||
raise ValueError("There must be as many annotations as there are images")
|
||||
if isinstance(annotations[0], Dict):
|
||||
if self.format == "coco_detection":
|
||||
if isinstance(annotations[0]["annotations"], (list, tuple)):
|
||||
valid_annotations = True
|
||||
elif self.format == "coco_panoptic":
|
||||
if isinstance(annotations[0]["segments_info"], (list, tuple)):
|
||||
valid_annotations = True
|
||||
|
||||
if not valid_annotations:
|
||||
raise ValueError(
|
||||
"""
|
||||
Annotations must be of type `Dict` (single image) or `List[Dict]` (batch of images). In case of object
|
||||
detection, each dictionary should contain the keys 'image_id' and 'annotations', with the latter
|
||||
being a list of annotations in COCO format. In case of panoptic segmentation, each dictionary
|
||||
should contain the keys 'file_name', 'image_id' and 'segments_info', with the latter being a list
|
||||
of annotations in COCO format.
|
||||
"""
|
||||
)
|
||||
|
||||
# Check that masks_path has a valid type
|
||||
if masks_path is not None:
|
||||
if self.format == "coco_panoptic":
|
||||
if isinstance(masks_path, pathlib.Path):
|
||||
valid_masks_path = True
|
||||
if not valid_masks_path:
|
||||
raise ValueError(
|
||||
"The path to the directory containing the mask PNG files should be provided as a"
|
||||
" `pathlib.Path` object."
|
||||
)
|
||||
|
||||
if not is_batched:
|
||||
images = [images]
|
||||
if annotations is not None:
|
||||
annotations = [annotations]
|
||||
|
||||
# prepare (COCO annotations as a list of Dict -> ConditionalDETR target as a single Dict per image)
|
||||
if annotations is not None:
|
||||
for idx, (image, target) in enumerate(zip(images, annotations)):
|
||||
if not isinstance(image, Image.Image):
|
||||
image = self.to_pil_image(image)
|
||||
image, target = self.prepare(image, target, return_segmentation_masks, masks_path)
|
||||
images[idx] = image
|
||||
annotations[idx] = target
|
||||
|
||||
# transformations (resizing + normalization)
|
||||
if self.do_resize and self.size is not None:
|
||||
if annotations is not None:
|
||||
for idx, (image, target) in enumerate(zip(images, annotations)):
|
||||
image, target = self._resize(image=image, target=target, size=self.size, max_size=self.max_size)
|
||||
images[idx] = image
|
||||
annotations[idx] = target
|
||||
else:
|
||||
for idx, image in enumerate(images):
|
||||
images[idx] = self._resize(image=image, target=None, size=self.size, max_size=self.max_size)[0]
|
||||
|
||||
if self.do_normalize:
|
||||
if annotations is not None:
|
||||
for idx, (image, target) in enumerate(zip(images, annotations)):
|
||||
image, target = self._normalize(
|
||||
image=image, mean=self.image_mean, std=self.image_std, target=target
|
||||
)
|
||||
images[idx] = image
|
||||
annotations[idx] = target
|
||||
else:
|
||||
images = [
|
||||
self._normalize(image=image, mean=self.image_mean, std=self.image_std)[0] for image in images
|
||||
]
|
||||
|
||||
if pad_and_return_pixel_mask:
|
||||
# pad images up to largest image in batch and create pixel_mask
|
||||
max_size = self._max_by_axis([list(image.shape) for image in images])
|
||||
c, h, w = max_size
|
||||
padded_images = []
|
||||
pixel_mask = []
|
||||
for image in images:
|
||||
# create padded image
|
||||
padded_image = np.zeros((c, h, w), dtype=np.float32)
|
||||
padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image)
|
||||
padded_images.append(padded_image)
|
||||
# create pixel mask
|
||||
mask = np.zeros((h, w), dtype=np.int64)
|
||||
mask[: image.shape[1], : image.shape[2]] = True
|
||||
pixel_mask.append(mask)
|
||||
images = padded_images
|
||||
|
||||
# return as BatchFeature
|
||||
data = {}
|
||||
data["pixel_values"] = images
|
||||
if pad_and_return_pixel_mask:
|
||||
data["pixel_mask"] = pixel_mask
|
||||
encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
|
||||
|
||||
if annotations is not None:
|
||||
# Convert to TensorType
|
||||
tensor_type = return_tensors
|
||||
if not isinstance(tensor_type, TensorType):
|
||||
tensor_type = TensorType(tensor_type)
|
||||
|
||||
if not tensor_type == TensorType.PYTORCH:
|
||||
raise ValueError("Only PyTorch is supported for the moment.")
|
||||
else:
|
||||
if not is_torch_available():
|
||||
raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
|
||||
|
||||
encoded_inputs["labels"] = [
|
||||
{k: torch.from_numpy(v) for k, v in target.items()} for target in annotations
|
||||
]
|
||||
|
||||
return encoded_inputs
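# Minimal usage sketch for __call__ with a COCO-style detection annotation (the blank image
# and the single annotation below are made up; in practice they come from a COCO dataset):
import numpy as np
from PIL import Image
from transformers import ConditionalDetrFeatureExtractor

image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
target = {
    "image_id": 1,
    "annotations": [{"bbox": [10, 20, 100, 50], "category_id": 17, "area": 5000, "iscrowd": 0}],
}
feature_extractor = ConditionalDetrFeatureExtractor()
encoding = feature_extractor(images=image, annotations=target, return_tensors="pt")
print(encoding["pixel_values"].shape)  # resized, normalized and padded images
print(encoding["labels"][0]["boxes"])  # normalized (center_x, center_y, width, height) boxes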
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._max_by_axis
|
||||
def _max_by_axis(self, the_list):
|
||||
# type: (List[List[int]]) -> List[int]
|
||||
maxes = the_list[0]
|
||||
for sublist in the_list[1:]:
|
||||
for index, item in enumerate(sublist):
|
||||
maxes[index] = max(maxes[index], item)
|
||||
return maxes
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.pad_and_create_pixel_mask
|
||||
def pad_and_create_pixel_mask(
|
||||
self, pixel_values_list: List["torch.Tensor"], return_tensors: Optional[Union[str, TensorType]] = None
|
||||
):
|
||||
"""
|
||||
Pad images up to the largest image in a batch and create a corresponding `pixel_mask`.
|
||||
|
||||
Args:
|
||||
pixel_values_list (`List[torch.Tensor]`):
|
||||
List of images (pixel values) to be padded. Each image should be a tensor of shape (C, H, W).
|
||||
return_tensors (`str` or [`~utils.TensorType`], *optional*):
|
||||
If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor`
|
||||
objects.
|
||||
|
||||
Returns:
|
||||
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
|
||||
|
||||
- **pixel_values** -- Pixel values to be fed to a model.
|
||||
- **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if
|
||||
*"pixel_mask"* is in `self.model_input_names`).
|
||||
|
||||
"""
|
||||
|
||||
max_size = self._max_by_axis([list(image.shape) for image in pixel_values_list])
|
||||
c, h, w = max_size
|
||||
padded_images = []
|
||||
pixel_mask = []
|
||||
for image in pixel_values_list:
|
||||
# create padded image
|
||||
padded_image = np.zeros((c, h, w), dtype=np.float32)
|
||||
padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image)
|
||||
padded_images.append(padded_image)
|
||||
# create pixel mask
|
||||
mask = np.zeros((h, w), dtype=np.int64)
|
||||
mask[: image.shape[1], : image.shape[2]] = True
|
||||
pixel_mask.append(mask)
|
||||
|
||||
# return as BatchFeature
|
||||
data = {"pixel_values": padded_images, "pixel_mask": pixel_mask}
|
||||
encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
|
||||
|
||||
return encoded_inputs
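# Shape-only sketch of pad_and_create_pixel_mask: two images with different spatial sizes are
# padded to the per-batch maximum (here 512 x 640) and a matching pixel mask is returned.
import torch
from transformers import ConditionalDetrFeatureExtractor

feature_extractor = ConditionalDetrFeatureExtractor()
batch = feature_extractor.pad_and_create_pixel_mask(
    [torch.randn(3, 480, 640), torch.randn(3, 512, 512)], return_tensors="pt"
)
print(batch["pixel_values"].shape)  # torch.Size([2, 3, 512, 640])
print(batch["pixel_mask"].shape)    # torch.Size([2, 512, 640])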
|
||||
|
||||
# POSTPROCESSING METHODS
|
||||
# inspired by https://github.com/Atten4Vis/conditionalDETR/blob/master/models/conditional_detr.py#L258
|
||||
def post_process(self, outputs, target_sizes):
|
||||
"""
|
||||
Converts the output of [`ConditionalDetrForObjectDetection`] into the format expected by the COCO api. Only
|
||||
supports PyTorch.
|
||||
|
||||
Args:
|
||||
outputs ([`ConditionalDetrObjectDetectionOutput`]):
|
||||
Raw outputs of the model.
|
||||
target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
|
||||
Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original
|
||||
image size (before any data augmentation). For visualization, this should be the image size after data
|
||||
augmentation, but before padding.
|
||||
|
||||
Returns:
|
||||
`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
|
||||
in the batch as predicted by the model.
|
||||
"""
|
||||
out_logits, out_bbox = outputs.logits, outputs.pred_boxes
|
||||
|
||||
if len(out_logits) != len(target_sizes):
|
||||
raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
|
||||
if target_sizes.shape[1] != 2:
|
||||
raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
|
||||
|
||||
prob = out_logits.sigmoid()
|
||||
topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 300, dim=1)
|
||||
scores = topk_values
|
||||
topk_boxes = topk_indexes // out_logits.shape[2]
|
||||
labels = topk_indexes % out_logits.shape[2]
|
||||
boxes = center_to_corners_format(out_bbox)
|
||||
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
|
||||
|
||||
# and from relative [0, 1] to absolute [0, height] coordinates
|
||||
img_h, img_w = target_sizes.unbind(1)
|
||||
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
|
||||
boxes = boxes * scale_fct[:, None, :]
|
||||
|
||||
results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]
|
||||
|
||||
return results
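# Hedged end-to-end sketch for post_process; the checkpoint name is an assumption and only
# stands in for whatever Conditional DETR detection checkpoint is available on the Hub.
import torch
import requests
from PIL import Image
from transformers import ConditionalDetrFeatureExtractor, ConditionalDetrForObjectDetection

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
feature_extractor = ConditionalDetrFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50")
model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50")

inputs = feature_extractor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

target_sizes = torch.tensor([image.size[::-1]])  # original (height, width) of each image
results = feature_extractor.post_process(outputs, target_sizes)
keep = results[0]["scores"] > 0.5
print(results[0]["labels"][keep], results[0]["boxes"][keep])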
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_segmentation with Detr->ConditionalDetr
|
||||
def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5):
|
||||
"""
|
||||
Converts the output of [`ConditionalDetrForSegmentation`] into image segmentation predictions. Only supports
|
||||
PyTorch.
|
||||
|
||||
Parameters:
|
||||
outputs ([`ConditionalDetrSegmentationOutput`]):
|
||||
Raw outputs of the model.
|
||||
target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`):
|
||||
Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction.
|
||||
threshold (`float`, *optional*, defaults to 0.9):
|
||||
Threshold to use to filter out queries.
|
||||
mask_threshold (`float`, *optional*, defaults to 0.5):
|
||||
Threshold to use when turning the predicted masks into binary values.
|
||||
|
||||
Returns:
|
||||
`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image
|
||||
in the batch as predicted by the model.
|
||||
"""
|
||||
out_logits, raw_masks = outputs.logits, outputs.pred_masks
|
||||
preds = []
|
||||
|
||||
def to_tuple(tup):
|
||||
if isinstance(tup, tuple):
|
||||
return tup
|
||||
return tuple(tup.cpu().tolist())
|
||||
|
||||
for cur_logits, cur_masks, size in zip(out_logits, raw_masks, target_sizes):
|
||||
# we filter empty queries and detection below threshold
|
||||
scores, labels = cur_logits.softmax(-1).max(-1)
|
||||
keep = labels.ne(outputs.logits.shape[-1] - 1) & (scores > threshold)
|
||||
cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)
|
||||
cur_scores = cur_scores[keep]
|
||||
cur_classes = cur_classes[keep]
|
||||
cur_masks = cur_masks[keep]
|
||||
cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1)
|
||||
cur_masks = (cur_masks.sigmoid() > mask_threshold) * 1
|
||||
|
||||
predictions = {"scores": cur_scores, "labels": cur_classes, "masks": cur_masks}
|
||||
preds.append(predictions)
|
||||
return preds
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_instance with Detr->ConditionalDetr
|
||||
def post_process_instance(self, results, outputs, orig_target_sizes, max_target_sizes, threshold=0.5):
|
||||
"""
|
||||
Converts the output of [`ConditionalDetrForSegmentation`] into actual instance segmentation predictions. Only
|
||||
supports PyTorch.
|
||||
|
||||
Args:
|
||||
results (`List[Dict]`):
|
||||
Results list obtained by [`~ConditionalDetrFeatureExtractor.post_process`], to which "masks" results
|
||||
will be added.
|
||||
outputs ([`ConditionalDetrSegmentationOutput`]):
|
||||
Raw outputs of the model.
|
||||
orig_target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
|
||||
Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original
|
||||
image size (before any data augmentation).
|
||||
max_target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
|
||||
Tensor containing the maximum size (h, w) of each image of the batch. For evaluation, this must be the
|
||||
original image size (before any data augmentation).
|
||||
threshold (`float`, *optional*, defaults to 0.5):
|
||||
Threshold to use when turning the predicted masks into binary values.
|
||||
|
||||
Returns:
|
||||
`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, boxes and masks for an
|
||||
image in the batch as predicted by the model.
|
||||
"""
|
||||
|
||||
if len(orig_target_sizes) != len(max_target_sizes):
|
||||
raise ValueError("Make sure to pass in as many orig_target_sizes as max_target_sizes")
|
||||
max_h, max_w = max_target_sizes.max(0)[0].tolist()
|
||||
outputs_masks = outputs.pred_masks.squeeze(2)
|
||||
outputs_masks = nn.functional.interpolate(
|
||||
outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False
|
||||
)
|
||||
outputs_masks = (outputs_masks.sigmoid() > threshold).cpu()
|
||||
|
||||
for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):
|
||||
img_h, img_w = t[0], t[1]
|
||||
results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)
|
||||
results[i]["masks"] = nn.functional.interpolate(
|
||||
results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest"
|
||||
).byte()
|
||||
|
||||
return results
|
||||
|
||||
# Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_panoptic with Detr->ConditionalDetr
|
||||
def post_process_panoptic(self, outputs, processed_sizes, target_sizes=None, is_thing_map=None, threshold=0.85):
|
||||
"""
|
||||
Converts the output of [`ConditionalDetrForSegmentation`] into actual panoptic predictions. Only supports
|
||||
PyTorch.
|
||||
|
||||
Parameters:
|
||||
outputs ([`ConditionalDetrSegmentationOutput`]):
|
||||
Raw outputs of the model.
|
||||
processed_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`):
|
||||
Torch Tensor (or list) containing the size (h, w) of each image of the batch, i.e. the size after data
|
||||
augmentation but before batching.
|
||||
target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`, *optional*):
|
||||
Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. If left to
|
||||
None, it will default to the `processed_sizes`.
|
||||
is_thing_map (`Dict[int, bool]`, *optional*):
|
||||
Dictionary mapping class indices to either True or False, depending on whether or not they are a thing.
|
||||
If not set, defaults to the `is_thing_map` of COCO panoptic.
|
||||
threshold (`float`, *optional*, defaults to 0.85):
|
||||
Threshold to use to filter out queries.
|
||||
|
||||
Returns:
|
||||
`List[Dict]`: A list of dictionaries, each dictionary containing a PNG string and segments_info values for
|
||||
an image in the batch as predicted by the model.
|
||||
"""
|
||||
if target_sizes is None:
|
||||
target_sizes = processed_sizes
|
||||
if len(processed_sizes) != len(target_sizes):
|
||||
raise ValueError("Make sure to pass in as many processed_sizes as target_sizes")
|
||||
|
||||
if is_thing_map is None:
|
||||
# default to is_thing_map of COCO panoptic
|
||||
is_thing_map = {i: i <= 90 for i in range(201)}
|
||||
|
||||
out_logits, raw_masks, raw_boxes = outputs.logits, outputs.pred_masks, outputs.pred_boxes
|
||||
if not len(out_logits) == len(raw_masks) == len(target_sizes):
|
||||
raise ValueError(
|
||||
"Make sure that you pass in as many target sizes as the batch dimension of the logits and masks"
|
||||
)
|
||||
preds = []
|
||||
|
||||
def to_tuple(tup):
|
||||
if isinstance(tup, tuple):
|
||||
return tup
|
||||
return tuple(tup.cpu().tolist())
|
||||
|
||||
for cur_logits, cur_masks, cur_boxes, size, target_size in zip(
|
||||
out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes
|
||||
):
|
||||
# we filter empty queries and detection below threshold
|
||||
scores, labels = cur_logits.softmax(-1).max(-1)
|
||||
keep = labels.ne(outputs.logits.shape[-1] - 1) & (scores > threshold)
|
||||
cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)
|
||||
cur_scores = cur_scores[keep]
|
||||
cur_classes = cur_classes[keep]
|
||||
cur_masks = cur_masks[keep]
|
||||
cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1)
|
||||
cur_boxes = center_to_corners_format(cur_boxes[keep])
|
||||
|
||||
h, w = cur_masks.shape[-2:]
|
||||
if len(cur_boxes) != len(cur_classes):
|
||||
raise ValueError("Not as many boxes as there are classes")
|
||||
|
||||
# It may be that we have several predicted masks for the same stuff class.
|
||||
# In the following, we track the list of mask ids for each stuff class (they are merged later on)
|
||||
cur_masks = cur_masks.flatten(1)
|
||||
stuff_equiv_classes = defaultdict(lambda: [])
|
||||
for k, label in enumerate(cur_classes):
|
||||
if not is_thing_map[label.item()]:
|
||||
stuff_equiv_classes[label.item()].append(k)
|
||||
|
||||
def get_ids_area(masks, scores, dedup=False):
|
||||
# This helper function creates the final panoptic segmentation image
|
||||
# It also returns the area of the masks that appears on the image
|
||||
|
||||
m_id = masks.transpose(0, 1).softmax(-1)
|
||||
|
||||
if m_id.shape[-1] == 0:
|
||||
# We didn't detect any mask :(
|
||||
m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)
|
||||
else:
|
||||
m_id = m_id.argmax(-1).view(h, w)
|
||||
|
||||
if dedup:
|
||||
# Merge the masks corresponding to the same stuff class
|
||||
for equiv in stuff_equiv_classes.values():
|
||||
if len(equiv) > 1:
|
||||
for eq_id in equiv:
|
||||
m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
|
||||
|
||||
final_h, final_w = to_tuple(target_size)
|
||||
|
||||
seg_img = Image.fromarray(id_to_rgb(m_id.view(h, w).cpu().numpy()))
|
||||
seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)
|
||||
|
||||
np_seg_img = torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes()))
|
||||
np_seg_img = np_seg_img.view(final_h, final_w, 3)
|
||||
np_seg_img = np_seg_img.numpy()
|
||||
|
||||
m_id = torch.from_numpy(rgb_to_id(np_seg_img))
|
||||
|
||||
area = []
|
||||
for i in range(len(scores)):
|
||||
area.append(m_id.eq(i).sum().item())
|
||||
return area, seg_img
|
||||
|
||||
area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)
|
||||
if cur_classes.numel() > 0:
|
||||
# We now filter empty masks as long as we find some
|
||||
while True:
|
||||
filtered_small = torch.as_tensor(
|
||||
[area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device
|
||||
)
|
||||
if filtered_small.any().item():
|
||||
cur_scores = cur_scores[~filtered_small]
|
||||
cur_classes = cur_classes[~filtered_small]
|
||||
cur_masks = cur_masks[~filtered_small]
|
||||
area, seg_img = get_ids_area(cur_masks, cur_scores)
|
||||
else:
|
||||
break
|
||||
|
||||
else:
|
||||
cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)
|
||||
|
||||
segments_info = []
|
||||
for i, a in enumerate(area):
|
||||
cat = cur_classes[i].item()
|
||||
segments_info.append({"id": i, "isthing": is_thing_map[cat], "category_id": cat, "area": a})
|
||||
del cur_classes
|
||||
|
||||
with io.BytesIO() as out:
|
||||
seg_img.save(out, format="PNG")
|
||||
predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
|
||||
preds.append(predictions)
|
||||
return preds
|
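For orientation, here is a minimal sketch of how this panoptic post-processing is meant to be called end to end. The checkpoint name and the `format="coco_panoptic"` construction are taken from the tests further down; the image path is illustrative:

```python
import torch
from PIL import Image

from transformers import ConditionalDetrFeatureExtractor, ConditionalDetrForSegmentation

# construction mirrors the panoptic tests below
feature_extractor = ConditionalDetrFeatureExtractor(format="coco_panoptic")
model = ConditionalDetrForSegmentation.from_pretrained("microsoft/conditional-detr-resnet-50-panoptic")
model.eval()

image = Image.open("path/to/image.jpg")  # illustrative path
encoding = feature_extractor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**encoding)

# sizes the model actually saw (after resizing) and the sizes we want the final masks in
processed_sizes = torch.as_tensor(encoding["pixel_values"].shape[-2:]).unsqueeze(0)
target_sizes = torch.as_tensor(image.size[::-1]).unsqueeze(0)

results = feature_extractor.post_process_panoptic(outputs, processed_sizes, target_sizes=target_sizes)
# each prediction holds a PNG-encoded segmentation map plus its segments_info, as assembled above
print(results[0]["segments_info"])
```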
File diff suppressed because it is too large.
@@ -3,6 +3,37 @@
from ..utils import DummyObject, requires_backends


CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None


class ConditionalDetrForObjectDetection(metaclass=DummyObject):
    _backends = ["timm", "vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["timm", "vision"])


class ConditionalDetrForSegmentation(metaclass=DummyObject):
    _backends = ["timm", "vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["timm", "vision"])


class ConditionalDetrModel(metaclass=DummyObject):
    _backends = ["timm", "vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["timm", "vision"])


class ConditionalDetrPreTrainedModel(metaclass=DummyObject):
    _backends = ["timm", "vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["timm", "vision"])


DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None
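These dummies are what `transformers` exposes when the optional backends are missing, so plain imports keep working and only instantiation fails. A small sketch of the resulting behavior, assuming a hypothetical environment without `timm` and Pillow installed:

```python
from transformers import ConditionalDetrModel  # resolves to the dummy class above without timm/vision

try:
    ConditionalDetrModel()  # __init__ calls requires_backends(...), which raises
except ImportError as err:
    print(err)  # the message explains which backends need to be installed
```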
@@ -24,6 +24,13 @@ class CLIPFeatureExtractor(metaclass=DummyObject):
        requires_backends(self, ["vision"])


class ConditionalDetrFeatureExtractor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


class ConvNextFeatureExtractor(metaclass=DummyObject):
    _backends = ["vision"]
tests/models/conditional_detr/__init__.py (new empty file)
@@ -0,0 +1,342 @@
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ConditionalDetrFeatureExtractor


class ConditionalDetrFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=18,
        max_size=1333,  # by setting max_size > max_resolution we're effectively not testing this :p
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.max_size = max_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_feat_extract_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "max_size": self.max_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        This function computes the expected height and width when providing images to ConditionalDetrFeatureExtractor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size * h / w)
                expected_width = self.size
            elif w > h:
                expected_height = self.size
                expected_width = int(self.size * w / h)
            else:
                expected_height = self.size
                expected_width = self.size

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
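As a quick sanity check of the resizing rule this helper encodes (tester defaults: do_resize=True, size=18): a 640×480 input has w > h, so the expected output is height 18 and width int(18 * 640 / 480) = 24, while a square input maps to 18×18; for batched inputs the maxima over the batch are taken.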
@require_torch
@require_vision
class ConditionalDetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):

    feature_extraction_class = ConditionalDetrFeatureExtractor if is_vision_available() else None

    def setUp(self):
        self.feature_extract_tester = ConditionalDetrFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "image_mean"))
        self.assertTrue(hasattr(feature_extractor, "image_std"))
        self.assertTrue(hasattr(feature_extractor, "do_normalize"))
        self.assertTrue(hasattr(feature_extractor, "do_resize"))
        self.assertTrue(hasattr(feature_extractor, "size"))
        self.assertTrue(hasattr(feature_extractor, "max_size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.feature_extract_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.feature_extract_tester.batch_size,
                self.feature_extract_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.feature_extract_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.feature_extract_tester.batch_size,
                self.feature_extract_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.feature_extract_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.feature_extract_tester.batch_size,
                self.feature_extract_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_pad_and_create_pixel_mask(self):
        # Initialize feature_extractors
        feature_extractor_1 = self.feature_extraction_class(**self.feat_extract_dict)
        feature_extractor_2 = self.feature_extraction_class(do_resize=False, do_normalize=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad_and_create_pixel_mask" and calling the feature extractor return the same tensors
        encoded_images_with_method = feature_extractor_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt")
        encoded_images = feature_extractor_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_mask"], encoded_images["pixel_mask"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        feature_extractor = ConditionalDetrFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = feature_extractor(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        # TODO replace by .from_pretrained microsoft/conditional-detr-resnet-50-panoptic
        feature_extractor = ConditionalDetrFeatureExtractor(format="coco_panoptic")
        encoding = feature_extractor(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822338
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
tests/models/conditional_detr/test_modeling_conditional_detr.py (new file, 542 lines)
@@ -0,0 +1,542 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch CONDITIONAL_DETR model. """


import inspect
import math
import unittest

from transformers import ConditionalDetrConfig, is_timm_available, is_vision_available
from transformers.testing_utils import require_timm, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...generation.test_generation_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor


if is_timm_available():
    import torch

    from transformers import ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel


if is_vision_available():
    from PIL import Image

    from transformers import ConditionalDetrFeatureExtractor


class ConditionalDetrModelTester:
    def __init__(
        self,
        parent,
        batch_size=8,
        is_training=True,
        use_labels=True,
        hidden_size=256,
        num_hidden_layers=2,
        num_attention_heads=8,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_queries=12,
        num_channels=3,
        min_size=200,
        max_size=200,
        n_targets=8,
        num_labels=91,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.n_targets = n_targets
        self.num_labels = num_labels

        # we also set the expected seq length for both encoder and decoder
        self.encoder_seq_length = math.ceil(self.min_size / 32) * math.ceil(self.max_size / 32)
        self.decoder_seq_length = self.num_queries

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size])

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                target["masks"] = torch.rand(self.n_targets, self.min_size, self.max_size, device=torch_device)
                labels.append(target)

        config = self.get_config()
        return config, pixel_values, pixel_mask, labels

    def get_config(self):
        return ConditionalDetrConfig(
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            num_queries=self.num_queries,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def create_and_check_conditional_detr_model(self, config, pixel_values, pixel_mask, labels):
        model = ConditionalDetrModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
        result = model(pixel_values)

        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size)
        )

    def create_and_check_conditional_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels):
        model = ConditionalDetrForObjectDetection(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))

        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
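To make the scale of these unit tests concrete, here is a sketch of building the same kind of tiny randomly initialized model outside the test harness (values copied from the tester defaults above; instantiating the model still needs `timm` for the ResNet-50 backbone):

```python
import torch

from transformers import ConditionalDetrConfig, ConditionalDetrModel

# tiny configuration mirroring ConditionalDetrModelTester defaults
config = ConditionalDetrConfig(
    d_model=256,
    encoder_layers=2,
    decoder_layers=2,
    encoder_attention_heads=8,
    decoder_attention_heads=8,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    num_queries=12,
    num_labels=91,
)
model = ConditionalDetrModel(config)
model.eval()

pixel_values = torch.rand(2, 3, 200, 200)  # two random 200x200 "images"
with torch.no_grad():
    out = model(pixel_values)

# decoder sequence length equals num_queries, hidden size equals d_model
print(out.last_hidden_state.shape)  # torch.Size([2, 12, 256])
```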
@require_timm
class ConditionalDetrModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConditionalDetrModel,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
        )
        if is_timm_available()
        else ()
    )
    is_encoder_decoder = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ in ["ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation"]:
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    target["masks"] = torch.ones(
                        self.model_tester.n_targets,
                        self.model_tester.min_size,
                        self.model_tester.max_size,
                        device=torch_device,
                        dtype=torch.float,
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = ConditionalDetrModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConditionalDetrConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_conditional_detr_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_conditional_detr_model(*config_and_inputs)

    def test_conditional_detr_object_detection_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_conditional_detr_object_detection_head_model(*config_and_inputs)

    @unittest.skip(reason="CONDITIONAL_DETR does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="CONDITIONAL_DETR does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="CONDITIONAL_DETR is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="CONDITIONAL_DETR does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @slow
    def test_model_outputs_equivalence(self):
        # TODO Niels: fix me!
        pass

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        decoder_seq_length = self.model_tester.decoder_seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length
        decoder_key_length = self.model_tester.decoder_seq_length
        encoder_key_length = self.model_tester.encoder_seq_length

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )
            out_len = len(outputs)

            if self.is_encoder_decoder:
                correct_outlen = 6

                # loss is at first position
                if "labels" in inputs_dict:
                    correct_outlen += 1  # loss is added to beginning
                # Object Detection model returns pred_logits and pred_boxes
                if model_class.__name__ == "ConditionalDetrForObjectDetection":
                    correct_outlen += 1
                # Panoptic Segmentation model returns pred_logits, pred_boxes, pred_masks
                if model_class.__name__ == "ConditionalDetrForSegmentation":
                    correct_outlen += 2
                if "past_key_values" in outputs:
                    correct_outlen += 1  # past_key_values have been returned

                self.assertEqual(out_len, correct_outlen)

                # decoder attentions
                decoder_attentions = outputs.decoder_attentions
                self.assertIsInstance(decoder_attentions, (list, tuple))
                self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(decoder_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
                )

                # cross attentions
                cross_attentions = outputs.cross_attentions
                self.assertIsInstance(cross_attentions, (list, tuple))
                self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(cross_attentions[0].shape[-3:]),
                    [
                        self.model_tester.num_attention_heads,
                        decoder_seq_length,
                        encoder_key_length,
                    ],
                )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            elif self.is_encoder_decoder:
                added_hidden_states = 2
            else:
                added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
            )

    def test_retain_grad_hidden_states_attentions(self):
        # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        output = outputs[0]

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_attentions = outputs.encoder_attentions[0]
        encoder_hidden_states.retain_grad()
        encoder_attentions.retain_grad()

        decoder_attentions = outputs.decoder_attentions[0]
        decoder_attentions.retain_grad()

        cross_attentions = outputs.cross_attentions[0]
        cross_attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(encoder_attentions.grad)
        self.assertIsNotNone(decoder_attentions.grad)
        self.assertIsNotNone(cross_attentions.grad)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            if model.config.is_encoder_decoder:
                expected_arg_names = ["pixel_values", "pixel_mask"]
                expected_arg_names.extend(
                    ["head_mask", "decoder_head_mask", "encoder_outputs"]
                    if "head_mask" in arg_names and "decoder_head_mask" in arg_names
                    else []
                )
                self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
            else:
                expected_arg_names = ["pixel_values", "pixel_mask"]
                self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_different_timm_backbone(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # let's pick a random timm backbone
        config.backbone = "tf_mobilenetv3_small_075"

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if model_class.__name__ == "ConditionalDetrForObjectDetection":
                expected_shape = (
                    self.model_tester.batch_size,
                    self.model_tester.num_queries,
                    self.model_tester.num_labels,
                )
                self.assertEqual(outputs.logits.shape, expected_shape)

            self.assertTrue(outputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.init_xavier_std = 1e9

        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if "bbox_attention" in name and "bias" not in name:
                        self.assertLess(
                            100000,
                            abs(param.data.max().item()),
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )
                    else:
                        self.assertIn(
                            ((param.data.mean() * 1e9).round() / 1e9).item(),
                            [0.0, 1.0],
                            msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                        )


TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_timm
@require_vision
@slow
class ConditionalDetrModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ConditionalDetrFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = ConditionalDetrModel.from_pretrained("microsoft/conditional-detr-resnet-50").to(torch_device)

        feature_extractor = self.default_feature_extractor
        image = prepare_img()
        encoding = feature_extractor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**encoding)

        expected_shape = torch.Size((1, 300, 256))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.0616, -0.5146, -0.4032], [-0.7629, -0.4934, -1.7153], [-0.4768, -0.6403, -0.7826]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_object_detection_head(self):
        model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50").to(
            torch_device
        )

        feature_extractor = self.default_feature_extractor
        image = prepare_img()
        encoding = feature_extractor(images=image, return_tensors="pt").to(torch_device)
        pixel_values = encoding["pixel_values"].to(torch_device)
        pixel_mask = encoding["pixel_mask"].to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values, pixel_mask)

        expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels))
        self.assertEqual(outputs.logits.shape, expected_shape_logits)
        expected_slice_logits = torch.tensor(
            [[-19.1194, -0.0893, -11.0154], [-17.3640, -1.8035, -14.0219], [-20.0461, -0.5837, -11.1060]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))

        expected_shape_boxes = torch.Size((1, model.config.num_queries, 4))
        self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
        expected_slice_boxes = torch.tensor(
            [[0.4433, 0.5302, 0.8853], [0.5494, 0.2517, 0.0529], [0.4998, 0.5360, 0.9956]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

    def test_inference_panoptic_segmentation_head(self):
        model = ConditionalDetrForSegmentation.from_pretrained("microsoft/conditional-detr-resnet-50-panoptic").to(
            torch_device
        )

        feature_extractor = self.default_feature_extractor
        image = prepare_img()
        encoding = feature_extractor(images=image, return_tensors="pt").to(torch_device)
        pixel_values = encoding["pixel_values"].to(torch_device)
        pixel_mask = encoding["pixel_mask"].to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values, pixel_mask)

        expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels))
        self.assertEqual(outputs.logits.shape, expected_shape_logits)
        expected_slice_logits = torch.tensor(
            [[-18.1565, -1.7568, -13.5029], [-16.8888, -1.4138, -14.1028], [-17.5709, -2.5080, -11.8654]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))

        expected_shape_boxes = torch.Size((1, model.config.num_queries, 4))
        self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
        expected_slice_boxes = torch.tensor(
            [[0.5344, 0.1789, 0.9285], [0.4420, 0.0572, 0.0875], [0.6630, 0.6887, 0.1017]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        expected_shape_masks = torch.Size((1, model.config.num_queries, 200, 267))
        self.assertEqual(outputs.pred_masks.shape, expected_shape_masks)
        expected_slice_masks = torch.tensor(
            [[-7.7558, -10.8788, -11.9797], [-11.8881, -16.4329, -17.7451], [-14.7316, -19.7383, -20.3004]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.pred_masks[0, 0, :3, :3], expected_slice_masks, atol=1e-3))
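For context, a minimal inference sketch matching what the integration tests above assert (same checkpoint and fixture image; the final comment about post-processing is an assumption based on the DETR-style feature extractors, since that helper is not shown in this excerpt):

```python
import torch
from PIL import Image

from transformers import ConditionalDetrFeatureExtractor, ConditionalDetrForObjectDetection

feature_extractor = ConditionalDetrFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50")
model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50")
model.eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = feature_extractor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# exactly the tensors the integration test checks
print(outputs.logits.shape)      # (1, num_queries, num_labels)
print(outputs.pred_boxes.shape)  # (1, num_queries, 4), normalized (cx, cy, w, h) boxes
# a DETR-style post-processing helper on the feature extractor can then map these to
# absolute, COCO-format boxes for the original image size
```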
@@ -60,6 +60,8 @@ IGNORE_NON_TESTED = PRIVATE_MODELS.copy() + [
    "DetrEncoder",  # Building part of bigger (tested) model.
    "DetrDecoder",  # Building part of bigger (tested) model.
    "DetrDecoderWrapper",  # Building part of bigger (tested) model.
    "ConditionalDetrEncoder",  # Building part of bigger (tested) model.
    "ConditionalDetrDecoder",  # Building part of bigger (tested) model.
    "M2M100Encoder",  # Building part of bigger (tested) model.
    "M2M100Decoder",  # Building part of bigger (tested) model.
    "MCTCTEncoder",  # Building part of bigger (tested) model.
@@ -165,6 +167,7 @@ IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [
    "FlaxCLIPVisionModel",
    "FlaxWav2Vec2ForCTC",
    "DetrForSegmentation",
    "ConditionalDetrForSegmentation",
    "DPRReader",
    "FlaubertForQuestionAnswering",
    "FlavaImageCodebook",