From 3fcfbe7549d9694f96e1f19630add4adf99dd421 Mon Sep 17 00:00:00 2001 From: Eduardo Pacheco <69953243+EduardoPach@users.noreply.github.com> Date: Mon, 26 Feb 2024 19:17:19 +0100 Subject: [PATCH] Adding SegGPT (#27735) * First commit * Improvements * More improvements * Converted original checkpoint to HF checkpoint * Fix style * Fixed forward * More improvements * More improvements * Update src/transformers/models/seggpt/modeling_seggpt.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Remove asserts * Remove unnecessary attributes * Changed model name to camel case * Improve forward doc * Improve tests * More improvements * Fix copies * Fix doc * Make SegGptImageProcessor more flexible * Added few-shot test * Fix style * Update READMEs and docs * Update READMEs * Make inputs required * Add SegGptForImageSegmentation * Make tests pass * Rename to out_indicies * Update src/transformers/models/seggpt/image_processing_seggpt.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/seggpt/image_processing_seggpt.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Fixed naming convention * Copying SegGptMlp from modeling_sam.py * Some minor improvements * Remove mlp_ratio * Fix docstrings * Fixed docstring match * Objects defined before use * Storing only patch_size and beta for SegGptLoss * removed _prepare_inputs method * Removed modified from headers * Renamed to output_indicies * Removed unnecessary einsums * Update tests/models/seggpt/test_modeling_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update tests/models/seggpt/test_modeling_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update tests/models/seggpt/test_modeling_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/image_processing_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/image_processing_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/image_processing_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/modeling_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/modeling_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Fixing issues * Raise error as soon as possible * More fixes * Fix merge * Added palette to SegGptImageProcessor * Fixed typo * Fixed shape typo * Added permute before doing palette to class mapping * Fixed style * Fixed and added tests * Fixed docstrings * Matching SegFormer API for post_processing_semantic_segmentation * Fixed copies * Fixed SegGptImageProcessor to handle both binary and RGB masks * Updated docstrings of SegGptImageProcessor * Update src/transformers/models/seggpt/image_processing_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update docs/source/en/model_doc/seggpt.md Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/configuration_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/convert_seggpt_to_hf.py 
Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/image_processing_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/modeling_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/image_processing_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/image_processing_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/image_processing_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/modeling_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update tests/models/seggpt/test_image_processing_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update tests/models/seggpt/test_modeling_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/modeling_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/modeling_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/modeling_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Object definitions above & fix style * Renamed output_indices to intermediate_feature_indices * Removed unnecessary check on bool_masked_pos * Loss first in the outputs * Added validation for do_normalize * Improved SegGptImageProcessor and added new tests * Added comment * Added docstrings to SegGptLoss * Reimplemented ensemble condition logic in SegGptEncoder * Update src/transformers/models/seggpt/__init__.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/seggpt/modeling_seggpt.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/seggpt/convert_seggpt_to_hf.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/seggpt/configuration_seggpt.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Updated docstrings to use post_process_semantic_segmentation * Fixed typo on docstrings * moved pixel values test to test_image_processing_seggpt * Addressed comments * Update src/transformers/models/seggpt/configuration_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/image_processing_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/configuration_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Update src/transformers/models/seggpt/modeling_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Updated docstrings for SegGptLoss * Address comments * Added SegGpt example to model docs * Update src/transformers/models/seggpt/modeling_seggpt.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * moved patchify and unpatchify * Rename checkpoint * Renamed intermediate_features to 
intermediate_hidden_states for consistency * Update src/transformers/models/seggpt/configuration_seggpt.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Replaced post_process_masks for post_process_semantic_segmentation in the docs --------- Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: Niels Co-authored-by: Eduardo Pacheco Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- README.md | 1 + README_es.md | 1 + README_fr.md | 1 + README_hd.md | 1 + README_ja.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.md | 1 + docs/source/en/model_doc/seggpt.md | 90 ++ src/transformers/__init__.py | 23 +- src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + .../models/auto/image_processing_auto.py | 1 + src/transformers/models/auto/modeling_auto.py | 1 + src/transformers/models/seggpt/__init__.py | 71 ++ .../models/seggpt/configuration_seggpt.py | 145 +++ .../models/seggpt/convert_seggpt_to_hf.py | 222 ++++ .../models/seggpt/image_processing_seggpt.py | 626 ++++++++++ .../models/seggpt/modeling_seggpt.py | 1014 +++++++++++++++++ src/transformers/utils/dummy_pt_objects.py | 24 + .../utils/dummy_vision_objects.py | 7 + tests/models/seggpt/__init__.py | 0 .../seggpt/test_image_processing_seggpt.py | 231 ++++ tests/models/seggpt/test_modeling_seggpt.py | 339 ++++++ tests/test_modeling_common.py | 10 + utils/check_repo.py | 1 + 28 files changed, 2816 insertions(+), 4 deletions(-) create mode 100644 docs/source/en/model_doc/seggpt.md create mode 100644 src/transformers/models/seggpt/__init__.py create mode 100644 src/transformers/models/seggpt/configuration_seggpt.py create mode 100644 src/transformers/models/seggpt/convert_seggpt_to_hf.py create mode 100644 src/transformers/models/seggpt/image_processing_seggpt.py create mode 100644 src/transformers/models/seggpt/modeling_seggpt.py create mode 100644 tests/models/seggpt/__init__.py create mode 100644 tests/models/seggpt/test_image_processing_seggpt.py create mode 100644 tests/models/seggpt/test_modeling_seggpt.py diff --git a/README.md b/README.md index 8b688d8446e..8d9dc398573 100644 --- a/README.md +++ b/README.md @@ -482,6 +482,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. 1. **[SeamlessM4Tv2](https://huggingface.co/docs/transformers/model_doc/seamless_m4t_v2)** (from Meta AI) released with the paper [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team. 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo. +1. 
**[SegGPT](https://huggingface.co/docs/transformers/main/model_doc/seggpt)** (from Beijing Academy of Artificial Intelligence (BAAI)) released with the paper [SegGPT: Segmenting Everything In Context](https://arxiv.org/abs/2304.03284) by Xinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, Tiejun Huang. 1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick. 1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi. 1. **[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi. diff --git a/README_es.md b/README_es.md index cebe43cb91e..e8b85812f73 100644 --- a/README_es.md +++ b/README_es.md @@ -455,6 +455,7 @@ Número actual de puntos de control: ![](https://img.shields.io/endpoint?url=htt 1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. 1. **[SeamlessM4Tv2](https://huggingface.co/docs/transformers/model_doc/seamless_m4t_v2)** (from Meta AI) released with the paper [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team. 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo. +1. **[SegGPT](https://huggingface.co/docs/transformers/main/model_doc/seggpt)** (from Beijing Academy of Artificial Intelligence (BAAI) released with the paper [SegGPT: Segmenting Everything In Context](https://arxiv.org/abs/2304.03284) by Xinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, Tiejun Huang. 1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick. 1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi. 1. 
**[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi. diff --git a/README_fr.md b/README_fr.md index 39bd0f8df05..9ff23f6025b 100644 --- a/README_fr.md +++ b/README_fr.md @@ -476,6 +476,7 @@ Nombre actuel de points de contrôle : ![](https://img.shields.io/endpoint?url=h 1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (de Meta AI) a été publié dans l'article [SeamlessM4T — Traduction multimodale et massivement multilingue](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) par l'équipe de communication transparente. 1. **[SeamlessM4Tv2](https://huggingface.co/docs/transformers/model_doc/seamless_m4t_v2)** (de Meta AI) a été publié dans l'article [Seamless: Traduction de la parole multilingue, expressive et en continu](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) par l'équipe de communication transparente. 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (de NVIDIA) a été publié dans l'article [SegFormer : Conception simple et efficace pour la segmentation sémantique avec des transformateurs](https://arxiv.org/abs/2105.15203) par Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo. +1. **[SegGPT](https://huggingface.co/docs/transformers/main/model_doc/seggpt)** (de Beijing Academy of Artificial Intelligence (BAAI) publié dans l'article [SegGPT: Segmenting Everything In Context](https://arxiv.org/abs/2304.03284) parXinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, Tiejun Huang. 1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (de Meta AI) a été publié dans l'article [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) par Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick. 1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (de ASAPP) a été publié dans l'article [Compromis entre performances et efficacité dans l'entraînement non supervisé pour la reconnaissance vocale](https://arxiv.org/abs/2109.06870) par Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi. 1. **[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (de ASAPP) a été publié dans l'article [Compromis entre performances et efficacité dans l'entraînement non supervisé pour la reconnaissance vocale](https://arxiv.org/abs/2109.06870) par Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi. diff --git a/README_hd.md b/README_hd.md index fee9a2c44bb..081d2d3e206 100644 --- a/README_hd.md +++ b/README_hd.md @@ -429,6 +429,7 @@ conda install conda-forge::transformers 1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. 1. 
**[SeamlessM4Tv2](https://huggingface.co/docs/transformers/model_doc/seamless_m4t_v2)** (from Meta AI) released with the paper [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team. 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo. +1. **[SegGPT](https://huggingface.co/docs/transformers/main/model_doc/seggpt)** (Beijing Academy of Artificial Intelligence (BAAI से) Xinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, Tiejun Huang. द्वाराअनुसंधान पत्र [SegGPT: Segmenting Everything In Context](https://arxiv.org/abs/2304.03284) के साथ जारी किया गया 1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (Meta AI से) Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick. द्वाराअनुसंधान पत्र [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) के साथ जारी किया गया 1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (ASAPP से) साथ देने वाला पेपर [भाषण पहचान के लिए अनसुपरवाइज्ड प्री-ट्रेनिंग में परफॉर्मेंस-एफिशिएंसी ट्रेड-ऑफ्स](https://arxiv.org/abs/2109.06870) फेलिक्स वू, क्वांगयुन किम, जिंग पैन, क्यू हान, किलियन क्यू. वेनबर्गर, योव आर्टज़ी द्वारा। 1. **[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (ASAPP से) साथ में पेपर [भाषण पहचान के लिए अनसुपरवाइज्ड प्री-ट्रेनिंग में परफॉर्मेंस-एफिशिएंसी ट्रेड-ऑफ्स](https://arxiv.org/abs/2109.06870) फेलिक्स वू, क्वांगयुन किम, जिंग पैन, क्यू हान, किलियन क्यू. वेनबर्गर, योआव आर्टज़ी द्वारा पोस्ट किया गया। diff --git a/README_ja.md b/README_ja.md index b350abb6eaa..69e8a05fe5d 100644 --- a/README_ja.md +++ b/README_ja.md @@ -489,6 +489,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ 1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. 1. **[SeamlessM4Tv2](https://huggingface.co/docs/transformers/model_doc/seamless_m4t_v2)** (from Meta AI) released with the paper [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team. 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (NVIDIA から) Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo から公開された研究論文: [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) +1. **[SegGPT](https://huggingface.co/docs/transformers/main/model_doc/seggpt)** (Beijing Academy of Artificial Intelligence (BAAI から) Xinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, Tiejun Huang. から公開された研究論文 [SegGPT: Segmenting Everything In Context](https://arxiv.org/abs/2304.03284) 1. 
**[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (Meta AI から) Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick. から公開された研究論文 [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) 1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (ASAPP から) Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi から公開された研究論文: [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) 1. **[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (ASAPP から) Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi から公開された研究論文: [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) diff --git a/README_ko.md b/README_ko.md index 4f714eaafbc..daa13f8635a 100644 --- a/README_ko.md +++ b/README_ko.md @@ -404,6 +404,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. 1. **[SeamlessM4Tv2](https://huggingface.co/docs/transformers/model_doc/seamless_m4t_v2)** (from Meta AI) released with the paper [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team. 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (NVIDIA 에서) Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo 의 [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) 논문과 함께 발표했습니다. +1. **[SegGPT](https://huggingface.co/docs/transformers/main/model_doc/seggpt)** (Beijing Academy of Artificial Intelligence (BAAI 에서 제공)은 Xinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, Tiejun Huang.의 [SegGPT: Segmenting Everything In Context](https://arxiv.org/abs/2304.03284)논문과 함께 발표했습니다. 1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (Meta AI 에서 제공)은 Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick.의 [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf)논문과 함께 발표했습니다. 1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (ASAPP 에서) Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi 의 [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) 논문과 함께 발표했습니다. 1. **[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (ASAPP 에서) Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi 의 [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) 논문과 함께 발표했습니다. diff --git a/README_zh-hans.md b/README_zh-hans.md index a3394b00a65..8cd63a9c91c 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -428,6 +428,7 @@ conda install conda-forge::transformers 1. 
**[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. 1. **[SeamlessM4Tv2](https://huggingface.co/docs/transformers/model_doc/seamless_m4t_v2)** (from Meta AI) released with the paper [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team. 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (来自 NVIDIA) 伴随论文 [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) 由 Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo 发布。 +1. **[SegGPT](https://huggingface.co/docs/transformers/main/model_doc/seggpt)** (来自 Beijing Academy of Artificial Intelligence (BAAI) 伴随论文 [SegGPT: Segmenting Everything In Context](https://arxiv.org/abs/2304.03284) 由 Xinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, Tiejun Huang 发布。 1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (来自 Meta AI) 伴随论文 [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) 由 Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick 发布。 1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (来自 ASAPP) 伴随论文 [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) 由 Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi 发布。 1. **[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (来自 ASAPP) 伴随论文 [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) 由 Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 024fecdcc6d..ce345a70265 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -440,6 +440,7 @@ conda install conda-forge::transformers 1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team. 1. **[SeamlessM4Tv2](https://huggingface.co/docs/transformers/model_doc/seamless_m4t_v2)** (from Meta AI) released with the paper [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team. 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo. +1. 
**[SegGPT](https://huggingface.co/docs/transformers/main/model_doc/seggpt)** (from Beijing Academy of Artificial Intelligence (BAAI) released with the paper [SegGPT: Segmenting Everything In Context](https://arxiv.org/abs/2304.03284) by Xinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, Tiejun Huang. 1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick. 1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi. 1. **[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 18dad03d9b1..976a104294c 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -583,6 +583,8 @@ title: ResNet - local: model_doc/segformer title: SegFormer + - local: model_doc/seggpt + title: SegGpt - local: model_doc/swiftformer title: SwiftFormer - local: model_doc/swin diff --git a/docs/source/en/index.md b/docs/source/en/index.md index d6b46ace97e..ae5e21d3b59 100644 --- a/docs/source/en/index.md +++ b/docs/source/en/index.md @@ -251,6 +251,7 @@ Flax), PyTorch, and/or TensorFlow. | [SeamlessM4T](model_doc/seamless_m4t) | ✅ | ❌ | ❌ | | [SeamlessM4Tv2](model_doc/seamless_m4t_v2) | ✅ | ❌ | ❌ | | [SegFormer](model_doc/segformer) | ✅ | ✅ | ❌ | +| [SegGPT](model_doc/seggpt) | ✅ | ❌ | ❌ | | [SEW](model_doc/sew) | ✅ | ❌ | ❌ | | [SEW-D](model_doc/sew-d) | ✅ | ❌ | ❌ | | [SigLIP](model_doc/siglip) | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/seggpt.md b/docs/source/en/model_doc/seggpt.md new file mode 100644 index 00000000000..a7f41630e40 --- /dev/null +++ b/docs/source/en/model_doc/seggpt.md @@ -0,0 +1,90 @@ + + +# SegGPT + +## Overview + +The SegGPT model was proposed in [SegGPT: Segmenting Everything In Context](https://arxiv.org/abs/2304.03284) by Xinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, Tiejun Huang. SegGPT employs a decoder-only Transformer that can generate a segmentation mask given an input image, a prompt image and its corresponding prompt mask. The model achieves remarkable one-shot results with 56.1 mIoU on COCO-20 and 85.6 mIoU on FSS-1000. + +The abstract from the paper is the following: + +*We present SegGPT, a generalist model for segmenting everything in context. We unify various segmentation tasks into a generalist in-context learning framework that accommodates different kinds of segmentation data by transforming them into the same format of images. The training of SegGPT is formulated as an in-context coloring problem with random color mapping for each data sample. The objective is to accomplish diverse tasks according to the context, rather than relying on specific colors. 
After training, SegGPT can perform arbitrary segmentation tasks in images or videos via in-context inference, such as object instance, stuff, part, contour, and text. SegGPT is evaluated on a broad range of tasks, including few-shot semantic segmentation, video object segmentation, semantic segmentation, and panoptic segmentation. Our results show strong capabilities in segmenting in-domain and out-of-domain targets, either qualitatively or quantitatively.* + +Tips: +- One can use [`SegGptImageProcessor`] to prepare the input image, prompt image and prompt mask for the model. +- It's highly advisable to pass `num_labels` (not counting the background) during preprocessing and postprocessing with [`SegGptImageProcessor`] for your use case. +- When doing inference with [`SegGptForImageSegmentation`], if your `batch_size` is greater than 1, you can use feature ensemble across your images by passing `feature_ensemble=True` in the forward method. + +Here's how to use the model for one-shot semantic segmentation: + +```python +import torch +from datasets import load_dataset +from transformers import SegGptImageProcessor, SegGptForImageSegmentation + +model_id = "BAAI/seggpt-vit-large" +image_processor = SegGptImageProcessor.from_pretrained(model_id) +model = SegGptForImageSegmentation.from_pretrained(model_id) + +dataset_id = "EduardoPacheco/FoodSeg103" +ds = load_dataset(dataset_id, split="train") +# Number of labels in FoodSeg103 (not including background) +num_labels = 103 + +image_input = ds[4]["image"] +# Ground truth mask, only needed if you want to compare against the prediction +ground_truth = ds[4]["label"] +image_prompt = ds[29]["image"] +mask_prompt = ds[29]["label"] + +inputs = image_processor( + images=image_input, + prompt_images=image_prompt, + prompt_masks=mask_prompt, + num_labels=num_labels, + return_tensors="pt" +) + +with torch.no_grad(): + outputs = model(**inputs) + +target_sizes = [image_input.size[::-1]] +mask = image_processor.post_process_semantic_segmentation(outputs, target_sizes, num_labels=num_labels)[0] +``` + +This model was contributed by [EduardoPacheco](https://huggingface.co/EduardoPacheco). +The original code can be found [here](https://github.com/baaivision/Painter/tree/main).
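Building on the `feature_ensemble` tip above, the snippet below is a minimal sketch of few-shot usage under the same setup: the input image is repeated once per prompt image/mask pair and features are ensembled across the prompts. The second prompt index (`ds[30]`) is an arbitrary illustrative choice, not something prescribed by the checkpoint.

```python
import torch
from datasets import load_dataset
from transformers import SegGptImageProcessor, SegGptForImageSegmentation

model_id = "BAAI/seggpt-vit-large"
image_processor = SegGptImageProcessor.from_pretrained(model_id)
model = SegGptForImageSegmentation.from_pretrained(model_id)

ds = load_dataset("EduardoPacheco/FoodSeg103", split="train")
num_labels = 103

image_input = ds[4]["image"]
# Two prompt image/mask pairs for the same segmentation task
prompt_images = [ds[29]["image"], ds[30]["image"]]
prompt_masks = [ds[29]["label"], ds[30]["label"]]

# Repeat the input image so that each prompt forms one batch entry
inputs = image_processor(
    images=[image_input] * len(prompt_images),
    prompt_images=prompt_images,
    prompt_masks=prompt_masks,
    num_labels=num_labels,
    return_tensors="pt",
)

with torch.no_grad():
    # feature_ensemble=True averages intermediate features across the prompts in the batch
    outputs = model(**inputs, feature_ensemble=True)

target_sizes = [image_input.size[::-1]] * len(prompt_images)
masks = image_processor.post_process_semantic_segmentation(outputs, target_sizes, num_labels=num_labels)
mask = masks[0]  # predictions for the different prompts should largely agree
```

Each batch entry still yields its own post-processed mask, but with `feature_ensemble=True` they are computed from features shared across all prompts.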
+ + +## SegGptConfig + +[[autodoc]] SegGptConfig + +## SegGptImageProcessor + +[[autodoc]] SegGptImageProcessor + - preprocess + - post_process_semantic_segmentation + +## SegGptModel + +[[autodoc]] SegGptModel + - forward + +## SegGptForImageSegmentation + +[[autodoc]] SegGptForImageSegmentation + - forward \ No newline at end of file diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index f427c4be7b3..bc1be5842d0 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -767,6 +767,7 @@ _import_structure = { "SeamlessM4Tv2Config", ], "models.segformer": ["SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SegformerConfig"], + "models.seggpt": ["SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SegGptConfig"], "models.sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"], "models.sew_d": ["SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWDConfig"], "models.siglip": [ @@ -1316,6 +1317,7 @@ else: _import_structure["models.pvt"].extend(["PvtImageProcessor"]) _import_structure["models.sam"].extend(["SamImageProcessor"]) _import_structure["models.segformer"].extend(["SegformerFeatureExtractor", "SegformerImageProcessor"]) + _import_structure["models.seggpt"].extend(["SegGptImageProcessor"]) _import_structure["models.siglip"].append("SiglipImageProcessor") _import_structure["models.swin2sr"].append("Swin2SRImageProcessor") _import_structure["models.tvlt"].append("TvltImageProcessor") @@ -3192,6 +3194,14 @@ else: "SegformerPreTrainedModel", ] ) + _import_structure["models.seggpt"].extend( + [ + "SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST", + "SegGptForImageSegmentation", + "SegGptModel", + "SegGptPreTrainedModel", + ] + ) _import_structure["models.sew"].extend( [ "SEW_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -5531,10 +5541,8 @@ if TYPE_CHECKING: SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, SeamlessM4Tv2Config, ) - from .models.segformer import ( - SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, - SegformerConfig, - ) + from .models.segformer import SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SegformerConfig + from .models.seggpt import SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, SegGptConfig from .models.sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig from .models.sew_d import SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWDConfig from .models.siglip import ( @@ -6080,6 +6088,7 @@ if TYPE_CHECKING: from .models.pvt import PvtImageProcessor from .models.sam import SamImageProcessor from .models.segformer import SegformerFeatureExtractor, SegformerImageProcessor + from .models.seggpt import SegGptImageProcessor from .models.siglip import SiglipImageProcessor from .models.swin2sr import Swin2SRImageProcessor from .models.tvlt import TvltImageProcessor @@ -7635,6 +7644,12 @@ if TYPE_CHECKING: SegformerModel, SegformerPreTrainedModel, ) + from .models.seggpt import ( + SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST, + SegGptForImageSegmentation, + SegGptModel, + SegGptPreTrainedModel, + ) from .models.sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 5d59756f91a..df5496f09d0 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -194,6 +194,7 @@ from . 
import ( seamless_m4t, seamless_m4t_v2, segformer, + seggpt, sew, sew_d, siglip, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index 282007836a0..ab24b8a3326 100755 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -202,6 +202,7 @@ CONFIG_MAPPING_NAMES = OrderedDict( ("seamless_m4t", "SeamlessM4TConfig"), ("seamless_m4t_v2", "SeamlessM4Tv2Config"), ("segformer", "SegformerConfig"), + ("seggpt", "SegGptConfig"), ("sew", "SEWConfig"), ("sew-d", "SEWDConfig"), ("siglip", "SiglipConfig"), @@ -428,6 +429,7 @@ CONFIG_ARCHIVE_MAP_MAPPING_NAMES = OrderedDict( ("seamless_m4t", "SEAMLESS_M4T_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("seamless_m4t_v2", "SEAMLESS_M4T_V2_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("segformer", "SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("seggpt", "SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("sew", "SEW_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("sew-d", "SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("siglip", "SIGLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -680,6 +682,7 @@ MODEL_NAMES_MAPPING = OrderedDict( ("seamless_m4t", "SeamlessM4T"), ("seamless_m4t_v2", "SeamlessM4Tv2"), ("segformer", "SegFormer"), + ("seggpt", "SegGPT"), ("sew", "SEW"), ("sew-d", "SEW-D"), ("siglip", "SigLIP"), diff --git a/src/transformers/models/auto/image_processing_auto.py b/src/transformers/models/auto/image_processing_auto.py index c9cd6fca69d..aef894a425b 100644 --- a/src/transformers/models/auto/image_processing_auto.py +++ b/src/transformers/models/auto/image_processing_auto.py @@ -98,6 +98,7 @@ IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict( ("resnet", "ConvNextImageProcessor"), ("sam", "SamImageProcessor"), ("segformer", "SegformerImageProcessor"), + ("seggpt", "SegGptImageProcessor"), ("siglip", "SiglipImageProcessor"), ("swiftformer", "ViTImageProcessor"), ("swin", "ViTImageProcessor"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 50534c58e8a..9a2aaaca01d 100755 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -193,6 +193,7 @@ MODEL_MAPPING_NAMES = OrderedDict( ("seamless_m4t", "SeamlessM4TModel"), ("seamless_m4t_v2", "SeamlessM4Tv2Model"), ("segformer", "SegformerModel"), + ("seggpt", "SegGptModel"), ("sew", "SEWModel"), ("sew-d", "SEWDModel"), ("siglip", "SiglipModel"), diff --git a/src/transformers/models/seggpt/__init__.py b/src/transformers/models/seggpt/__init__.py new file mode 100644 index 00000000000..49649c92865 --- /dev/null +++ b/src/transformers/models/seggpt/__init__.py @@ -0,0 +1,71 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available + + +_import_structure = { + "configuration_seggpt": ["SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SegGptConfig", "SegGptOnnxConfig"] +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_seggpt"] = [ + "SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST", + "SegGptModel", + "SegGptPreTrainedModel", + "SegGptForImageSegmentation", + ] + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["image_processing_seggpt"] = ["SegGptImageProcessor"] + +if TYPE_CHECKING: + from .configuration_seggpt import SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, SegGptConfig, SegGptOnnxConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_seggpt import ( + SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST, + SegGptForImageSegmentation, + SegGptModel, + SegGptPreTrainedModel, + ) + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .image_processing_seggpt import SegGptImageProcessor + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/seggpt/configuration_seggpt.py b/src/transformers/models/seggpt/configuration_seggpt.py new file mode 100644 index 00000000000..37c81f10323 --- /dev/null +++ b/src/transformers/models/seggpt/configuration_seggpt.py @@ -0,0 +1,145 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" SegGpt model configuration""" + + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +SEGGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "BAAI/seggpt-vit-large": "https://huggingface.co/BAAI/seggpt-vit-large/resolve/main/config.json", +} + + +class SegGptConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`SegGptModel`]. It is used to instantiate a SegGPT + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the SegGPT + [BAAI/seggpt-vit-large](https://huggingface.co/BAAI/seggpt-vit-large) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + hidden_size (`int`, *optional*, defaults to 1024): + Dimensionality of the encoder layers and the pooler layer. 
+ num_hidden_layers (`int`, *optional*, defaults to 24): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer encoder. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the layer normalization layers. + image_size (`List[int]`, *optional*, defaults to `[896, 448]`): + The size (resolution) of each image. + patch_size (`int`, *optional*, defaults to 16): + The size (resolution) of each patch. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + qkv_bias (`bool`, *optional*, defaults to `True`): + Whether to add a bias to the queries, keys and values. + mlp_dim (`int`, *optional*): + The dimensionality of the MLP layer in the Transformer encoder. If unset, defaults to + `hidden_size` * 4. + drop_path_rate (`float`, *optional*, defaults to 0.1): + The drop path rate for the dropout layers. + pretrain_image_size (`int`, *optional*, defaults to 224): + The pretrained size of the absolute position embeddings. + decoder_hidden_size (`int`, *optional*, defaults to 64): + Hidden size for decoder. + use_relative_position_embeddings (`bool`, *optional*, defaults to `True`): + Whether to use relative position embeddings in the attention layers. + merge_index (`int`, *optional*, defaults to 2): + The index of the encoder layer to merge the embeddings. + intermediate_hidden_state_indices (`List[int]`, *optional*, defaults to `[5, 11, 17, 23]`): + The indices of the encoder layers which we store as features for the decoder. + beta (`float`, *optional*, defaults to 0.01): + Regularization factor for SegGptLoss (smooth-l1 loss). 
+ + Example: + + ```python + >>> from transformers import SegGptConfig, SegGptModel + + >>> # Initializing a SegGPT seggpt-vit-large style configuration + >>> configuration = SegGptConfig() + + >>> # Initializing a model (with random weights) from the seggpt-vit-large style configuration + >>> model = SegGptModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "seggpt" + + def __init__( + self, + hidden_size=1024, + num_hidden_layers=24, + num_attention_heads=16, + hidden_act="gelu", + hidden_dropout_prob=0.0, + initializer_range=0.02, + layer_norm_eps=1e-6, + image_size=[896, 448], + patch_size=16, + num_channels=3, + qkv_bias=True, + mlp_dim=None, + drop_path_rate=0.1, + pretrain_image_size=224, + decoder_hidden_size=64, + use_relative_position_embeddings=True, + merge_index=2, + intermediate_hidden_state_indices=[5, 11, 17, 23], + beta=0.01, + **kwargs, + ): + super().__init__(**kwargs) + + if merge_index > min(intermediate_hidden_state_indices): + raise ValueError( + f"Merge index must be less than the minimum encoder output index, but got {merge_index=} and {intermediate_hidden_state_indices=}" + ) + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.qkv_bias = qkv_bias + self.drop_path_rate = drop_path_rate + self.pretrain_image_size = pretrain_image_size + self.decoder_hidden_size = decoder_hidden_size + self.use_relative_position_embeddings = use_relative_position_embeddings + self.merge_index = merge_index + self.intermediate_hidden_state_indices = intermediate_hidden_state_indices + self.beta = beta + self.mlp_dim = int(hidden_size * 4) if mlp_dim is None else mlp_dim diff --git a/src/transformers/models/seggpt/convert_seggpt_to_hf.py b/src/transformers/models/seggpt/convert_seggpt_to_hf.py new file mode 100644 index 00000000000..a13372dfbb1 --- /dev/null +++ b/src/transformers/models/seggpt/convert_seggpt_to_hf.py @@ -0,0 +1,222 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert SegGPT checkpoints from the original repository. 
+ +URL: https://github.com/baaivision/Painter/tree/main/SegGPT +""" + + +import argparse + +import requests +import torch +from PIL import Image + +from transformers import SegGptConfig, SegGptForImageSegmentation, SegGptImageProcessor +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + + +# here we list all keys to be renamed (original name on the left, our name on the right) +def create_rename_keys(config): + rename_keys = [] + + # fmt: off + + # rename embedding and its parameters + rename_keys.append(("patch_embed.proj.weight", "model.embeddings.patch_embeddings.projection.weight")) + rename_keys.append(("patch_embed.proj.bias", "model.embeddings.patch_embeddings.projection.bias")) + rename_keys.append(("mask_token", "model.embeddings.mask_token")) + rename_keys.append(("segment_token_x", "model.embeddings.segment_token_input")) + rename_keys.append(("segment_token_y", "model.embeddings.segment_token_prompt")) + rename_keys.append(("type_token_cls", "model.embeddings.type_token_semantic")) + rename_keys.append(("type_token_ins", "model.embeddings.type_token_instance")) + rename_keys.append(("pos_embed", "model.embeddings.position_embeddings")) + + # rename decoder and other + rename_keys.append(("norm.weight", "model.encoder.layernorm.weight")) + rename_keys.append(("norm.bias", "model.encoder.layernorm.bias")) + rename_keys.append(("decoder_embed.weight", "decoder.decoder_embed.weight")) + rename_keys.append(("decoder_embed.bias", "decoder.decoder_embed.bias")) + rename_keys.append(("decoder_pred.0.weight", "decoder.decoder_pred.conv.weight")) + rename_keys.append(("decoder_pred.0.bias", "decoder.decoder_pred.conv.bias")) + rename_keys.append(("decoder_pred.1.weight", "decoder.decoder_pred.layernorm.weight")) + rename_keys.append(("decoder_pred.1.bias", "decoder.decoder_pred.layernorm.bias")) + rename_keys.append(("decoder_pred.3.weight", "decoder.decoder_pred.head.weight")) + rename_keys.append(("decoder_pred.3.bias", "decoder.decoder_pred.head.bias")) + + # rename blocks + for i in range(config.num_hidden_layers): + rename_keys.append((f"blocks.{i}.attn.qkv.weight", f"model.encoder.layers.{i}.attention.qkv.weight")) + rename_keys.append((f"blocks.{i}.attn.qkv.bias", f"model.encoder.layers.{i}.attention.qkv.bias")) + rename_keys.append((f"blocks.{i}.attn.proj.weight", f"model.encoder.layers.{i}.attention.proj.weight")) + rename_keys.append((f"blocks.{i}.attn.proj.bias", f"model.encoder.layers.{i}.attention.proj.bias")) + rename_keys.append((f"blocks.{i}.attn.rel_pos_h", f"model.encoder.layers.{i}.attention.rel_pos_h")) + rename_keys.append((f"blocks.{i}.attn.rel_pos_w", f"model.encoder.layers.{i}.attention.rel_pos_w")) + + rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"model.encoder.layers.{i}.mlp.lin1.weight")) + rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"model.encoder.layers.{i}.mlp.lin1.bias")) + rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"model.encoder.layers.{i}.mlp.lin2.weight")) + rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"model.encoder.layers.{i}.mlp.lin2.bias")) + + rename_keys.append((f"blocks.{i}.norm1.weight", f"model.encoder.layers.{i}.layernorm_before.weight")) + rename_keys.append((f"blocks.{i}.norm1.bias", f"model.encoder.layers.{i}.layernorm_before.bias")) + rename_keys.append((f"blocks.{i}.norm2.weight", f"model.encoder.layers.{i}.layernorm_after.weight")) + rename_keys.append((f"blocks.{i}.norm2.bias", f"model.encoder.layers.{i}.layernorm_after.bias")) + + # fmt: on + + return 
rename_keys + + +def rename_key(dct, old, new): + val = dct.pop(old) + dct[new] = val + + +# We will verify our results on example images from the original SegGPT repository +def prepare_input(): + image_input_url = ( + "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_2.jpg" + ) + image_prompt_url = ( + "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1.jpg" + ) + mask_prompt_url = ( + "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1_target.png" + ) + + image_input = Image.open(requests.get(image_input_url, stream=True).raw) + image_prompt = Image.open(requests.get(image_prompt_url, stream=True).raw) + mask_prompt = Image.open(requests.get(mask_prompt_url, stream=True).raw) + + return image_input, image_prompt, mask_prompt + + +@torch.no_grad() +def convert_seggpt_checkpoint(args): + model_name = args.model_name + pytorch_dump_folder_path = args.pytorch_dump_folder_path + verify_logits = args.verify_logits + push_to_hub = args.push_to_hub + + # Define default SegGpt configuration + config = SegGptConfig() + + # Load original checkpoint + checkpoint_url = "https://huggingface.co/BAAI/SegGpt/blob/main/seggpt_vit_large.pth" + original_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"] + + # Rename keys + new_state_dict = original_state_dict.copy() + rename_keys = create_rename_keys(config) + + for src, dest in rename_keys: + rename_key(new_state_dict, src, dest) + + # Load HF model + model = SegGptForImageSegmentation(config) + model.eval() + missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False) + print("Missing keys:", missing_keys) + print("Unexpected keys:", unexpected_keys) + + input_img, prompt_img, prompt_mask = prepare_input() + image_processor = SegGptImageProcessor() + inputs = image_processor(images=input_img, prompt_images=prompt_img, prompt_masks=prompt_mask, return_tensors="pt") + + expected_prompt_pixel_values = torch.tensor( + [ + [[-0.6965, -0.6965, -0.6965], [-0.6965, -0.6965, -0.6965], [-0.6965, -0.6965, -0.6965]], + [[1.6583, 1.6583, 1.6583], [1.6583, 1.6583, 1.6583], [1.6583, 1.6583, 1.6583]], + [[2.3088, 2.3088, 2.3088], [2.3088, 2.3088, 2.3088], [2.3088, 2.3088, 2.3088]], + ] + ) + + expected_pixel_values = torch.tensor( + [ + [[1.6324, 1.6153, 1.5810], [1.6153, 1.5982, 1.5810], [1.5810, 1.5639, 1.5639]], + [[1.2731, 1.2556, 1.2206], [1.2556, 1.2381, 1.2031], [1.2206, 1.2031, 1.1681]], + [[1.6465, 1.6465, 1.6465], [1.6465, 1.6465, 1.6465], [1.6291, 1.6291, 1.6291]], + ] + ) + + expected_prompt_masks = torch.tensor( + [ + [[-2.1179, -2.1179, -2.1179], [-2.1179, -2.1179, -2.1179], [-2.1179, -2.1179, -2.1179]], + [[-2.0357, -2.0357, -2.0357], [-2.0357, -2.0357, -2.0357], [-2.0357, -2.0357, -2.0357]], + [[-1.8044, -1.8044, -1.8044], [-1.8044, -1.8044, -1.8044], [-1.8044, -1.8044, -1.8044]], + ] + ) + + assert torch.allclose(inputs.pixel_values[0, :, :3, :3], expected_pixel_values, atol=1e-4) + assert torch.allclose(inputs.prompt_pixel_values[0, :, :3, :3], expected_prompt_pixel_values, atol=1e-4) + assert torch.allclose(inputs.prompt_masks[0, :, :3, :3], expected_prompt_masks, atol=1e-4) + + torch.manual_seed(2) + outputs = model(**inputs) + print(outputs) + + if verify_logits: + expected_output = torch.tensor( + [ + [[-2.1208, -2.1190, -2.1198], [-2.1237, -2.1228, -2.1227], [-2.1232, -2.1226, -2.1228]], + [[-2.0405, -2.0396, -2.0403], [-2.0434, -2.0434, -2.0433], [-2.0428, -2.0432,
-2.0434]], + [[-1.8102, -1.8088, -1.8099], [-1.8131, -1.8126, -1.8129], [-1.8130, -1.8128, -1.8131]], + ] + ) + assert torch.allclose(outputs.pred_masks[0, :, :3, :3], expected_output, atol=1e-4) + print("Looks good!") + else: + print("Converted without verifying logits") + + if pytorch_dump_folder_path is not None: + print(f"Saving model and processor for {model_name} to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + image_processor.save_pretrained(pytorch_dump_folder_path) + + if push_to_hub: + print(f"Pushing model and processor for {model_name} to hub") + model.push_to_hub(f"EduardoPacheco/{model_name}") + image_processor.push_to_hub(f"EduardoPacheco/{model_name}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--model_name", + default="seggpt-vit-large", + type=str, + choices=["seggpt-vit-large"], + help="Name of the SegGpt model you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." + ) + parser.add_argument( + "--verify_logits", + action="store_false", + help="Whether or not to verify the logits against the original implementation.", + ) + parser.add_argument( + "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." + ) + + args = parser.parse_args() + convert_seggpt_checkpoint(args) diff --git a/src/transformers/models/seggpt/image_processing_seggpt.py b/src/transformers/models/seggpt/image_processing_seggpt.py new file mode 100644 index 00000000000..80fb94cdc7a --- /dev/null +++ b/src/transformers/models/seggpt/image_processing_seggpt.py @@ -0,0 +1,626 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Image processor class for SegGPT.""" + +from typing import Dict, List, Optional, Tuple, Union + +import numpy as np + +from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict +from ...image_transforms import resize, to_channel_dimension_format +from ...image_utils import ( + IMAGENET_DEFAULT_MEAN, + IMAGENET_DEFAULT_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + get_channel_dimension_axis, + infer_channel_dimension_format, + is_scaled_image, + make_list_of_images, + to_numpy_array, + valid_images, +) +from ...utils import TensorType, is_torch_available, logging, requires_backends + + +if is_torch_available(): + import torch + + +logger = logging.get_logger(__name__) + + +# See https://arxiv.org/pdf/2212.02499.pdf at 3.1 Redefining Output Spaces as "Images" - Semantic Segmentation from PAINTER paper +# Taken from https://github.com/Abdullah-Meda/Painter/blob/main/Painter/data/coco_semseg/gen_color_coco_panoptic_segm.py#L31 +def build_palette(num_labels: int) -> List[Tuple[int, int]]: + base = int(num_labels ** (1 / 3)) + 1 + margin = 256 // base + + # we assume that class_idx 0 is the background which is mapped to black + color_list = [(0, 0, 0)] + for location in range(num_labels): + num_seq_r = location // base**2 + num_seq_g = (location % base**2) // base + num_seq_b = location % base + + R = 255 - num_seq_r * margin + G = 255 - num_seq_g * margin + B = 255 - num_seq_b * margin + + color_list.append((R, G, B)) + + return color_list + + +def get_num_channels(image: np.ndarray, input_data_format: ChannelDimension) -> int: + if image.ndim == 2: + return 0 + + channel_idx = get_channel_dimension_axis(image, input_data_format) + return image.shape[channel_idx] + + +def mask_to_rgb( + mask: np.ndarray, + palette: Optional[List[Tuple[int, int]]] = None, + input_data_format: Optional[ChannelDimension] = None, + data_format: Optional[ChannelDimension] = None, +) -> np.ndarray: + if input_data_format is None and mask.ndim > 2: + input_data_format = infer_channel_dimension_format(mask) + + data_format = data_format if data_format is not None else input_data_format + + num_channels = get_num_channels(mask, input_data_format) + + if num_channels == 3: + return to_channel_dimension_format(mask, data_format, input_data_format) if data_format is not None else mask + + if palette is not None: + height, width = mask.shape + + rgb_mask = np.zeros((3, height, width), dtype=np.uint8) + + classes_in_mask = np.unique(mask) + + for class_idx in classes_in_mask: + rgb_value = palette[class_idx] + class_mask = (mask == class_idx).astype(np.uint8) + class_mask = np.expand_dims(class_mask, axis=-1) + class_rgb_mask = class_mask * np.array(rgb_value) + class_rgb_mask = np.moveaxis(class_rgb_mask, -1, 0) + rgb_mask += class_rgb_mask.astype(np.uint8) + + rgb_mask = np.clip(rgb_mask, 0, 255).astype(np.uint8) + + else: + rgb_mask = np.repeat(mask[None, ...], 3, axis=0) + + return ( + to_channel_dimension_format(rgb_mask, data_format, input_data_format) if data_format is not None else rgb_mask + ) + + +class SegGptImageProcessor(BaseImageProcessor): + r""" + Constructs a SegGpt image processor. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image's (height, width) dimensions to the specified `(size["height"], + size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method. + size (`dict`, *optional*, defaults to `{"height": 448, "width": 448}`): + Size of the output image after resizing. 
Can be overridden by the `size` parameter in the `preprocess` + method. + resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): + Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the + `preprocess` method. + do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` + parameter in the `preprocess` method. + rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the + `preprocess` method. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` + method. + image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): + Mean to use if normalizing the image. This is a float or list of floats the length of the number of + channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. + image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): + Standard deviation to use if normalizing the image. This is a float or list of floats the length of the + number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. + """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize: bool = True, + size: Optional[Dict[str, int]] = None, + resample: PILImageResampling = PILImageResampling.BICUBIC, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_normalize: bool = True, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + **kwargs, + ) -> None: + super().__init__(**kwargs) + size = size if size is not None else {"height": 448, "width": 448} + size = get_size_dict(size) + self.do_resize = do_resize + self.do_rescale = do_rescale + self.do_normalize = do_normalize + self.size = size + self.resample = resample + self.rescale_factor = rescale_factor + self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN + self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD + + def get_palette(self, num_labels: int) -> List[Tuple[int, int]]: + """Build a palette to map the prompt mask from a single channel to a 3 channel RGB. + + Args: + num_labels (`int`): + Number of classes in the segmentation task (excluding the background). + + Returns: + `List[Tuple[int, int]]`: Palette to map the prompt mask from a single channel to a 3 channel RGB. + """ + return build_palette(num_labels) + + def mask_to_rgb( + self, + image: np.ndarray, + palette: Optional[List[Tuple[int, int]]] = None, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> np.ndarray: + """Convert a mask to RGB format. + + Args: + image (`np.ndarray`): + Mask to convert to RGB format. If the mask is already in RGB format, it will be passed through. + palette (`List[Tuple[int, int]]`, *optional*, defaults to `None`): + Palette to use to convert the mask to RGB format. If unset, the mask is duplicated across the channel + dimension. + data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the output image. 
If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + + Returns: + `np.ndarray`: The mask in RGB format. + """ + return mask_to_rgb( + image, + palette=palette, + data_format=data_format, + input_data_format=input_data_format, + ) + + # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BICUBIC, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize an image to `(size["height"], size["width"])`. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): + `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`. + data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + + Returns: + `np.ndarray`: The resized image. + """ + size = get_size_dict(size) + if "height" not in size or "width" not in size: + raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}") + output_size = (size["height"], size["width"]) + return resize( + image, + size=output_size, + resample=resample, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + def _preprocess_step( + self, + images: ImageInput, + is_mask: bool = False, + do_resize: Optional[bool] = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_rescale: Optional[bool] = None, + rescale_factor: Optional[float] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + num_labels: Optional[int] = None, + **kwargs, + ): + """ + Preprocess an image or batch of images. + + Args: + images (`ImageInput`): + Image to _preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + is_mask (`bool`, *optional*, defaults to `False`): + Whether the image is a mask. If True, the image is converted to RGB using the palette if + `self.num_labels` is specified otherwise RGB is achieved by duplicating the channel. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after + resizing. + resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`): + `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BICUBIC`. Only has + an effect if `do_resize` is set to `True`. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image values between [0 - 1]. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. + image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean to use if `do_normalize` is set to `True`. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to use if `do_normalize` is set to `True`. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. 
Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + num_labels: (`int`, *optional*): + Number of classes in the segmentation task (excluding the background). If specified, a palette will be + built, assuming that class_idx 0 is the background, to map the prompt mask from a single class_idx + channel to a 3 channel RGB. Not specifying this will result in the prompt mask either being passed + through as is if it is already in RGB format or being duplicated across the channel dimension. + """ + do_resize = do_resize if do_resize is not None else self.do_resize + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + resample = resample if resample is not None else self.resample + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + + size = size if size is not None else self.size + size_dict = get_size_dict(size) + + images = make_list_of_images(images) + + if not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + + if do_resize and size is None: + raise ValueError("Size must be specified if do_resize is True.") + + if do_rescale and rescale_factor is None: + raise ValueError("Rescale factor must be specified if do_rescale is True.") + + if do_normalize and (image_mean is None or image_std is None): + raise ValueError("Image mean and std must be specified if do_normalize is True.") + + # All transformations expect numpy arrays. + images = [to_numpy_array(image) for image in images] + + if is_scaled_image(images[0]) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." + ) + + if input_data_format is None and not is_mask: + # We assume that all images have the same channel dimension format. 
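+ # Masks are skipped here on purpose: a single-channel mask can be 2D (no channel axis to infer),
+ # and `mask_to_rgb` below always returns a channels-first array, so the format is set explicitly.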
+ input_data_format = infer_channel_dimension_format(images[0]) + + if is_mask: + palette = self.get_palette(num_labels) if num_labels is not None else None + # Since this is the input for the next transformations its format should be the same as the input_data_format + images = [ + self.mask_to_rgb(image=image, palette=palette, data_format=ChannelDimension.FIRST) for image in images + ] + input_data_format = ChannelDimension.FIRST + + if do_resize: + images = [ + self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format) + for image in images + ] + + if do_rescale: + images = [ + self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) + for image in images + ] + + if do_normalize: + images = [ + self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) + for image in images + ] + + images = [ + to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images + ] + + return images + + def preprocess( + self, + images: Optional[ImageInput] = None, + prompt_images: Optional[ImageInput] = None, + prompt_masks: Optional[ImageInput] = None, + do_resize: Optional[bool] = None, + size: Dict[str, int] = None, + resample: PILImageResampling = None, + do_rescale: Optional[bool] = None, + rescale_factor: Optional[float] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + num_labels: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ): + """ + Preprocess an image or batch of images. + + Args: + images (`ImageInput`): + Image to _preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + prompt_images (`ImageInput`): + Prompt image to _preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + prompt_masks (`ImageInput`): + Prompt mask from prompt image to _preprocess. Expects a single or batch of masks. If the mask masks are + a single channel then it will be converted to RGB using the palette if `self.num_labels` is specified + or by just repeating the channel if not. If the mask is already in RGB format, it will be passed through. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after + resizing. + resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`): + `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BICUBIC`. Only has + an effect if `do_resize` is set to `True`. Doesn't apply to prompt mask as it is resized using nearest. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image values between [0 - 1]. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. 
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. + image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean to use if `do_normalize` is set to `True`. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to use if `do_normalize` is set to `True`. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + num_labels: (`int`, *optional*): + Number of classes in the segmentation task (excluding the background). If specified, a palette will be + built, assuming that class_idx 0 is the background, to map the prompt mask from a single class_idx + channel to a 3 channel RGB. Not specifying this will result in the prompt mask either being passed + through as is if it is already in RGB format or being duplicated across the channel dimension. 
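Examples (a minimal sketch on random arrays; real inputs are typically `PIL.Image.Image` objects):

```python
>>> import numpy as np
>>> from transformers import SegGptImageProcessor

>>> image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
>>> prompt_image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
>>> prompt_mask = np.zeros((224, 224), dtype=np.uint8)  # class-index mask, 0 = background
>>> prompt_mask[50:150, 50:150] = 1

>>> image_processor = SegGptImageProcessor()
>>> inputs = image_processor(
...     images=image, prompt_images=prompt_image, prompt_masks=prompt_mask, num_labels=1, return_tensors="pt"
... )
>>> list(inputs.keys())
['pixel_values', 'prompt_pixel_values', 'prompt_masks']
>>> list(inputs.pixel_values.shape)
[1, 3, 448, 448]
```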
+ """ + if all(v is None for v in [images, prompt_images, prompt_masks]): + raise ValueError("At least one of images, prompt_images, prompt_masks must be specified.") + + data = {} + + if images is not None: + images = self._preprocess_step( + images, + is_mask=False, + do_resize=do_resize, + size=size, + resample=resample, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + data["pixel_values"] = images + + if prompt_images is not None: + prompt_images = self._preprocess_step( + prompt_images, + is_mask=False, + do_resize=do_resize, + size=size, + resample=resample, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + data["prompt_pixel_values"] = prompt_images + + if prompt_masks is not None: + prompt_masks = self._preprocess_step( + prompt_masks, + is_mask=True, + do_resize=do_resize, + size=size, + resample=PILImageResampling.NEAREST, + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + data_format=data_format, + input_data_format=input_data_format, + num_labels=num_labels, + **kwargs, + ) + + data["prompt_masks"] = prompt_masks + + return BatchFeature(data=data, tensor_type=return_tensors) + + def post_process_semantic_segmentation( + self, outputs, target_sizes: Optional[List[Tuple[int, int]]] = None, num_labels: Optional[int] = None + ): + """ + Converts the output of [`SegGptImageSegmentationOutput`] into segmentation maps. Only supports + PyTorch. + + Args: + outputs ([`SegGptImageSegmentationOutput`]): + Raw outputs of the model. + target_sizes (`List[Tuple[int, int]]`, *optional*): + List of length (batch_size), where each list item (`Tuple[int, int]`) corresponds to the requested + final size (height, width) of each prediction. If left to None, predictions will not be resized. + num_labels (`int`, *optional*): + Number of classes in the segmentation task (excluding the background). If specified, a palette will be + built, assuming that class_idx 0 is the background, to map prediction masks from RGB values to class + indices. This value should be the same used when preprocessing inputs. + Returns: + semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic + segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is + specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
+ """ + requires_backends(self, ["torch"]) + # batch_size x num_channels x 2*height x width + masks = outputs.pred_masks + + # Predicted mask and prompt are concatenated in the height dimension + # batch_size x num_channels x height x width + masks = masks[:, :, masks.shape[2] // 2 :, :] + + # To unnormalize we need to permute to channel last + # batch_size x height x width x num_channels + std = torch.tensor(self.image_std).to(masks.device) + mean = torch.tensor(self.image_mean).to(masks.device) + + masks = masks.permute(0, 2, 3, 1) * std + mean + + # batch_size x num_channels x height x width + masks = masks.permute(0, 3, 1, 2) + + # Clip to match with palette if specified + masks = torch.clip(masks * 255, 0, 255) + + semantic_segmentation = [] + palette_tensor = None + palette = self.get_palette(num_labels) if num_labels is not None else None + if palette is not None: + palette_tensor = torch.tensor(palette).float().to(masks.device) + _, num_channels, _, _ = masks.shape + palette_tensor = palette_tensor.view(1, 1, num_labels + 1, num_channels) + + for idx, mask in enumerate(masks): + if target_sizes is not None: + mask = torch.nn.functional.interpolate( + mask.unsqueeze(0), + size=target_sizes[idx], + mode="nearest", + )[0] + + if num_labels is not None: + channels, height, width = mask.shape + dist = mask.permute(1, 2, 0).view(height, width, 1, channels) + dist = dist - palette_tensor + dist = torch.pow(dist, 2) + dist = torch.sum(dist, dim=-1) + pred = dist.argmin(dim=-1) + + else: + # If no palette is specified SegGpt will try to paint using the mask class idx as RGB + pred = mask.mean(dim=0).int() + + semantic_segmentation.append(pred) + + return semantic_segmentation diff --git a/src/transformers/models/seggpt/modeling_seggpt.py b/src/transformers/models/seggpt/modeling_seggpt.py new file mode 100644 index 00000000000..87175fdf38c --- /dev/null +++ b/src/transformers/models/seggpt/modeling_seggpt.py @@ -0,0 +1,1014 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" PyTorch SegGpt model.""" + + +import collections.abc +import math +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import functional as F + +from ...activations import ACT2FN +from ...modeling_utils import PreTrainedModel +from ...utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_seggpt import SegGptConfig + + +logger = logging.get_logger(__name__) + +# General docstring +_CONFIG_FOR_DOC = "SegGptConfig" + +# Base docstring +_CHECKPOINT_FOR_DOC = "BAAI/seggpt-vit-large" +_EXPECTED_OUTPUT_SHAPE = [3, 896, 448] + + +SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "BAAI/seggpt-vit-large", + # See all SegGpt models at https://huggingface.co/models?filter=seggpt +] + + +@dataclass +class SegGptEncoderOutput(ModelOutput): + """ + Output type of [`SegGptEncoderOutput`]. + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, patch_height, patch_width, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`Tuple[torch.FloatTensor]`, `optional`, returned when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) + of shape `(batch_size, patch_height, patch_width, hidden_size)`. + attentions (`Tuple[torch.FloatTensor]`, `optional`, returned when `config.output_attentions=True`): + Tuple of *torch.FloatTensor* (one for each layer) of shape + `(batch_size, num_heads, seq_len, seq_len)`. + intermediate_hidden_states (`Tuple[torch.FloatTensor]`, `optional`, returned when `config.intermediate_hidden_state_indices` is set): + Tuple of `torch.FloatTensor` of shape `(batch_size, patch_height, patch_width, hidden_size)`. + Each element in the Tuple corresponds to the output of the layer specified in `config.intermediate_hidden_state_indices`. + Additionaly, each feature passes through a LayerNorm. + """ + + last_hidden_state: torch.FloatTensor + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + intermediate_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class SegGptImageSegmentationOutput(ModelOutput): + """ + Output type of [`SegGptImageSegmentationOutput`]. + + Args: + loss (`torch.FloatTensor`, `optional`, returned when `labels` is provided): + The loss value. + pred_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + The predicted masks. + hidden_states (`Tuple[torch.FloatTensor]`, `optional`, returned when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) + of shape `(batch_size, patch_height, patch_width, hidden_size)`. + attentions (`Tuple[torch.FloatTensor]`, `optional`, returned when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape + `(batch_size, num_heads, seq_len, seq_len)`. 
+ """ + + loss: Optional[torch.FloatTensor] = None + pred_masks: Optional[torch.FloatTensor] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +# Copied from transformers.models.sam.modeling_sam.SamPatchEmbeddings with Sam->SegGpt +class SegGptPatchEmbeddings(nn.Module): + """ + This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial + `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a + Transformer. + """ + + def __init__(self, config): + super().__init__() + image_size, patch_size = config.image_size, config.patch_size + num_channels, hidden_size = config.num_channels, config.hidden_size + image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) + patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.num_patches = num_patches + + self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) + + def forward(self, pixel_values): + batch_size, num_channels, height, width = pixel_values.shape + if num_channels != self.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." + ) + if height != self.image_size[0] or width != self.image_size[1]: + raise ValueError( + f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." + ) + embeddings = self.projection(pixel_values).permute(0, 2, 3, 1) + return embeddings + + +class SegGptEmbeddings(nn.Module): + """ + Construct the embeddings from patch, position embeddings for input and prompt. 
+ """ + + def __init__(self, config: SegGptConfig) -> None: + super().__init__() + + self.mask_token = nn.Parameter(torch.zeros(1, 1, 1, config.hidden_size)) + self.segment_token_input = nn.Parameter(torch.zeros(1, 1, 1, config.hidden_size)) + self.segment_token_prompt = nn.Parameter(torch.zeros(1, 1, 1, config.hidden_size)) + # token for seg types + self.type_token_semantic = nn.Parameter(torch.zeros(1, 1, 1, config.hidden_size)) + self.type_token_instance = nn.Parameter(torch.zeros(1, 1, 1, config.hidden_size)) + + self.patch_embeddings = SegGptPatchEmbeddings(config) + + num_positions = (config.pretrain_image_size // config.patch_size) ** 2 + 1 + self.position_embeddings = nn.Parameter(torch.randn(1, num_positions, config.hidden_size)) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def interpolate_pos_encoding(self, height: int, width: int) -> torch.Tensor: + patch_pos_embed = self.position_embeddings[:, 1:] + num_patches = patch_pos_embed.shape[1] + pretrain_patch_size = int(math.sqrt(num_patches)) + + if pretrain_patch_size != height or pretrain_patch_size != width: + patch_pos_embed = F.interpolate( + patch_pos_embed.reshape(1, pretrain_patch_size, pretrain_patch_size, -1).permute(0, 3, 1, 2), + size=(height, width), + mode="bicubic", + align_corners=False, + ) + + return patch_pos_embed.permute(0, 2, 3, 1) + else: + return patch_pos_embed.reshape(1, height, width, -1) + + def forward( + self, + pixel_values: torch.Tensor, + prompt_pixel_values: torch.Tensor, + bool_masked_pos: Optional[torch.BoolTensor] = None, + embedding_type: Optional[str] = None, + ) -> torch.Tensor: + input_embeddings = self.patch_embeddings(pixel_values) + prompt_embeddings = self.patch_embeddings(prompt_pixel_values) + + batch_size, patch_height, patch_width, _ = input_embeddings.shape + + mask_token = self.mask_token.expand(batch_size, patch_height, patch_width, -1) + # replace the masked visual tokens by mask_token + w = bool_masked_pos.unsqueeze(-1).type_as(mask_token).reshape(-1, patch_height, patch_width, 1) + prompt_embeddings = prompt_embeddings * (1 - w) + mask_token * w + + embedding_type = embedding_type if embedding_type is not None else "instance" + + # add positional encoding to each token + pos_embed = self.interpolate_pos_encoding(patch_height, patch_width) + + # add segment token + input_embeddings = input_embeddings + self.segment_token_input + prompt_embeddings = prompt_embeddings + self.segment_token_prompt + + # add position embedding skipping CLS + input_embeddings = input_embeddings + pos_embed + prompt_embeddings = prompt_embeddings + pos_embed + + # add type embedding to each token + if embedding_type == "semantic": + type_embedding = self.type_token_semantic + elif embedding_type == "instance": + type_embedding = self.type_token_instance + else: + raise ValueError(f"Embedding type should be either 'semantic' or 'instance', but got {embedding_type}") + + input_embeddings = input_embeddings + type_embedding + prompt_embeddings = prompt_embeddings + type_embedding + + embeddings = torch.cat((input_embeddings, prompt_embeddings), dim=0) + + return embeddings + + +class SegGptAttention(nn.Module): + """Multi-head Attention block with relative position embeddings.""" + + def __init__(self, config): + super().__init__() + image_size, patch_size = config.image_size, config.patch_size + image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) + patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else 
(patch_size, patch_size) + + input_size = (image_size[0] // config.patch_size, image_size[1] // config.patch_size) + head_dim = config.hidden_size // config.num_attention_heads + + self.num_attention_heads = config.num_attention_heads + self.scale = head_dim**-0.5 + + self.qkv = nn.Linear(config.hidden_size, config.hidden_size * 3, bias=config.qkv_bias) + self.proj = nn.Linear(config.hidden_size, config.hidden_size) + + self.use_relative_position_embeddings = config.use_relative_position_embeddings + if self.use_relative_position_embeddings: + if input_size is None: + raise ValueError("Input size must be provided if using relative positional encoding.") + + # initialize relative positional embeddings + self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) + + def get_rel_pos(self, q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: + """ + Get relative positional embeddings according to the relative positions of + query and key sizes. + + Args: + q_size (int): + size of the query. + k_size (int): + size of key k. + rel_pos (`torch.Tensor`): + relative position embeddings (L, channel). + + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos. + rel_pos_resized = F.interpolate( + rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), + size=max_rel_dist, + mode="linear", + ) + rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) + + # Scale the coords with short length if shapes for q and k are different. + q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) + k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) + relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) + + return rel_pos_resized[relative_coords.long()] + + def add_decomposed_rel_pos( + self, + attn: torch.Tensor, + query: torch.Tensor, + rel_pos_h: torch.Tensor, + rel_pos_w: torch.Tensor, + q_size: Tuple[int, int], + k_size: Tuple[int, int], + ) -> torch.Tensor: + """ + Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. + https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py + + Args: + attn (`torch.Tensor`): + attention map. + query (`torch.Tensor`): + query q in the attention layer with shape (batch_size, query_height * query_width, channel). + rel_pos_h (`torch.Tensor`): + relative position embeddings (Lh, channel) for height axis. + rel_pos_w (`torch.Tensor`): + relative position embeddings (Lw, channel) for width axis. + q_size (tuple): + spatial sequence size of query q with (query_height, query_width). + k_size (tuple): + spatial sequence size of key k with (key_height, key_width). + + Returns: + attn (`torch.Tensor`): + attention map with added relative positional embeddings. 
+ """ + query_height, query_width = q_size + key_height, key_width = k_size + relative_position_height = self.get_rel_pos(query_height, key_height, rel_pos_h) + relative_position_width = self.get_rel_pos(query_width, key_width, rel_pos_w) + + batch_size, _, dim = query.shape + reshaped_query = query.reshape(batch_size, query_height, query_width, dim) + rel_h = torch.einsum("bhwc,hkc->bhwk", reshaped_query, relative_position_height) + rel_w = torch.einsum("bhwc,wkc->bhwk", reshaped_query, relative_position_width) + attn = attn.reshape(batch_size, query_height, query_width, key_height, key_width) + attn = attn + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] + attn = attn.reshape(batch_size, query_height * query_width, key_height * key_width) + return attn + + def forward(self, hidden_states: torch.Tensor, output_attentions=False) -> torch.Tensor: + batch_size, height, width, _ = hidden_states.shape + # qkv with shape (3, batch_size, nHead, height * width, channel) + qkv = ( + self.qkv(hidden_states) + .reshape(batch_size, height * width, 3, self.num_attention_heads, -1) + .permute(2, 0, 3, 1, 4) + ) + # q, k, v with shape (batch_size * nHead, height * width, channel) + query, key, value = qkv.reshape(3, batch_size * self.num_attention_heads, height * width, -1).unbind(0) + + attn_weights = (query * self.scale) @ key.transpose(-2, -1) + + if self.use_relative_position_embeddings: + attn_weights = self.add_decomposed_rel_pos( + attn_weights, query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width) + ) + + attn_weights = torch.nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query.dtype) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(batch_size, self.num_attention_heads, height * width, -1) + attn_weights = attn_weights_reshaped.view(batch_size * self.num_attention_heads, height * width, -1) + else: + attn_weights_reshaped = None + + attn_output = (attn_weights @ value).reshape(batch_size, self.num_attention_heads, height, width, -1) + attn_output = attn_output.permute(0, 2, 3, 1, 4).reshape(batch_size, height, width, -1) + + attn_output = self.proj(attn_output) + + return (attn_output, attn_weights_reshaped) + + +# Copied from transformers.models.sam.modeling_sam.SamMLPBlock with SamMLPBlock->SegGptMlp +class SegGptMlp(nn.Module): + def __init__(self, config): + super().__init__() + self.lin1 = nn.Linear(config.hidden_size, config.mlp_dim) + self.lin2 = nn.Linear(config.mlp_dim, config.hidden_size) + self.act = ACT2FN[config.hidden_act] + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.lin1(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.lin2(hidden_states) + return hidden_states + + +# Copied from transformers.models.beit.modeling_beit.drop_path +def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: + """ + Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, + however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... 
I've opted for changing the + layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the + argument. + """ + if drop_prob == 0.0 or not training: + return input + keep_prob = 1 - drop_prob + shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) + random_tensor.floor_() # binarize + output = input.div(keep_prob) * random_tensor + return output + + +# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->SegGpt +class SegGptDropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob: Optional[float] = None) -> None: + super().__init__() + self.drop_prob = drop_prob + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + return drop_path(hidden_states, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return "p={}".format(self.drop_prob) + + +class SegGptLayer(nn.Module): + def __init__(self, config: SegGptConfig, drop_path_rate: float) -> None: + super().__init__() + self.attention = SegGptAttention(config) + self.mlp = SegGptMlp(config) + self.drop_path = SegGptDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() + self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + ensemble_cond: int, + feature_ensemble: bool = False, + output_attentions: bool = False, + ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: + self_attention_outputs = self.attention( + self.layernorm_before(hidden_states), # in SegGpt, layernorm is applied before self-attention + output_attentions=output_attentions, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + if feature_ensemble and attention_output.shape[0] // 2 >= ensemble_cond: + prompt, inputs = attention_output.split(attention_output.shape[1] // 2, dim=1) + if ensemble_cond == 2: + num_prompts = attention_output.shape[0] // 2 + inputs = inputs.reshape(2, num_prompts, -1) + inputs = inputs.mean(dim=1, keepdim=True).expand_as(inputs) + inputs = inputs.reshape(*prompt.shape) + else: + inputs = inputs.mean(dim=0, keepdim=True).expand_as(inputs) + attention_output = torch.cat([prompt, inputs], dim=1) + + # first residual connection + hidden_states = self.drop_path(attention_output) + hidden_states + residual = hidden_states + + hidden_states = self.layernorm_after(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + self.drop_path(hidden_states) + + outputs = (hidden_states,) + outputs + + return outputs + + +class SegGptEncoder(nn.Module): + def __init__(self, config: SegGptConfig) -> None: + super().__init__() + self.config = config + dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)] + self.layers = nn.ModuleList([SegGptLayer(config, dpr[i]) for i in range(config.num_hidden_layers)]) + self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + feature_ensemble: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + 
return_dict: bool = True, + ) -> Union[tuple, SegGptEncoderOutput]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + intermediate_hidden_states = [] + + for i, layer_module in enumerate(self.layers): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + # Condition to check if we have the appropriate number of prompts to ensemble + ensemble_cond = 2 if self.config.merge_index > i else 1 + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + ensemble_cond, + feature_ensemble, + output_attentions, + ) + else: + layer_outputs = layer_module(hidden_states, ensemble_cond, feature_ensemble, output_attentions) + + hidden_states = layer_outputs[0] + + if i == self.config.merge_index: + hidden_states = ( + hidden_states[: hidden_states.shape[0] // 2] + hidden_states[hidden_states.shape[0] // 2 :] + ) * 0.5 + + if i in self.config.intermediate_hidden_state_indices: + intermediate_hidden_states.append(self.layernorm(hidden_states)) + + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [hidden_states, all_hidden_states, all_self_attentions, intermediate_hidden_states] + if v is not None + ) + return SegGptEncoderOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + intermediate_hidden_states=intermediate_hidden_states, + ) + + +# Copied from transformers.models.convnext.modeling_convnext.ConvNextLayerNorm with ConvNext->SegGpt +class SegGptLayerNorm(nn.Module): + r"""LayerNorm that supports two data formats: channels_last (default) or channels_first. + The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, + width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). 
+ """ + + def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): + super().__init__() + self.weight = nn.Parameter(torch.ones(normalized_shape)) + self.bias = nn.Parameter(torch.zeros(normalized_shape)) + self.eps = eps + self.data_format = data_format + if self.data_format not in ["channels_last", "channels_first"]: + raise NotImplementedError(f"Unsupported data format: {self.data_format}") + self.normalized_shape = (normalized_shape,) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if self.data_format == "channels_last": + x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + elif self.data_format == "channels_first": + input_dtype = x.dtype + x = x.float() + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = x.to(dtype=input_dtype) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x + + +class SegGptDecoderHead(nn.Module): + def __init__(self, config): + super().__init__() + self.conv = nn.Conv2d( + config.decoder_hidden_size, + config.decoder_hidden_size, + kernel_size=3, + padding=1, + ) + self.layernorm = SegGptLayerNorm( + normalized_shape=config.decoder_hidden_size, eps=config.layer_norm_eps, data_format="channels_first" + ) + self.act_fct = ACT2FN[config.hidden_act] + self.head = nn.Conv2d(config.decoder_hidden_size, 3, kernel_size=1, bias=True) # decoder to patch + + def forward(self, hidden_states: torch.FloatTensor): + hidden_states = self.conv(hidden_states) + hidden_states = self.layernorm(hidden_states) + hidden_states = self.act_fct(hidden_states) + hidden_states = self.head(hidden_states) + + return hidden_states + + +class SegGptDecoder(nn.Module): + def __init__(self, config): + super().__init__() + self.decoder_embed = nn.Linear( + config.hidden_size * len(config.intermediate_hidden_state_indices), + config.patch_size**2 * config.decoder_hidden_size, + bias=True, + ) + self.decoder_pred = SegGptDecoderHead(config) + self.patch_size = config.patch_size + self.decoder_hidden_size = config.decoder_hidden_size + self.config = config + + def _reshape_hidden_states(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: + batch_size, patch_height, patch_width, _ = hidden_states.shape + hidden_states = hidden_states.reshape( + batch_size, patch_height, patch_width, self.patch_size, self.patch_size, self.decoder_hidden_size + ) + hidden_states = hidden_states.permute(0, 5, 1, 3, 2, 4) + hidden_states = hidden_states.reshape( + shape=(batch_size, -1, patch_height * self.patch_size, patch_width * self.patch_size) + ) + + return hidden_states + + def forward(self, hidden_states: torch.FloatTensor): + hidden_states = self.decoder_embed(hidden_states) + hidden_states = self._reshape_hidden_states(hidden_states) + hidden_states = self.decoder_pred(hidden_states) + + return hidden_states + + +class SegGptPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = SegGptConfig + base_model_prefix = "model" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + _no_split_modules = ["SegGptEmbeddings", "SegGptLayer"] + + def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: + """Initialize the weights""" + std = self.config.initializer_range + if isinstance(module, (nn.Linear, nn.Conv2d)): + # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid + # `trunc_normal_cpu` not implemented in `half` issues + module.weight.data = nn.init.trunc_normal_(module.weight.data.to(torch.float32), mean=0.0, std=std).to( + module.weight.dtype + ) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + elif isinstance(module, SegGptAttention): + module.rel_pos_h.data = nn.init.trunc_normal_( + module.rel_pos_h.data.to(torch.float32), + mean=0.0, + std=std, + ).to(module.rel_pos_h.dtype) + + module.rel_pos_w.data = nn.init.trunc_normal_( + module.rel_pos_w.data.to(torch.float32), + mean=0.0, + std=std, + ).to(module.rel_pos_w.dtype) + + elif isinstance(module, SegGptEmbeddings): + module.position_embeddings.data = nn.init.trunc_normal_( + module.position_embeddings.data.to(torch.float32), + mean=0.0, + std=std, + ).to(module.position_embeddings.dtype) + + torch.nn.init.normal_(module.mask_token, std=std) + torch.nn.init.normal_(module.segment_token_input, std=std) + torch.nn.init.normal_(module.segment_token_prompt, std=std) + torch.nn.init.normal_(module.type_token_semantic, std=std) + torch.nn.init.normal_(module.type_token_instance, std=std) + + +SEGGPT_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it + as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`SegGptConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +SEGGPT_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`SegGptImageProcessor.__call__`] + for details. + + prompt_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Prompt pixel values. Prompt pixel values can be obtained using [`AutoImageProcessor`]. See + [`SegGptImageProcessor.__call__`] for details. + + prompt_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Prompt mask. Prompt mask can be obtained using [`AutoImageProcessor`]. See [`SegGptImageProcessor.__call__`] for + details. + + bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*): + Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). + + feature_ensemble (`bool`, *optional*): + Boolean indicating whether to use feature ensemble or not. If `True`, the model will use feature ensemble + if we have at least two prompts. If `False`, the model will not use feature ensemble. This argument should + be considered when doing few-shot inference on an input image i.e. more than one prompt for the same image. 
+ + embedding_type (`str`, *optional*): + Embedding type. Indicates whether the prompt is a semantic or instance embedding. Can be either + instance or semantic. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare SegGpt Model transformer outputting raw hidden-states without any specific head on top.", + SEGGPT_START_DOCSTRING, +) +class SegGptModel(SegGptPreTrainedModel): + def __init__(self, config: SegGptConfig): + super().__init__(config) + self.config = config + + self.embeddings = SegGptEmbeddings(config) + self.encoder = SegGptEncoder(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> SegGptPatchEmbeddings: + return self.embeddings.patch_embeddings + + def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(SEGGPT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=SegGptEncoderOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: torch.Tensor, + prompt_pixel_values: torch.Tensor, + prompt_masks: torch.Tensor, + bool_masked_pos: Optional[torch.BoolTensor] = None, + feature_ensemble: Optional[bool] = None, + embedding_type: Optional[str] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SegGptEncoderOutput]: + r""" + Returns: + + Examples: + + ```python + >>> from transformers import SegGptImageProcessor, SegGptModel + >>> from PIL import Image + >>> import requests + + >>> image_input_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_2.jpg" + >>> image_prompt_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1.jpg" + >>> mask_prompt_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1_target.png" + + >>> image_input = Image.open(requests.get(image_input_url, stream=True).raw) + >>> image_prompt = Image.open(requests.get(image_prompt_url, stream=True).raw) + >>> mask_prompt = Image.open(requests.get(mask_prompt_url, stream=True).raw).convert("L") + + >>> checkpoint = "BAAI/seggpt-vit-large" + >>> model = SegGptModel.from_pretrained(checkpoint) + >>> image_processor = SegGptImageProcessor.from_pretrained(checkpoint) + + >>> inputs = image_processor(images=image_input, prompt_images=image_prompt, prompt_masks=mask_prompt, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> list(outputs.last_hidden_state.shape) + [1, 56, 28, 1024] + ``` + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + 
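+ # `feature_ensemble` defaults to False; per the inputs docstring above it only changes behavior
+ # when several prompts are batched for the same input image (few-shot inference).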
return_dict = return_dict if return_dict is not None else self.config.use_return_dict + feature_ensemble = feature_ensemble if feature_ensemble is not None else False + + expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype + pixel_values = pixel_values.to(expected_dtype) + prompt_pixel_values = prompt_pixel_values.to(expected_dtype) + + # Prepare inputs + pixel_values = torch.cat((prompt_pixel_values, pixel_values), dim=2) + prompt_pixel_values = torch.cat((prompt_masks, prompt_masks), dim=2) + + # We concatenate along the height axis so SegGPT can process the pair as a single image; hence we need to mask + # the portion of the prompt pixels that is destined for the prediction, as it doesn't add any information. + if bool_masked_pos is None: + num_patches = self.embeddings.patch_embeddings.num_patches + bool_masked_pos = torch.zeros(num_patches, dtype=torch.bool).to(pixel_values.device) + bool_masked_pos[num_patches // 2 :] = 1 + bool_masked_pos = bool_masked_pos.unsqueeze(0) + + embedding_output = self.embeddings( + pixel_values, prompt_pixel_values, embedding_type=embedding_type, bool_masked_pos=bool_masked_pos + ) + + encoder_outputs = self.encoder( + embedding_output, + feature_ensemble=feature_ensemble, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + return encoder_outputs + + +def patchify(tensor: torch.Tensor, patch_size: int) -> torch.Tensor: + batch_size, num_channels, height, width = tensor.shape + patch_height = height // patch_size + patch_width = width // patch_size + + tensor = tensor.reshape(shape=(batch_size, num_channels, patch_height, patch_size, patch_width, patch_size)) + tensor = tensor.permute(0, 2, 4, 3, 5, 1) + tensor = tensor.reshape(shape=(batch_size, patch_height * patch_width, patch_size**2 * 3)) + + return tensor + + +def unpatchify(tensor: torch.Tensor, patch_height: int, patch_width: int) -> torch.Tensor: + batch_size = tensor.shape[0] + patch_size = int((tensor.shape[-1] / 3) ** 0.5) + if patch_height * patch_width != tensor.shape[1]: + raise ValueError(f"Number of patches {tensor.shape[1]} does not match patch height and width.") + + tensor = tensor.reshape(shape=(batch_size, patch_height, patch_width, patch_size, patch_size, 3)) + tensor = tensor.permute(0, 5, 1, 3, 2, 4) + tensor = tensor.reshape(shape=(batch_size, 3, patch_height * patch_size, patch_width * patch_size)) + + return tensor + + +class SegGptLoss(nn.Module): + def __init__(self, config): + super().__init__() + self.beta = config.beta + self.patch_size = config.patch_size + + def forward( + self, + pixel_values: torch.FloatTensor, + prompt_pixel_values: torch.FloatTensor, + pred_masks: torch.FloatTensor, + labels: torch.FloatTensor, + bool_masked_pos: torch.BoolTensor, + ): + """Computes the smooth L1 loss between the predicted masks and the ground truth masks. + + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, 2*height, width)`): + Concatenated pixel values from the prompt and input images. + + prompt_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, 2*height, width)`): + Concatenated pixel values from the mask prompt. + + pred_masks (`torch.FloatTensor` of shape `(batch_size, num_channels, 2*height, width)`): + Predicted masks. + + labels (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Ground truth mask for input images. + + bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): + Boolean masked positions.
Indicates which patches are masked (1) and which aren't (0). + + Returns: + `torch.FloatTensor`: The mean smooth L1 loss between the predicted masks and the ground truth masks. + """ + mask = bool_masked_pos[:, :, None].repeat(1, 1, self.patch_size**2 * 3) + mask = unpatchify(mask, pixel_values.shape[1] // self.patch_size, pixel_values.shape[2] // self.patch_size) + # Replace the dummy mask in prompt_pixel_values with the label values + prompt_pixel_values = prompt_pixel_values.clone() + prompt_pixel_values[:, :, prompt_pixel_values.shape[2] // 2 :, :] = labels + loss = F.smooth_l1_loss(pred_masks, prompt_pixel_values, reduction="none", beta=self.beta) + loss = (loss * mask).sum() / mask.sum() # mean loss on removed patches + + return loss + + +@add_start_docstrings( + "SegGpt model with a decoder on top for one-shot image segmentation.", + SEGGPT_START_DOCSTRING, +) +class SegGptForImageSegmentation(SegGptPreTrainedModel): + def __init__(self, config: SegGptConfig): + super().__init__(config) + self.config = config + + self.model = SegGptModel(config) + self.decoder = SegGptDecoder(config) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(SEGGPT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=SegGptImageSegmentationOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: torch.Tensor, + prompt_pixel_values: torch.Tensor, + prompt_masks: torch.Tensor, + bool_masked_pos: Optional[torch.BoolTensor] = None, + feature_ensemble: Optional[bool] = None, + embedding_type: Optional[str] = None, + labels: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SegGptImageSegmentationOutput]: + r""" + labels (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`, *optional*): + Ground truth mask for input images.
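+            When labels are provided, a smooth L1 loss (see `SegGptLoss`, with beta set by `config.beta`) between the predicted masks and the labels is computed over the masked patches and returned as `loss`.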
+ + Returns: + + Examples: + + ```python + >>> from transformers import SegGptImageProcessor, SegGptForImageSegmentation + >>> from PIL import Image + >>> import requests + + >>> image_input_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_2.jpg" + >>> image_prompt_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1.jpg" + >>> mask_prompt_url = "https://raw.githubusercontent.com/baaivision/Painter/main/SegGPT/SegGPT_inference/examples/hmbb_1_target.png" + + >>> image_input = Image.open(requests.get(image_input_url, stream=True).raw) + >>> image_prompt = Image.open(requests.get(image_prompt_url, stream=True).raw) + >>> mask_prompt = Image.open(requests.get(mask_prompt_url, stream=True).raw).convert("L") + + >>> checkpoint = "BAAI/seggpt-vit-large" + >>> model = SegGptForImageSegmentation.from_pretrained(checkpoint) + >>> image_processor = SegGptImageProcessor.from_pretrained(checkpoint) + + >>> inputs = image_processor(images=image_input, prompt_images=image_prompt, prompt_masks=mask_prompt, return_tensors="pt") + >>> outputs = model(**inputs) + >>> result = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image_input.size[::-1]])[0] + >>> print(list(result.shape)) + [170, 297] + ``` + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if bool_masked_pos is None: + num_patches = self.model.embeddings.patch_embeddings.num_patches + bool_masked_pos = torch.zeros(num_patches, dtype=torch.bool).to(pixel_values.device) + bool_masked_pos[num_patches // 2 :] = 1 + bool_masked_pos = bool_masked_pos.unsqueeze(0) + + outputs = self.model( + pixel_values=pixel_values, + prompt_pixel_values=prompt_pixel_values, + prompt_masks=prompt_masks, + bool_masked_pos=bool_masked_pos, + feature_ensemble=feature_ensemble, + embedding_type=embedding_type, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + intermediate_hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[-1] + intermediate_hidden_states = torch.cat(intermediate_hidden_states, dim=-1) + pred_masks = self.decoder(intermediate_hidden_states) + + loss = None + if labels is not None: + loss_fn = SegGptLoss(self.config) + loss = loss_fn(pixel_values, prompt_pixel_values, pred_masks, labels, bool_masked_pos) + + if not return_dict: + output = (pred_masks,) + if output_hidden_states: + output = output + (outputs[1],) + + if output_attentions: + idx = 2 if output_hidden_states else 1 + output = output + (outputs[idx],) + + if loss is not None: + output = (loss,) + output + return output + + return SegGptImageSegmentationOutput( + loss=loss, + pred_masks=pred_masks, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index dd2e50c67d0..3ba08016855 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -7556,6 +7556,30 @@ class SegformerPreTrainedModel(metaclass=DummyObject): requires_backends(self, ["torch"]) +SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class 
SegGptForImageSegmentation(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SegGptModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class SegGptPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + SEW_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/src/transformers/utils/dummy_vision_objects.py b/src/transformers/utils/dummy_vision_objects.py index 89366aba508..25a35558fe9 100644 --- a/src/transformers/utils/dummy_vision_objects.py +++ b/src/transformers/utils/dummy_vision_objects.py @@ -471,6 +471,13 @@ class SegformerImageProcessor(metaclass=DummyObject): requires_backends(self, ["vision"]) +class SegGptImageProcessor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + class SiglipImageProcessor(metaclass=DummyObject): _backends = ["vision"] diff --git a/tests/models/seggpt/__init__.py b/tests/models/seggpt/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/models/seggpt/test_image_processing_seggpt.py b/tests/models/seggpt/test_image_processing_seggpt.py new file mode 100644 index 00000000000..46694d6636e --- /dev/null +++ b/tests/models/seggpt/test_image_processing_seggpt.py @@ -0,0 +1,231 @@ +# coding=utf-8 +# Copyright 2024 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
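+""" Testing suite for the SegGpt image processor. """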
+ +import unittest + +import numpy as np +from datasets import load_dataset + +from transformers.testing_utils import require_torch, require_vision, slow +from transformers.utils import is_torch_available, is_vision_available + +from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs + + +if is_torch_available(): + import torch + + from transformers.models.seggpt.modeling_seggpt import SegGptImageSegmentationOutput + +if is_vision_available(): + from transformers import SegGptImageProcessor + + +class SegGptImageProcessingTester(unittest.TestCase): + def __init__( + self, + parent, + batch_size=7, + num_channels=3, + image_size=18, + min_resolution=30, + max_resolution=400, + do_resize=True, + size=None, + do_normalize=True, + image_mean=[0.5, 0.5, 0.5], + image_std=[0.5, 0.5, 0.5], + ): + size = size if size is not None else {"height": 18, "width": 18} + self.parent = parent + self.batch_size = batch_size + self.num_channels = num_channels + self.image_size = image_size + self.min_resolution = min_resolution + self.max_resolution = max_resolution + self.do_resize = do_resize + self.size = size + self.do_normalize = do_normalize + self.image_mean = image_mean + self.image_std = image_std + + def prepare_image_processor_dict(self): + return { + "image_mean": self.image_mean, + "image_std": self.image_std, + "do_normalize": self.do_normalize, + "do_resize": self.do_resize, + "size": self.size, + } + + def expected_output_image_shape(self, images): + return self.num_channels, self.size["height"], self.size["width"] + + def expected_post_processed_shape(self): + return self.size["height"] // 2, self.size["width"] + + def get_fake_image_segmentation_output(self): + torch.manual_seed(42) + return SegGptImageSegmentationOutput( + pred_masks=torch.rand(self.batch_size, self.num_channels, self.size["height"], self.size["width"]) + ) + + def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): + return prepare_image_inputs( + batch_size=self.batch_size, + num_channels=self.num_channels, + min_resolution=self.min_resolution, + max_resolution=self.max_resolution, + equal_resolution=equal_resolution, + numpify=numpify, + torchify=torchify, + ) + + +def prepare_mask(): + ds = load_dataset("EduardoPacheco/seggpt-example-data")["train"] + return ds[0]["mask"].convert("L") + + +def prepare_img(): + ds = load_dataset("EduardoPacheco/seggpt-example-data")["train"] + images = [image.convert("RGB") for image in ds["image"]] + masks = [image.convert("RGB") for image in ds["mask"]] + return images, masks + + +@require_torch +@require_vision +class SegGptImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): + image_processing_class = SegGptImageProcessor if is_vision_available() else None + + def setUp(self): + self.image_processor_tester = SegGptImageProcessingTester(self) + + @property + def image_processor_dict(self): + return self.image_processor_tester.prepare_image_processor_dict() + + def test_image_processor_properties(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + self.assertTrue(hasattr(image_processing, "image_mean")) + self.assertTrue(hasattr(image_processing, "image_std")) + self.assertTrue(hasattr(image_processing, "do_normalize")) + self.assertTrue(hasattr(image_processing, "do_resize")) + self.assertTrue(hasattr(image_processing, "size")) + + def test_image_processor_from_dict_with_kwargs(self): + image_processor = self.image_processing_class.from_dict(self.image_processor_dict) + 
self.assertEqual(image_processor.size, {"height": 18, "width": 18}) + + image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) + self.assertEqual(image_processor.size, {"height": 42, "width": 42}) + + def test_image_processor_palette(self): + num_labels = 3 + image_processing = self.image_processing_class(**self.image_processor_dict) + palette = image_processing.get_palette(num_labels) + self.assertEqual(len(palette), num_labels + 1) + self.assertEqual(palette[0], (0, 0, 0)) + + def test_mask_equivalence(self): + image_processor = SegGptImageProcessor() + + mask_binary = prepare_mask() + mask_rgb = mask_binary.convert("RGB") + + inputs_binary = image_processor(images=None, prompt_masks=mask_binary, return_tensors="pt") + inputs_rgb = image_processor(images=None, prompt_masks=mask_rgb, return_tensors="pt") + + self.assertTrue((inputs_binary["prompt_masks"] == inputs_rgb["prompt_masks"]).all().item()) + + def test_mask_to_rgb(self): + image_processing = self.image_processing_class(**self.image_processor_dict) + mask = prepare_mask() + mask = np.array(mask) + mask = (mask > 0).astype(np.uint8) + + def check_two_colors(image, color1=(0, 0, 0), color2=(255, 255, 255)): + pixels = image.transpose(1, 2, 0).reshape(-1, 3) + unique_colors = np.unique(pixels, axis=0) + if len(unique_colors) == 2 and (color1 in unique_colors) and (color2 in unique_colors): + return True + else: + return False + + num_labels = 1 + palette = image_processing.get_palette(num_labels) + + # Without a palette, the class index map should only be duplicated across channels, hence only (0,0,0) and (1,1,1) + mask_duplicated = image_processing.mask_to_rgb(mask) + # Masking with the palette: since only 1 class is present, we get the colors (0,0,0) and (255,255,255) + mask_painted = image_processing.mask_to_rgb(mask, palette=palette) + + self.assertTrue(check_two_colors(mask_duplicated, color2=(1, 1, 1))) + self.assertTrue(check_two_colors(mask_painted, color2=(255, 255, 255))) + + def test_post_processing_semantic_segmentation(self): + image_processor = self.image_processing_class(**self.image_processor_dict) + outputs = self.image_processor_tester.get_fake_image_segmentation_output() + post_processed = image_processor.post_process_semantic_segmentation(outputs) + + self.assertEqual(len(post_processed), self.image_processor_tester.batch_size) + + expected_semantic_map_shape = self.image_processor_tester.expected_post_processed_shape() + self.assertEqual(post_processed[0].shape, expected_semantic_map_shape) + + @slow + def test_pixel_values(self): + images, masks = prepare_img() + input_image = images[1] + prompt_image = images[0] + prompt_mask = masks[0] + + image_processor = SegGptImageProcessor.from_pretrained("BAAI/seggpt-vit-large") + + inputs = image_processor( + images=input_image, prompt_images=prompt_image, prompt_masks=prompt_mask, return_tensors="pt" + ) + + # Verify pixel values + expected_prompt_pixel_values = torch.tensor( + [ + [[-0.6965, -0.6965, -0.6965], [-0.6965, -0.6965, -0.6965], [-0.6965, -0.6965, -0.6965]], + [[1.6583, 1.6583, 1.6583], [1.6583, 1.6583, 1.6583], [1.6583, 1.6583, 1.6583]], + [[2.3088, 2.3088, 2.3088], [2.3088, 2.3088, 2.3088], [2.3088, 2.3088, 2.3088]], + ] + ) + + expected_pixel_values = torch.tensor( + [ + [[1.6324, 1.6153, 1.5810], [1.6153, 1.5982, 1.5810], [1.5810, 1.5639, 1.5639]], + [[1.2731, 1.2556, 1.2206], [1.2556, 1.2381, 1.2031], [1.2206, 1.2031, 1.1681]], + [[1.6465, 1.6465, 1.6465], [1.6465, 1.6465, 1.6465], [1.6291, 1.6291, 1.6291]], + ] + ) + + expected_prompt_masks = torch.tensor(
[ + [[-2.1179, -2.1179, -2.1179], [-2.1179, -2.1179, -2.1179], [-2.1179, -2.1179, -2.1179]], + [[-2.0357, -2.0357, -2.0357], [-2.0357, -2.0357, -2.0357], [-2.0357, -2.0357, -2.0357]], + [[-1.8044, -1.8044, -1.8044], [-1.8044, -1.8044, -1.8044], [-1.8044, -1.8044, -1.8044]], + ] + ) + + self.assertTrue(torch.allclose(inputs.pixel_values[0, :, :3, :3], expected_pixel_values, atol=1e-4)) + self.assertTrue( + torch.allclose(inputs.prompt_pixel_values[0, :, :3, :3], expected_prompt_pixel_values, atol=1e-4) + ) + self.assertTrue(torch.allclose(inputs.prompt_masks[0, :, :3, :3], expected_prompt_masks, atol=1e-4)) diff --git a/tests/models/seggpt/test_modeling_seggpt.py b/tests/models/seggpt/test_modeling_seggpt.py new file mode 100644 index 00000000000..0cb36ea534a --- /dev/null +++ b/tests/models/seggpt/test_modeling_seggpt.py @@ -0,0 +1,339 @@ +# coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch SegGpt model. """ + + +import inspect +import unittest + +from datasets import load_dataset + +from transformers import SegGptConfig +from transformers.testing_utils import ( + require_torch, + require_vision, + slow, + torch_device, +) +from transformers.utils import cached_property, is_torch_available, is_vision_available + +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, floats_tensor +from ...test_pipeline_mixin import PipelineTesterMixin + + +if is_torch_available(): + import torch + from torch import nn + + from transformers import SegGptForImageSegmentation, SegGptModel + from transformers.models.seggpt.modeling_seggpt import SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST + + +if is_vision_available(): + from transformers import SegGptImageProcessor + + +class SegGptModelTester: + def __init__( + self, + parent, + batch_size=2, + image_size=30, + patch_size=2, + num_channels=3, + is_training=False, + use_labels=True, + hidden_size=32, + num_hidden_layers=2, + num_attention_heads=4, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + initializer_range=0.02, + mlp_ratio=2.0, + merge_index=0, + intermediate_hidden_state_indices=[1], + pretrain_image_size=10, + decoder_hidden_size=10, + ): + self.parent = parent + self.batch_size = batch_size + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.is_training = is_training + self.use_labels = use_labels + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.initializer_range = initializer_range + self.mlp_ratio = mlp_ratio + self.merge_index = merge_index + self.intermediate_hidden_state_indices = intermediate_hidden_state_indices + self.pretrain_image_size = pretrain_image_size + 
self.decoder_hidden_size = decoder_hidden_size + + # in SegGpt, the seq length equals the number of patches (we don't use the [CLS] token) + num_patches = (image_size // patch_size) ** 2 + self.seq_length = num_patches + + def prepare_config_and_inputs(self): + pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size // 2, self.image_size]) + prompt_pixel_values = floats_tensor( + [self.batch_size, self.num_channels, self.image_size // 2, self.image_size] + ) + prompt_masks = floats_tensor([self.batch_size, self.num_channels, self.image_size // 2, self.image_size]) + + labels = None + if self.use_labels: + labels = floats_tensor([self.batch_size, self.num_channels, self.image_size // 2, self.image_size]) + + config = self.get_config() + + return config, pixel_values, prompt_pixel_values, prompt_masks, labels + + def get_config(self): + return SegGptConfig( + image_size=self.image_size, + patch_size=self.patch_size, + num_channels=self.num_channels, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + hidden_act=self.hidden_act, + hidden_dropout_prob=self.hidden_dropout_prob, + initializer_range=self.initializer_range, + mlp_ratio=self.mlp_ratio, + merge_index=self.merge_index, + intermediate_hidden_state_indices=self.intermediate_hidden_state_indices, + pretrain_image_size=self.pretrain_image_size, + decoder_hidden_size=self.decoder_hidden_size, + ) + + def create_and_check_model(self, config, pixel_values, prompt_pixel_values, prompt_masks, labels): + model = SegGptModel(config=config) + model.to(torch_device) + model.eval() + result = model(pixel_values, prompt_pixel_values, prompt_masks) + self.parent.assertEqual( + result.last_hidden_state.shape, + ( + self.batch_size, + self.image_size // self.patch_size, + self.image_size // self.patch_size, + self.hidden_size, + ), + ) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + ( + config, + pixel_values, + prompt_pixel_values, + prompt_masks, + labels, + ) = config_and_inputs + inputs_dict = { + "pixel_values": pixel_values, + "prompt_pixel_values": prompt_pixel_values, + "prompt_masks": prompt_masks, + } + return config, inputs_dict + + +@require_torch +class SegGptModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): + """ + Here we also overwrite some of the tests of test_modeling_common.py, as SegGpt does not use input_ids, inputs_embeds, + attention_mask and seq_length. 
+ """ + + all_model_classes = (SegGptModel, SegGptForImageSegmentation) if is_torch_available() else () + fx_compatible = False + + test_pruning = False + test_resize_embeddings = False + test_head_masking = False + test_torchscript = False + pipeline_model_mapping = ( + {"feature-extraction": SegGptModel, "mask-generation": SegGptModel} if is_torch_available() else {} + ) + + def setUp(self): + self.model_tester = SegGptModelTester(self) + self.config_tester = ConfigTester(self, config_class=SegGptConfig, has_text_modality=False) + + def test_config(self): + self.config_tester.run_common_tests() + + @unittest.skip(reason="SegGpt does not use inputs_embeds") + def test_inputs_embeds(self): + pass + + def test_model_common_attributes(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = ["pixel_values", "prompt_pixel_values", "prompt_masks"] + self.assertListEqual(arg_names[:3], expected_arg_names) + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_hidden_states_output(self): + def check_hidden_states_output(inputs_dict, config, model_class): + model = model_class(config) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states + + expected_num_layers = getattr( + self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 + ) + self.assertEqual(len(hidden_states), expected_num_layers) + + patch_height = patch_width = config.image_size // config.patch_size + + self.assertListEqual( + list(hidden_states[0].shape[-3:]), + [patch_height, patch_width, self.model_tester.hidden_size], + ) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + inputs_dict["output_hidden_states"] = True + check_hidden_states_output(inputs_dict, config, model_class) + + # check that output_hidden_states also work using config + del inputs_dict["output_hidden_states"] + config.output_hidden_states = True + + check_hidden_states_output(inputs_dict, config, model_class) + + @slow + def test_model_from_pretrained(self): + for model_name in SEGGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = SegGptModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + +def prepare_img(): + ds = load_dataset("EduardoPacheco/seggpt-example-data")["train"] + images = [image.convert("RGB") for image in ds["image"]] + masks = [image.convert("RGB") for image in ds["mask"]] + return images, masks + + +@require_torch +@require_vision +class SegGptModelIntegrationTest(unittest.TestCase): + @cached_property + def default_image_processor(self): + return SegGptImageProcessor.from_pretrained("BAAI/seggpt-vit-large") if is_vision_available() else None + + @slow + def 
test_one_shot_inference(self): + model = SegGptForImageSegmentation.from_pretrained("BAAI/seggpt-vit-large").to(torch_device) + + image_processor = self.default_image_processor + + images, masks = prepare_img() + input_image = images[1] + prompt_image = images[0] + prompt_mask = masks[0] + + inputs = image_processor( + images=input_image, prompt_images=prompt_image, prompt_masks=prompt_mask, return_tensors="pt" + ) + + inputs = inputs.to(torch_device) + # forward pass + with torch.no_grad(): + outputs = model(**inputs) + + # verify the logits + expected_shape = torch.Size((1, 3, 896, 448)) + self.assertEqual(outputs.pred_masks.shape, expected_shape) + + expected_slice = torch.tensor( + [ + [[-2.1208, -2.1190, -2.1198], [-2.1237, -2.1228, -2.1227], [-2.1232, -2.1226, -2.1228]], + [[-2.0405, -2.0396, -2.0403], [-2.0434, -2.0434, -2.0433], [-2.0428, -2.0432, -2.0434]], + [[-1.8102, -1.8088, -1.8099], [-1.8131, -1.8126, -1.8129], [-1.8130, -1.8128, -1.8131]], + ] + ).to(torch_device) + + self.assertTrue(torch.allclose(outputs.pred_masks[0, :, :3, :3], expected_slice, atol=1e-4)) + + result = image_processor.post_process_semantic_segmentation(outputs, [input_image.size[::-1]])[0] + + result_expected_shape = torch.Size((170, 297)) + expected_area = 1082 + area = (result > 0).sum().item() + self.assertEqual(result.shape, result_expected_shape) + self.assertEqual(area, expected_area) + + @slow + def test_few_shot_inference(self): + model = SegGptForImageSegmentation.from_pretrained("BAAI/seggpt-vit-large").to(torch_device) + image_processor = self.default_image_processor + + images, masks = prepare_img() + input_images = [images[1]] * 2 + prompt_images = [images[0], images[2]] + prompt_masks = [masks[0], masks[2]] + + inputs = image_processor( + images=input_images, prompt_images=prompt_images, prompt_masks=prompt_masks, return_tensors="pt" + ) + + inputs = {k: v.to(torch_device) for k, v in inputs.items()} + with torch.no_grad(): + outputs = model(**inputs, feature_ensemble=True) + + expected_shape = torch.Size((2, 3, 896, 448)) + expected_slice = torch.tensor( + [ + [[-2.1201, -2.1192, -2.1189], [-2.1217, -2.1210, -2.1204], [-2.1216, -2.1202, -2.1194]], + [[-2.0393, -2.0390, -2.0387], [-2.0402, -2.0402, -2.0397], [-2.0400, -2.0394, -2.0388]], + [[-1.8083, -1.8076, -1.8077], [-1.8105, -1.8102, -1.8099], [-1.8105, -1.8095, -1.8090]], + ] + ).to(torch_device) + + self.assertEqual(outputs.pred_masks.shape, expected_shape) + self.assertTrue(torch.allclose(outputs.pred_masks[0, :, 448:451, :3], expected_slice, atol=4e-4)) diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index a2a16a14000..6d4f0734cbc 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -958,6 +958,16 @@ class ModelTesterMixin: traced_model = torch.jit.trace( model, (input_ids, bbox), check_trace=False ) # when traced model is checked, an error is produced due to name mangling + elif ( + "pixel_values" in inputs and "prompt_pixel_values" in inputs and "prompt_masks" in inputs + ): # SegGpt requires additional inputs + pixel_values = inputs["pixel_values"] + prompt_pixel_values = inputs["prompt_pixel_values"] + prompt_masks = inputs["prompt_masks"] + model(pixel_values, prompt_pixel_values, prompt_masks) + traced_model = torch.jit.trace( + model, (pixel_values, prompt_pixel_values, prompt_masks), check_trace=False + ) # when traced model is checked, an error is produced due to name mangling else: main_input = inputs[main_input_name] diff --git a/utils/check_repo.py 
b/utils/check_repo.py index ca25d7d9e32..7cc06c67811 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -308,6 +308,7 @@ IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [ "SeamlessM4Tv2NARTextToUnitForConditionalGeneration", "SeamlessM4Tv2CodeHifiGan", "SeamlessM4Tv2ForSpeechToSpeech", # no auto class for speech-to-speech + "SegGptForImageSegmentation", "SiglipVisionModel", "SiglipTextModel", ]