huggingface/transformers: Merge 5664729a9c into 4c1715b610
commit 47acf76676
@@ -711,6 +711,8 @@
         title: DAB-DETR
       - local: model_doc/deformable_detr
         title: Deformable DETR
+      - local: model_doc/deim
+        title: deim
       - local: model_doc/deit
         title: DeiT
       - local: model_doc/depth_anything
docs/source/en/model_doc/deim.md (new file, 48 lines)
@@ -0,0 +1,48 @@
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# deim

## Overview

The deim model was proposed in [<INSERT PAPER NAME HERE>](<INSERT PAPER LINK HERE>) by <INSERT AUTHORS HERE>.
<INSERT SHORT SUMMARY HERE>

The abstract from the paper is the following:

*<INSERT PAPER ABSTRACT HERE>*

Tips:

<INSERT TIPS ABOUT MODEL HERE>

This model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/<INSERT YOUR HF USERNAME HERE>).
The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>).

## DeimConfig

[[autodoc]] DeimConfig

## DeimModel

[[autodoc]] DeimModel
    - forward

## DeimForObjectDetection

[[autodoc]] DeimForObjectDetection
    - forward
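The page above is still the auto-generated template. A minimal inference sketch that could accompany the `DeimForObjectDetection` autodoc section is shown below; it assumes the Auto-class registrations added later in this PR, and the `sushmanth/DEIM` hub id is only the checkpoint mentioned in the configuration docstring, not necessarily the final released weights.

```python
import requests
import torch
from PIL import Image

from transformers import AutoImageProcessor, AutoModelForObjectDetection

# Assumption: "sushmanth/DEIM" is the checkpoint referenced in DeimConfig's docstring;
# substitute the final hub id once official weights are published.
checkpoint = "sushmanth/DEIM"
image_processor = AutoImageProcessor.from_pretrained(checkpoint)
model = AutoModelForObjectDetection.from_pretrained(checkpoint)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Convert raw logits/boxes into labeled detections above a confidence threshold
results = image_processor.post_process_object_detection(
    outputs, target_sizes=torch.tensor([image.size[::-1]]), threshold=0.5
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f"{model.config.id2label[label.item()]}: {score:.2f} {[round(x, 1) for x in box.tolist()]}")
```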
@@ -90,6 +90,7 @@ CONFIG_MAPPING_NAMES = OrderedDict[str, str](
         ("ctrl", "CTRLConfig"),
         ("cvt", "CvtConfig"),
         ("d_fine", "DFineConfig"),
+        ("deim", "DeimConfig"),
         ("dab-detr", "DabDetrConfig"),
         ("dac", "DacConfig"),
         ("data2vec-audio", "Data2VecAudioConfig"),
@@ -467,6 +468,7 @@ MODEL_NAMES_MAPPING = OrderedDict[str, str](
         ("ctrl", "CTRL"),
         ("cvt", "CvT"),
         ("d_fine", "D-FINE"),
+        ("deim", "deim"),
         ("dab-detr", "DAB-DETR"),
         ("dac", "DAC"),
         ("data2vec-audio", "Data2VecAudio"),
@@ -84,6 +84,7 @@ MODEL_MAPPING_NAMES = OrderedDict(
         ("ctrl", "CTRLModel"),
         ("cvt", "CvtModel"),
         ("d_fine", "DFineModel"),
+        ("deim", "DeimModel"),
         ("dab-detr", "DabDetrModel"),
         ("dac", "DacModel"),
         ("data2vec-audio", "Data2VecAudioModel"),
@@ -1002,6 +1003,7 @@ MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict(
         # Model for Object Detection mapping
         ("conditional_detr", "ConditionalDetrForObjectDetection"),
         ("d_fine", "DFineForObjectDetection"),
+        ("deim", "DeimForObjectDetection"),
         ("dab-detr", "DabDetrForObjectDetection"),
         ("deformable_detr", "DeformableDetrForObjectDetection"),
         ("deta", "DetaForObjectDetection"),
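These registrations are what let the Auto classes resolve the `deim` model type. A small sketch, assuming this branch is installed:

```python
from transformers import AutoConfig, AutoModel, AutoModelForObjectDetection

# "deim" resolves through the CONFIG_MAPPING_NAMES / MODEL_MAPPING_NAMES entries added above
config = AutoConfig.for_model("deim")                       # DeimConfig with default values
model = AutoModel.from_config(config)                       # DeimModel (randomly initialized)
detector = AutoModelForObjectDetection.from_config(config)  # DeimForObjectDetection
print(type(config).__name__, type(model).__name__, type(detector).__name__)
```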
src/transformers/models/deim/__init__.py (new file, 29 lines)
@@ -0,0 +1,29 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    from .configuration_deim import *
    from .modeling_deim import *
else:
    import sys

    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
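The `_LazyModule` indirection defers the submodule imports until an attribute is first accessed. A quick sketch of that behaviour, assuming this branch is installed:

```python
import importlib

deim = importlib.import_module("transformers.models.deim")
print(type(deim).__name__)    # the _LazyModule proxy that replaced the real module in sys.modules
config_cls = deim.DeimConfig  # first attribute access imports configuration_deim on demand
print(config_cls.model_type)  # "deim"
```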
src/transformers/models/deim/configuration_deim.py (new file, 425 lines)
@@ -0,0 +1,425 @@
|
||||
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
|
||||
# This file was automatically generated from src/transformers/models/deim/modular_deim.py.
|
||||
# Do NOT edit this file manually as any edits will be overwritten by the generation of
|
||||
# the file from the modular. If any change should be done, please apply the change to the
|
||||
# modular_deim.py file directly. One of our CI enforces this.
|
||||
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
|
||||
# coding=utf-8
|
||||
# Copyright 2025 Baidu Inc and The HuggingFace Inc. team.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from ...configuration_utils import PretrainedConfig
|
||||
from ...utils import logging
|
||||
from ...utils.backbone_utils import verify_backbone_config_arguments
|
||||
from ..auto import CONFIG_MAPPING
|
||||
|
||||
|
||||
logger = logging.get_logger(__name__)
|
||||
|
||||
|
||||
# TODO: Attribute map assignment logic should be fixed in modular
|
||||
# as well as super() call parsing because otherwise we cannot re-write args after initialization
|
||||
class DeimConfig(PretrainedConfig):
|
||||
"""
|
||||
This is the configuration class to store the configuration of a [`DeimModel`]. It is used to instantiate a deim
|
||||
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
|
||||
defaults will yield a similar configuration to that of DEIM-X-COCO [sushmanth/DEIM](https://huggingface.co/sushmanth/DEIM).
|
||||
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
||||
documentation from [`PretrainedConfig`] for more information.
|
||||
|
||||
Args:
|
||||
initializer_range (`float`, *optional*, defaults to 0.01):
|
||||
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
||||
initializer_bias_prior_prob (`float`, *optional*):
|
||||
The prior probability used by the bias initializer to initialize biases for `enc_score_head` and `class_embed`.
|
||||
If `None`, `prior_prob` computed as `prior_prob = 1 / (num_labels + 1)` while initializing model weights.
|
||||
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
|
||||
The epsilon used by the layer normalization layers.
|
||||
batch_norm_eps (`float`, *optional*, defaults to 1e-05):
|
||||
The epsilon used by the batch normalization layers.
|
||||
backbone_config (`Dict`, *optional*, defaults to `RTDetrResNetConfig()`):
|
||||
The configuration of the backbone model.
|
||||
backbone (`str`, *optional*):
|
||||
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
|
||||
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
|
||||
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
|
||||
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
|
||||
Whether to use pretrained weights for the backbone.
|
||||
use_timm_backbone (`bool`, *optional*, defaults to `False`):
|
||||
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
|
||||
library.
|
||||
freeze_backbone_batch_norms (`bool`, *optional*, defaults to `True`):
|
||||
Whether to freeze the batch normalization layers in the backbone.
|
||||
backbone_kwargs (`dict`, *optional*):
|
||||
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
|
||||
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
|
||||
encoder_hidden_dim (`int`, *optional*, defaults to 256):
|
||||
Dimension of the layers in hybrid encoder.
|
||||
encoder_in_channels (`list`, *optional*, defaults to `[512, 1024, 2048]`):
|
||||
Multi-level feature inputs for the encoder.
|
||||
feat_strides (`List[int]`, *optional*, defaults to `[8, 16, 32]`):
|
||||
Strides used in each feature map.
|
||||
encoder_layers (`int`, *optional*, defaults to 1):
|
||||
Total of layers to be used by the encoder.
|
||||
encoder_ffn_dim (`int`, *optional*, defaults to 1024):
|
||||
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
|
||||
encoder_attention_heads (`int`, *optional*, defaults to 8):
|
||||
Number of attention heads for each attention layer in the Transformer encoder.
|
||||
dropout (`float`, *optional*, defaults to 0.0):
|
||||
The ratio for all dropout layers.
|
||||
activation_dropout (`float`, *optional*, defaults to 0.0):
|
||||
The dropout ratio for activations inside the fully connected layer.
|
||||
encode_proj_layers (`List[int]`, *optional*, defaults to `[2]`):
|
||||
Indexes of the projected layers to be used in the encoder.
|
||||
positional_encoding_temperature (`int`, *optional*, defaults to 10000):
|
||||
The temperature parameter used to create the positional encodings.
|
||||
encoder_activation_function (`str`, *optional*, defaults to `"gelu"`):
|
||||
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
|
||||
`"relu"`, `"silu"` and `"gelu_new"` are supported.
|
||||
activation_function (`str`, *optional*, defaults to `"silu"`):
|
||||
The non-linear activation function (function or string) in the general layer. If string, `"gelu"`,
|
||||
`"relu"`, `"silu"` and `"gelu_new"` are supported.
|
||||
eval_size (`Tuple[int, int]`, *optional*):
|
||||
Height and width used to compute the effective height and width of the position embeddings after taking
|
||||
into account the stride.
|
||||
normalize_before (`bool`, *optional*, defaults to `False`):
|
||||
Determines whether to apply layer normalization in the transformer encoder layer before self-attention and
|
||||
feed-forward modules.
|
||||
hidden_expansion (`float`, *optional*, defaults to 1.0):
|
||||
Expansion ratio to enlarge the dimension size of RepVGGBlock and CSPRepLayer.
|
||||
d_model (`int`, *optional*, defaults to 256):
|
||||
Dimension of the layers, excluding the hybrid encoder.
|
||||
num_queries (`int`, *optional*, defaults to 300):
|
||||
Number of object queries.
|
||||
decoder_in_channels (`list`, *optional*, defaults to `[256, 256, 256]`):
|
||||
Multi-level feature dimensions for the decoder.
|
||||
decoder_ffn_dim (`int`, *optional*, defaults to 1024):
|
||||
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
|
||||
num_feature_levels (`int`, *optional*, defaults to 3):
|
||||
The number of input feature levels.
|
||||
decoder_n_points (`int`, *optional*, defaults to 4):
|
||||
The number of sampled keys in each feature level for each attention head in the decoder.
|
||||
decoder_layers (`int`, *optional*, defaults to 6):
|
||||
Number of decoder layers.
|
||||
decoder_attention_heads (`int`, *optional*, defaults to 8):
|
||||
Number of attention heads for each attention layer in the Transformer decoder.
|
||||
decoder_activation_function (`str`, *optional*, defaults to `"relu"`):
|
||||
The non-linear activation function (function or string) in the decoder. If string, `"gelu"`,
|
||||
`"relu"`, `"silu"` and `"gelu_new"` are supported.
|
||||
attention_dropout (`float`, *optional*, defaults to 0.0):
|
||||
The dropout ratio for the attention probabilities.
|
||||
num_denoising (`int`, *optional*, defaults to 100):
|
||||
The total number of denoising tasks or queries to be used for contrastive denoising.
|
||||
label_noise_ratio (`float`, *optional*, defaults to 0.5):
|
||||
The fraction of denoising labels to which random noise should be added.
|
||||
box_noise_scale (`float`, *optional*, defaults to 1.0):
|
||||
Scale or magnitude of noise to be added to the bounding boxes.
|
||||
learn_initial_query (`bool`, *optional*, defaults to `False`):
|
||||
Indicates whether the initial query embeddings for the decoder should be learned during training
|
||||
anchor_image_size (`Tuple[int, int]`, *optional*):
|
||||
Height and width of the input image used during evaluation to generate the bounding box anchors. If `None`, anchors are generated automatically.
|
||||
with_box_refine (`bool`, *optional*, defaults to `True`):
|
||||
Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
|
||||
based on the predictions from the previous layer.
|
||||
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
|
||||
Whether the architecture has an encoder-decoder structure.
|
||||
matcher_alpha (`float`, *optional*, defaults to 0.25):
|
||||
Parameter alpha used by the Hungarian Matcher.
|
||||
matcher_gamma (`float`, *optional*, defaults to 2.0):
|
||||
Parameter gamma used by the Hungarian Matcher.
|
||||
matcher_class_cost (`float`, *optional*, defaults to 2.0):
|
||||
The relative weight of the class loss used by the Hungarian Matcher.
|
||||
matcher_bbox_cost (`float`, *optional*, defaults to 5.0):
|
||||
The relative weight of the bounding box loss used by the Hungarian Matcher.
|
||||
matcher_giou_cost (`float`, *optional*, defaults to 2.0):
|
||||
The relative weight of the GIoU loss used by the Hungarian Matcher.
|
||||
use_focal_loss (`bool`, *optional*, defaults to `True`):
|
||||
Parameter informing if the focal loss should be used.
|
||||
auxiliary_loss (`bool`, *optional*, defaults to `True`):
|
||||
Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
|
||||
focal_loss_alpha (`float`, *optional*, defaults to 0.75):
|
||||
Parameter alpha used to compute the focal loss.
|
||||
focal_loss_gamma (`float`, *optional*, defaults to 2.0):
|
||||
Parameter gamma used to compute the focal loss.
|
||||
weight_loss_vfl (`float`, *optional*, defaults to 1.0):
|
||||
Relative weight of the varifocal loss in the object detection loss.
|
||||
weight_loss_bbox (`float`, *optional*, defaults to 5.0):
|
||||
Relative weight of the L1 bounding box loss in the object detection loss.
|
||||
weight_loss_giou (`float`, *optional*, defaults to 2.0):
|
||||
Relative weight of the generalized IoU loss in the object detection loss.
|
||||
weight_loss_fgl (`float`, *optional*, defaults to 0.15):
|
||||
Relative weight of the fine-grained localization loss in the object detection loss.
|
||||
weight_loss_ddf (`float`, *optional*, defaults to 1.5):
|
||||
Relative weight of the decoupled distillation focal loss in the object detection loss.
|
||||
eos_coefficient (`float`, *optional*, defaults to 0.0001):
|
||||
Relative classification weight of the 'no-object' class in the object detection loss.
|
||||
eval_idx (`int`, *optional*, defaults to -1):
|
||||
Index of the decoder layer to use for evaluation. If negative, counts from the end
|
||||
(e.g., -1 means use the last layer). This allows for early prediction in the decoder
|
||||
stack while still training later layers.
|
||||
layer_scale (`float`, *optional*, defaults to `1.0`):
|
||||
Scaling factor for the hidden dimension in later decoder layers. Used to adjust the
|
||||
model capacity after the evaluation layer.
|
||||
max_num_bins (`int`, *optional*, defaults to 32):
|
||||
Maximum number of bins for the distribution-guided bounding box refinement.
|
||||
Higher values allow for more fine-grained localization but increase computation.
|
||||
reg_scale (`float`, *optional*, defaults to 4.0):
|
||||
Scale factor for the regression distribution. Controls the range and granularity
|
||||
of the bounding box refinement process.
|
||||
depth_mult (`float`, *optional*, defaults to 1.0):
|
||||
Multiplier for the number of blocks in RepNCSPELAN4 layers. Used to scale the model's
|
||||
depth while maintaining its architecture.
|
||||
top_prob_values (`int`, *optional*, defaults to 4):
|
||||
Number of top probability values to consider from each corner's distribution.
|
||||
lqe_hidden_dim (`int`, *optional*, defaults to 64):
|
||||
Hidden dimension size for the Location Quality Estimator (LQE) network.
|
||||
lqe_layers (`int`, *optional*, defaults to 2):
|
||||
Number of layers in the Location Quality Estimator MLP.
|
||||
decoder_offset_scale (`float`, *optional*, defaults to 0.5):
|
||||
Offset scale used in deformable attention.
|
||||
decoder_method (`str`, *optional*, defaults to `"default"`):
|
||||
The method to use for the decoder: `"default"` or `"discrete"`.
|
||||
up (`float`, *optional*, defaults to 0.5):
|
||||
Controls the upper bounds of the Weighting Function.
|
||||
"""
|
||||
|
||||
model_type = "deim"
|
||||
layer_types = ["basic", "bottleneck"]
|
||||
attribute_map = {
|
||||
"hidden_size": "d_model",
|
||||
"num_attention_heads": "encoder_attention_heads",
|
||||
}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
initializer_range=0.01,
|
||||
initializer_bias_prior_prob=None,
|
||||
layer_norm_eps=1e-5,
|
||||
batch_norm_eps=1e-5,
|
||||
# backbone
|
||||
backbone_config=None,
|
||||
backbone=None,
|
||||
use_pretrained_backbone=False,
|
||||
use_timm_backbone=False,
|
||||
freeze_backbone_batch_norms=True,
|
||||
backbone_kwargs=None,
|
||||
# encoder HybridEncoder
|
||||
encoder_hidden_dim=256,
|
||||
encoder_in_channels=[512, 1024, 2048],
|
||||
feat_strides=[8, 16, 32],
|
||||
encoder_layers=1,
|
||||
encoder_ffn_dim=1024,
|
||||
encoder_attention_heads=8,
|
||||
dropout=0.0,
|
||||
activation_dropout=0.0,
|
||||
encode_proj_layers=[2],
|
||||
positional_encoding_temperature=10000,
|
||||
encoder_activation_function="gelu",
|
||||
activation_function="silu",
|
||||
eval_size=None,
|
||||
normalize_before=False,
|
||||
hidden_expansion=1.0,
|
||||
# decoder DeimTransformer
|
||||
d_model=256,
|
||||
num_queries=300,
|
||||
decoder_in_channels=[256, 256, 256],
|
||||
decoder_ffn_dim=1024,
|
||||
num_feature_levels=3,
|
||||
decoder_n_points=4,
|
||||
decoder_layers=6,
|
||||
decoder_attention_heads=8,
|
||||
decoder_activation_function="relu",
|
||||
attention_dropout=0.0,
|
||||
num_denoising=100,
|
||||
label_noise_ratio=0.5,
|
||||
box_noise_scale=1.0,
|
||||
learn_initial_query=False,
|
||||
anchor_image_size=None,
|
||||
with_box_refine=True,
|
||||
is_encoder_decoder=True,
|
||||
# Loss
|
||||
matcher_alpha=0.25,
|
||||
matcher_gamma=2.0,
|
||||
matcher_class_cost=2.0,
|
||||
matcher_bbox_cost=5.0,
|
||||
matcher_giou_cost=2.0,
|
||||
use_focal_loss=True,
|
||||
auxiliary_loss=True,
|
||||
focal_loss_alpha=0.75,
|
||||
focal_loss_gamma=2.0,
|
||||
weight_loss_vfl=1.0,
|
||||
weight_loss_bbox=5.0,
|
||||
weight_loss_giou=2.0,
|
||||
weight_loss_fgl=0.15,
|
||||
weight_loss_ddf=1.5,
|
||||
eos_coefficient=1e-4,
|
||||
eval_idx=-1,
|
||||
layer_scale=1,
|
||||
max_num_bins=32,
|
||||
reg_scale=4.0,
|
||||
depth_mult=1.0,
|
||||
top_prob_values=4,
|
||||
lqe_hidden_dim=64,
|
||||
lqe_layers=2,
|
||||
decoder_offset_scale=0.5,
|
||||
decoder_method="default",
|
||||
up=0.5,
|
||||
**kwargs,
|
||||
):
|
||||
self.initializer_range = initializer_range
|
||||
self.initializer_bias_prior_prob = initializer_bias_prior_prob
|
||||
self.layer_norm_eps = layer_norm_eps
|
||||
self.batch_norm_eps = batch_norm_eps
|
||||
# backbone
|
||||
if backbone_config is None and backbone is None:
|
||||
logger.info(
|
||||
"`backbone_config` and `backbone` are `None`. Initializing the config with the default `HGNet-V2` backbone."
|
||||
)
|
||||
backbone_model_type = "hgnet_v2"
|
||||
config_class = CONFIG_MAPPING[backbone_model_type]
|
||||
# this will map it to RTDetrResNetConfig
|
||||
# note: we can instead create HGNetV2Config
|
||||
# and we would need to create HGNetV2Backbone
|
||||
backbone_config = config_class(
|
||||
num_channels=3,
|
||||
embedding_size=64,
|
||||
hidden_sizes=[256, 512, 1024, 2048],
|
||||
depths=[3, 4, 6, 3],
|
||||
layer_type="bottleneck",
|
||||
hidden_act="relu",
|
||||
downsample_in_first_stage=False,
|
||||
downsample_in_bottleneck=False,
|
||||
out_features=None,
|
||||
out_indices=[2, 3, 4],
|
||||
)
|
||||
elif isinstance(backbone_config, dict):
|
||||
backbone_model_type = backbone_config.pop("model_type")
|
||||
config_class = CONFIG_MAPPING[backbone_model_type]
|
||||
backbone_config = config_class.from_dict(backbone_config)
|
||||
|
||||
verify_backbone_config_arguments(
|
||||
use_timm_backbone=use_timm_backbone,
|
||||
use_pretrained_backbone=use_pretrained_backbone,
|
||||
backbone=backbone,
|
||||
backbone_config=backbone_config,
|
||||
backbone_kwargs=backbone_kwargs,
|
||||
)
|
||||
|
||||
self.backbone_config = backbone_config
|
||||
self.backbone = backbone
|
||||
self.use_pretrained_backbone = use_pretrained_backbone
|
||||
self.use_timm_backbone = use_timm_backbone
|
||||
self.freeze_backbone_batch_norms = freeze_backbone_batch_norms
|
||||
self.backbone_kwargs = backbone_kwargs
|
||||
# encoder
|
||||
self.encoder_hidden_dim = encoder_hidden_dim
|
||||
self.encoder_in_channels = encoder_in_channels
|
||||
self.feat_strides = feat_strides
|
||||
self.encoder_attention_heads = encoder_attention_heads
|
||||
self.encoder_ffn_dim = encoder_ffn_dim
|
||||
self.dropout = dropout
|
||||
self.activation_dropout = activation_dropout
|
||||
self.encode_proj_layers = encode_proj_layers
|
||||
self.encoder_layers = encoder_layers
|
||||
self.positional_encoding_temperature = positional_encoding_temperature
|
||||
self.eval_size = eval_size
|
||||
self.normalize_before = normalize_before
|
||||
self.encoder_activation_function = encoder_activation_function
|
||||
self.activation_function = activation_function
|
||||
self.hidden_expansion = hidden_expansion
|
||||
# decoder
|
||||
self.d_model = d_model
|
||||
self.num_queries = num_queries
|
||||
self.decoder_ffn_dim = decoder_ffn_dim
|
||||
self.decoder_in_channels = decoder_in_channels
|
||||
self.num_feature_levels = num_feature_levels
|
||||
self.decoder_n_points = decoder_n_points
|
||||
self.decoder_layers = decoder_layers
|
||||
self.decoder_attention_heads = decoder_attention_heads
|
||||
self.decoder_activation_function = decoder_activation_function
|
||||
self.attention_dropout = attention_dropout
|
||||
self.num_denoising = num_denoising
|
||||
self.label_noise_ratio = label_noise_ratio
|
||||
self.box_noise_scale = box_noise_scale
|
||||
self.learn_initial_query = learn_initial_query
|
||||
self.anchor_image_size = anchor_image_size
|
||||
self.auxiliary_loss = auxiliary_loss
|
||||
self.with_box_refine = with_box_refine
|
||||
# Loss
|
||||
self.matcher_alpha = matcher_alpha
|
||||
self.matcher_gamma = matcher_gamma
|
||||
self.matcher_class_cost = matcher_class_cost
|
||||
self.matcher_bbox_cost = matcher_bbox_cost
|
||||
self.matcher_giou_cost = matcher_giou_cost
|
||||
self.use_focal_loss = use_focal_loss
|
||||
self.focal_loss_alpha = focal_loss_alpha
|
||||
self.focal_loss_gamma = focal_loss_gamma
|
||||
self.weight_loss_vfl = weight_loss_vfl
|
||||
self.weight_loss_bbox = weight_loss_bbox
|
||||
self.weight_loss_giou = weight_loss_giou
|
||||
self.weight_loss_fgl = weight_loss_fgl
|
||||
self.weight_loss_ddf = weight_loss_ddf
|
||||
self.eos_coefficient = eos_coefficient
|
||||
# add the new attributes with the given values or defaults
|
||||
self.eval_idx = eval_idx
|
||||
self.layer_scale = layer_scale
|
||||
self.max_num_bins = max_num_bins
|
||||
self.reg_scale = reg_scale
|
||||
self.depth_mult = depth_mult
|
||||
self.decoder_offset_scale = decoder_offset_scale
|
||||
self.decoder_method = decoder_method
|
||||
self.top_prob_values = top_prob_values
|
||||
self.lqe_hidden_dim = lqe_hidden_dim
|
||||
self.lqe_layers = lqe_layers
|
||||
self.up = up
|
||||
|
||||
if isinstance(self.decoder_n_points, list):
|
||||
if len(self.decoder_n_points) != self.num_feature_levels:
|
||||
raise ValueError(
|
||||
f"Length of decoder_n_points list ({len(self.decoder_n_points)}) must match num_feature_levels ({self.num_feature_levels})."
|
||||
)
|
||||
|
||||
head_dim = self.d_model // self.decoder_attention_heads
|
||||
if head_dim * self.decoder_attention_heads != self.d_model:
|
||||
raise ValueError(
|
||||
f"Embedded dimension {self.d_model} must be divisible by decoder_attention_heads {self.decoder_attention_heads}"
|
||||
)
|
||||
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
|
||||
|
||||
@property
|
||||
def num_attention_heads(self) -> int:
|
||||
return self.encoder_attention_heads
|
||||
|
||||
@property
|
||||
def hidden_size(self) -> int:
|
||||
return self.d_model
|
||||
|
||||
@classmethod
|
||||
def from_backbone_configs(cls, backbone_config: PretrainedConfig, **kwargs):
|
||||
"""Instantiate a [`DeimConfig`] (or a derived class) from a pre-trained backbone model configuration and DETR model
|
||||
configuration.
|
||||
|
||||
Args:
|
||||
backbone_config ([`PretrainedConfig`]):
|
||||
The backbone configuration.
|
||||
|
||||
Returns:
|
||||
[`DeimConfig`]: An instance of a configuration object
|
||||
"""
|
||||
return cls(
|
||||
backbone_config=backbone_config,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
|
||||
__all__ = ["DeimConfig"]
|
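A minimal sketch of the configuration in use, assuming the modeling code from this PR is importable; it shows the default HGNet-V2 backbone wiring and the `attribute_map`/property aliases defined above:

```python
from transformers import DeimConfig, DeimModel

config = DeimConfig()                      # logs that the default HGNet-V2 backbone is being used
print(config.backbone_config.model_type)   # "hgnet_v2"
print(config.hidden_size, config.d_model)  # 256 256, hidden_size is aliased to d_model
print(config.num_attention_heads)          # 8, aliased to encoder_attention_heads

model = DeimModel(config)                  # randomly initialized, useful for quick shape checks
print(sum(p.numel() for p in model.parameters()))
```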
@@ -0,0 +1,688 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2025 The HuggingFace Inc. team.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
import requests
|
||||
import torch
|
||||
from huggingface_hub import hf_hub_download
|
||||
from PIL import Image
|
||||
from torchvision import transforms
|
||||
|
||||
from transformers import DeimConfig, DeimForObjectDetection, RTDetrImageProcessor
|
||||
from transformers.utils import logging
|
||||
|
||||
|
||||
logging.set_verbosity_info()
|
||||
logger = logging.get_logger(__name__)
|
||||
|
||||
|
||||
def get_deim_config(model_name: str) -> DeimConfig:
|
||||
config = DeimConfig()
|
||||
|
||||
config.num_labels = 80
|
||||
repo_id = "huggingface/label-files"
|
||||
filename = "object365-id2label.json" if "obj365" in model_name else "coco-detection-mmdet-id2label.json"
|
||||
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
|
||||
id2label = {int(k): v for k, v in id2label.items()}
|
||||
config.id2label = id2label
|
||||
config.label2id = {v: k for k, v in id2label.items()}
|
||||
|
||||
config.backbone_config.hidden_sizes = [64, 128, 256, 512]
|
||||
config.backbone_config.layer_type = "basic"
|
||||
config.backbone_config.embedding_size = 32
|
||||
config.hidden_expansion = 1.0
|
||||
config.decoder_layers = 6
|
||||
|
||||
if model_name in ["dfine_x_coco", "dfine_x_obj2coco", "dfine_x_obj365"]:
|
||||
config.backbone_config.hidden_sizes = [256, 512, 1024, 2048]
|
||||
config.backbone_config.stage_in_channels = [64, 128, 512, 1024]
|
||||
config.backbone_config.stage_mid_channels = [64, 128, 256, 512]
|
||||
config.backbone_config.stage_out_channels = [128, 512, 1024, 2048]
|
||||
config.backbone_config.stage_num_blocks = [1, 2, 5, 2]
|
||||
config.backbone_config.stage_downsample = [False, True, True, True]
|
||||
config.backbone_config.stage_light_block = [False, False, True, True]
|
||||
config.backbone_config.stage_kernel_size = [3, 3, 5, 5]
|
||||
config.backbone_config.stage_numb_of_layers = [6, 6, 6, 6]
|
||||
config.backbone_config.stem_channels = [3, 32, 64]
|
||||
config.encoder_in_channels = [512, 1024, 2048]
|
||||
config.encoder_hidden_dim = 384
|
||||
config.encoder_ffn_dim = 2048
|
||||
config.decoder_n_points = [3, 6, 3]
|
||||
config.decoder_in_channels = [384, 384, 384]
|
||||
if model_name == "dfine_x_obj365":
|
||||
config.num_labels = 366
|
||||
elif model_name in ["dfine_m_coco", "dfine_m_obj2coco", "dfine_m_obj365"]:
|
||||
config.backbone_config.hidden_sizes = [192, 384, 768, 1536]
|
||||
config.backbone_config.stem_channels = [3, 24, 32]
|
||||
config.backbone_config.stage_in_channels = [32, 96, 384, 768]
|
||||
config.backbone_config.stage_mid_channels = [32, 64, 128, 256]
|
||||
config.backbone_config.stage_out_channels = [96, 384, 768, 1536]
|
||||
config.backbone_config.stage_num_blocks = [1, 1, 3, 1]
|
||||
config.backbone_config.stage_downsample = [False, True, True, True]
|
||||
config.backbone_config.stage_light_block = [False, False, True, True]
|
||||
config.backbone_config.stage_kernel_size = [3, 3, 5, 5]
|
||||
config.backbone_config.stage_numb_of_layers = [4, 4, 4, 4]
|
||||
config.decoder_layers = 4
|
||||
config.decoder_n_points = [3, 6, 3]
|
||||
config.encoder_in_channels = [384, 768, 1536]
|
||||
config.backbone_config.use_learnable_affine_block = True
|
||||
config.depth_mult = 0.67
|
||||
if model_name == "dfine_m_obj365":
|
||||
config.num_labels = 366
|
||||
elif model_name in ["dfine_l_coco", "dfine_l_obj2coco_e25", "dfine_l_obj365"]:
|
||||
config.backbone_config.hidden_sizes = [256, 512, 1024, 2048]
|
||||
config.backbone_config.stem_channels = [3, 32, 48]
|
||||
config.backbone_config.stage_in_channels = [48, 128, 512, 1024]
|
||||
config.backbone_config.stage_mid_channels = [48, 96, 192, 384]
|
||||
config.backbone_config.stage_out_channels = [128, 512, 1024, 2048]
|
||||
config.backbone_config.stage_num_blocks = [1, 1, 3, 1]
|
||||
config.backbone_config.stage_downsample = [False, True, True, True]
|
||||
config.backbone_config.stage_light_block = [False, False, True, True]
|
||||
config.backbone_config.stage_kernel_size = [3, 3, 5, 5]
|
||||
config.backbone_config.stage_numb_of_layers = [6, 6, 6, 6]
|
||||
config.encoder_ffn_dim = 1024
|
||||
config.encoder_in_channels = [512, 1024, 2048]
|
||||
config.decoder_n_points = [3, 6, 3]
|
||||
if model_name == "dfine_l_obj365":
|
||||
config.num_labels = 366
|
||||
elif model_name in ["dfine_n_coco", "dfine_n_obj2coco_e25", "dfine_n_obj365"]:
|
||||
config.backbone_config.hidden_sizes = [128, 256, 512, 1024]
|
||||
config.backbone_config.stem_channels = [3, 16, 16]
|
||||
config.backbone_config.stage_in_channels = [16, 64, 256, 512]
|
||||
config.backbone_config.stage_mid_channels = [16, 32, 64, 128]
|
||||
config.backbone_config.stage_out_channels = [64, 256, 512, 1024]
|
||||
config.backbone_config.stage_num_blocks = [1, 1, 2, 1]
|
||||
config.backbone_config.stage_downsample = [False, True, True, True]
|
||||
config.backbone_config.stage_light_block = [False, False, True, True]
|
||||
config.backbone_config.stage_kernel_size = [3, 3, 5, 5]
|
||||
config.backbone_config.stage_numb_of_layers = [3, 3, 3, 3]
|
||||
config.backbone_config.out_indices = [3, 4]
|
||||
config.backbone_config.use_learnable_affine_block = True
|
||||
config.num_feature_levels = 2
|
||||
config.encoder_ffn_dim = 512
|
||||
config.encode_proj_layers = [1]
|
||||
config.d_model = 128
|
||||
config.encoder_hidden_dim = 128
|
||||
config.decoder_ffn_dim = 512
|
||||
config.encoder_in_channels = [512, 1024]
|
||||
config.decoder_n_points = [6, 6]
|
||||
config.decoder_in_channels = [128, 128]
|
||||
config.feat_strides = [16, 32]
|
||||
config.depth_mult = 0.5
|
||||
config.decoder_layers = 3
|
||||
config.hidden_expansion = 0.34
|
||||
if model_name == "dfine_n_obj365":
|
||||
config.num_labels = 366
|
||||
else:
|
||||
config.backbone_config.hidden_sizes = [128, 256, 512, 1024]
|
||||
config.backbone_config.stem_channels = [3, 16, 16]
|
||||
config.backbone_config.stage_in_channels = [16, 64, 256, 512]
|
||||
config.backbone_config.stage_mid_channels = [16, 32, 64, 128]
|
||||
config.backbone_config.stage_out_channels = [64, 256, 512, 1024]
|
||||
config.backbone_config.stage_num_blocks = [1, 1, 2, 1]
|
||||
config.backbone_config.stage_downsample = [False, True, True, True]
|
||||
config.backbone_config.stage_light_block = [False, False, True, True]
|
||||
config.backbone_config.stage_kernel_size = [3, 3, 5, 5]
|
||||
config.backbone_config.stage_numb_of_layers = [3, 3, 3, 3]
|
||||
config.decoder_layers = 3
|
||||
config.hidden_expansion = 0.5
|
||||
config.depth_mult = 0.34
|
||||
config.decoder_n_points = [3, 6, 3]
|
||||
config.encoder_in_channels = [256, 512, 1024]
|
||||
config.backbone_config.use_learnable_affine_block = True
|
||||
if model_name == "dfine_s_obj365":
|
||||
config.num_labels = 366
|
||||
|
||||
return config
|
||||
|
||||
|
||||
def load_original_state_dict(repo_id, model_name):
|
||||
directory_path = hf_hub_download(repo_id=repo_id, filename=f"{model_name}.pth")
|
||||
|
||||
original_state_dict = {}
|
||||
model = torch.load(directory_path, map_location="cpu")["model"]
|
||||
for key in model.keys():
|
||||
original_state_dict[key] = model[key]
|
||||
|
||||
return original_state_dict
|
||||
|
||||
|
||||
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
|
||||
# Decoder base mappings
|
||||
r"decoder.valid_mask": r"model.decoder.valid_mask",
|
||||
r"decoder.anchors": r"model.decoder.anchors",
|
||||
r"decoder.up": r"model.decoder.up",
|
||||
r"decoder.reg_scale": r"model.decoder.reg_scale",
|
||||
# Backbone stem mappings - including stem2a and stem2b
|
||||
r"backbone.stem.stem1.conv.weight": r"model.backbone.model.embedder.stem1.convolution.weight",
|
||||
r"backbone.stem.stem2a.conv.weight": r"model.backbone.model.embedder.stem2a.convolution.weight",
|
||||
r"backbone.stem.stem2b.conv.weight": r"model.backbone.model.embedder.stem2b.convolution.weight",
|
||||
r"backbone.stem.stem3.conv.weight": r"model.backbone.model.embedder.stem3.convolution.weight",
|
||||
r"backbone.stem.stem4.conv.weight": r"model.backbone.model.embedder.stem4.convolution.weight",
|
||||
# Stem normalization
|
||||
r"backbone.stem.stem1.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.embedder.stem1.normalization.\1",
|
||||
r"backbone.stem.stem2a.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.embedder.stem2a.normalization.\1",
|
||||
r"backbone.stem.stem2b.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.embedder.stem2b.normalization.\1",
|
||||
r"backbone.stem.stem3.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.embedder.stem3.normalization.\1",
|
||||
r"backbone.stem.stem4.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.embedder.stem4.normalization.\1",
|
||||
# Stem lab parameters - fixed with .lab in the path
|
||||
r"backbone.stem.stem1.lab.(scale|bias)": r"model.backbone.model.embedder.stem1.lab.\1",
|
||||
r"backbone.stem.stem2a.lab.(scale|bias)": r"model.backbone.model.embedder.stem2a.lab.\1",
|
||||
r"backbone.stem.stem2b.lab.(scale|bias)": r"model.backbone.model.embedder.stem2b.lab.\1",
|
||||
r"backbone.stem.stem3.lab.(scale|bias)": r"model.backbone.model.embedder.stem3.lab.\1",
|
||||
r"backbone.stem.stem4.lab.(scale|bias)": r"model.backbone.model.embedder.stem4.lab.\1",
|
||||
# Backbone stages mappings
|
||||
r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).conv.weight": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.convolution.weight",
|
||||
r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.normalization.\4",
|
||||
r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).conv1.conv.weight": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.conv1.convolution.weight",
|
||||
r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).conv2.conv.weight": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.conv2.convolution.weight",
|
||||
r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).conv1.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.conv1.normalization.\4",
|
||||
r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).conv2.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.conv2.normalization.\4",
|
||||
# Backbone stages aggregation
|
||||
r"backbone.stages.(\d+).blocks.(\d+).aggregation.0.conv.weight": r"model.backbone.model.encoder.stages.\1.blocks.\2.aggregation.0.convolution.weight",
|
||||
r"backbone.stages.(\d+).blocks.(\d+).aggregation.1.conv.weight": r"model.backbone.model.encoder.stages.\1.blocks.\2.aggregation.1.convolution.weight",
|
||||
r"backbone.stages.(\d+).blocks.(\d+).aggregation.0.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.\1.blocks.\2.aggregation.0.normalization.\3",
|
||||
r"backbone.stages.(\d+).blocks.(\d+).aggregation.1.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.\1.blocks.\2.aggregation.1.normalization.\3",
|
||||
# Backbone stages lab parameters for aggregation
|
||||
r"backbone.stages.(\d+).blocks.(\d+).aggregation.0.lab.(scale|bias)": r"model.backbone.model.encoder.stages.\1.blocks.\2.aggregation.0.lab.\3",
|
||||
r"backbone.stages.(\d+).blocks.(\d+).aggregation.1.lab.(scale|bias)": r"model.backbone.model.encoder.stages.\1.blocks.\2.aggregation.1.lab.\3",
|
||||
r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).lab.(scale|bias)": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.lab.\4",
|
||||
# Conv1/Conv2 layers with lab
|
||||
r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).conv1.lab.(scale|bias)": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.conv1.lab.\4",
|
||||
r"backbone.stages.(\d+).blocks.(\d+).layers.(\d+).conv2.lab.(scale|bias)": r"model.backbone.model.encoder.stages.\1.blocks.\2.layers.\3.conv2.lab.\4",
|
||||
# Downsample with lab
|
||||
r"backbone.stages.(\d+).downsample.lab.(scale|bias)": r"model.backbone.model.encoder.stages.\1.downsample.lab.\2",
|
||||
# Backbone downsample
|
||||
r"backbone.stages.(\d+).downsample.conv.weight": r"model.backbone.model.encoder.stages.\1.downsample.convolution.weight",
|
||||
r"backbone.stages.(\d+).downsample.bn.(weight|bias|running_mean|running_var)": r"model.backbone.model.encoder.stages.\1.downsample.normalization.\2",
|
||||
# Encoder mappings
|
||||
r"encoder.encoder.(\d+).layers.0.self_attn.out_proj.(weight|bias)": r"model.encoder.encoder.\1.layers.0.self_attn.out_proj.\2",
|
||||
r"encoder.encoder.(\d+).layers.0.linear1.(weight|bias)": r"model.encoder.encoder.\1.layers.0.fc1.\2",
|
||||
r"encoder.encoder.(\d+).layers.0.linear2.(weight|bias)": r"model.encoder.encoder.\1.layers.0.fc2.\2",
|
||||
r"encoder.encoder.(\d+).layers.0.norm1.(weight|bias)": r"model.encoder.encoder.\1.layers.0.self_attn_layer_norm.\2",
|
||||
r"encoder.encoder.(\d+).layers.0.norm2.(weight|bias)": r"model.encoder.encoder.\1.layers.0.final_layer_norm.\2",
|
||||
# Encoder projections and convolutions
|
||||
r"encoder.input_proj.(\d+).conv.weight": r"model.encoder_input_proj.\1.0.weight",
|
||||
r"encoder.input_proj.(\d+).norm.(weight|bias|running_mean|running_var)": r"model.encoder_input_proj.\1.1.\2",
|
||||
r"encoder.lateral_convs.(\d+).conv.weight": r"model.encoder.lateral_convs.\1.conv.weight",
|
||||
r"encoder.lateral_convs.(\d+).norm.(weight|bias|running_mean|running_var)": r"model.encoder.lateral_convs.\1.norm.\2",
|
||||
# FPN blocks - complete structure
|
||||
# Basic convolutions
|
||||
r"encoder.fpn_blocks.(\d+).cv1.conv.weight": r"model.encoder.fpn_blocks.\1.conv1.conv.weight",
|
||||
r"encoder.fpn_blocks.(\d+).cv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.conv1.norm.\2",
|
||||
# CSP Rep1 path
|
||||
r"encoder.fpn_blocks.(\d+).cv2.0.conv1.conv.weight": r"model.encoder.fpn_blocks.\1.csp_rep1.conv1.conv.weight",
|
||||
r"encoder.fpn_blocks.(\d+).cv2.0.conv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.csp_rep1.conv1.norm.\2",
|
||||
r"encoder.fpn_blocks.(\d+).cv2.0.conv2.conv.weight": r"model.encoder.fpn_blocks.\1.csp_rep1.conv2.conv.weight",
|
||||
r"encoder.fpn_blocks.(\d+).cv2.0.conv2.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.csp_rep1.conv2.norm.\2",
|
||||
r"encoder.fpn_blocks.(\d+).cv2.1.conv.weight": r"model.encoder.fpn_blocks.\1.conv2.conv.weight",
|
||||
r"encoder.fpn_blocks.(\d+).cv2.1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.conv2.norm.\2",
|
||||
# CSP Rep2 path
|
||||
r"encoder.fpn_blocks.(\d+).cv3.0.conv1.conv.weight": r"model.encoder.fpn_blocks.\1.csp_rep2.conv1.conv.weight",
|
||||
r"encoder.fpn_blocks.(\d+).cv3.0.conv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.csp_rep2.conv1.norm.\2",
|
||||
r"encoder.fpn_blocks.(\d+).cv3.0.conv2.conv.weight": r"model.encoder.fpn_blocks.\1.csp_rep2.conv2.conv.weight",
|
||||
r"encoder.fpn_blocks.(\d+).cv3.0.conv2.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.csp_rep2.conv2.norm.\2",
|
||||
r"encoder.fpn_blocks.(\d+).cv3.1.conv.weight": r"model.encoder.fpn_blocks.\1.conv3.conv.weight",
|
||||
r"encoder.fpn_blocks.(\d+).cv3.1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.conv3.norm.\2",
|
||||
# Final conv
|
||||
r"encoder.fpn_blocks.(\d+).cv4.conv.weight": r"model.encoder.fpn_blocks.\1.conv4.conv.weight",
|
||||
r"encoder.fpn_blocks.(\d+).cv4.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.conv4.norm.\2",
|
||||
# Bottlenecks for CSP Rep1
|
||||
r"encoder.fpn_blocks.(\d+).cv2.0.bottlenecks.(\d+).conv1.conv.weight": r"model.encoder.fpn_blocks.\1.csp_rep1.bottlenecks.\2.conv1.conv.weight",
|
||||
r"encoder.fpn_blocks.(\d+).cv2.0.bottlenecks.(\d+).conv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.csp_rep1.bottlenecks.\2.conv1.norm.\3",
|
||||
r"encoder.fpn_blocks.(\d+).cv2.0.bottlenecks.(\d+).conv2.conv.weight": r"model.encoder.fpn_blocks.\1.csp_rep1.bottlenecks.\2.conv2.conv.weight",
|
||||
r"encoder.fpn_blocks.(\d+).cv2.0.bottlenecks.(\d+).conv2.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.csp_rep1.bottlenecks.\2.conv2.norm.\3",
|
||||
# Bottlenecks for CSP Rep2
|
||||
r"encoder.fpn_blocks.(\d+).cv3.0.bottlenecks.(\d+).conv1.conv.weight": r"model.encoder.fpn_blocks.\1.csp_rep2.bottlenecks.\2.conv1.conv.weight",
|
||||
r"encoder.fpn_blocks.(\d+).cv3.0.bottlenecks.(\d+).conv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.csp_rep2.bottlenecks.\2.conv1.norm.\3",
|
||||
r"encoder.fpn_blocks.(\d+).cv3.0.bottlenecks.(\d+).conv2.conv.weight": r"model.encoder.fpn_blocks.\1.csp_rep2.bottlenecks.\2.conv2.conv.weight",
|
||||
r"encoder.fpn_blocks.(\d+).cv3.0.bottlenecks.(\d+).conv2.norm.(weight|bias|running_mean|running_var)": r"model.encoder.fpn_blocks.\1.csp_rep2.bottlenecks.\2.conv2.norm.\3",
|
||||
# PAN blocks - complete structure
|
||||
# Basic convolutions
|
||||
r"encoder.pan_blocks.(\d+).cv1.conv.weight": r"model.encoder.pan_blocks.\1.conv1.conv.weight",
|
||||
r"encoder.pan_blocks.(\d+).cv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.conv1.norm.\2",
|
||||
# CSP Rep1 path
|
||||
r"encoder.pan_blocks.(\d+).cv2.0.conv1.conv.weight": r"model.encoder.pan_blocks.\1.csp_rep1.conv1.conv.weight",
|
||||
r"encoder.pan_blocks.(\d+).cv2.0.conv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.csp_rep1.conv1.norm.\2",
|
||||
r"encoder.pan_blocks.(\d+).cv2.0.conv2.conv.weight": r"model.encoder.pan_blocks.\1.csp_rep1.conv2.conv.weight",
|
||||
r"encoder.pan_blocks.(\d+).cv2.0.conv2.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.csp_rep1.conv2.norm.\2",
|
||||
r"encoder.pan_blocks.(\d+).cv2.1.conv.weight": r"model.encoder.pan_blocks.\1.conv2.conv.weight",
|
||||
r"encoder.pan_blocks.(\d+).cv2.1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.conv2.norm.\2",
|
||||
# CSP Rep2 path
|
||||
r"encoder.pan_blocks.(\d+).cv3.0.conv1.conv.weight": r"model.encoder.pan_blocks.\1.csp_rep2.conv1.conv.weight",
|
||||
r"encoder.pan_blocks.(\d+).cv3.0.conv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.csp_rep2.conv1.norm.\2",
|
||||
r"encoder.pan_blocks.(\d+).cv3.0.conv2.conv.weight": r"model.encoder.pan_blocks.\1.csp_rep2.conv2.conv.weight",
|
||||
r"encoder.pan_blocks.(\d+).cv3.0.conv2.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.csp_rep2.conv2.norm.\2",
|
||||
r"encoder.pan_blocks.(\d+).cv3.1.conv.weight": r"model.encoder.pan_blocks.\1.conv3.conv.weight",
|
||||
r"encoder.pan_blocks.(\d+).cv3.1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.conv3.norm.\2",
|
||||
# Final conv
|
||||
r"encoder.pan_blocks.(\d+).cv4.conv.weight": r"model.encoder.pan_blocks.\1.conv4.conv.weight",
|
||||
r"encoder.pan_blocks.(\d+).cv4.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.conv4.norm.\2",
|
||||
# Bottlenecks for CSP Rep1
|
||||
r"encoder.pan_blocks.(\d+).cv2.0.bottlenecks.(\d+).conv1.conv.weight": r"model.encoder.pan_blocks.\1.csp_rep1.bottlenecks.\2.conv1.conv.weight",
|
||||
r"encoder.pan_blocks.(\d+).cv2.0.bottlenecks.(\d+).conv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.csp_rep1.bottlenecks.\2.conv1.norm.\3",
|
||||
r"encoder.pan_blocks.(\d+).cv2.0.bottlenecks.(\d+).conv2.conv.weight": r"model.encoder.pan_blocks.\1.csp_rep1.bottlenecks.\2.conv2.conv.weight",
|
||||
r"encoder.pan_blocks.(\d+).cv2.0.bottlenecks.(\d+).conv2.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.csp_rep1.bottlenecks.\2.conv2.norm.\3",
|
||||
# Bottlenecks for CSP Rep2
|
||||
r"encoder.pan_blocks.(\d+).cv3.0.bottlenecks.(\d+).conv1.conv.weight": r"model.encoder.pan_blocks.\1.csp_rep2.bottlenecks.\2.conv1.conv.weight",
|
||||
r"encoder.pan_blocks.(\d+).cv3.0.bottlenecks.(\d+).conv1.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.csp_rep2.bottlenecks.\2.conv1.norm.\3",
|
||||
r"encoder.pan_blocks.(\d+).cv3.0.bottlenecks.(\d+).conv2.conv.weight": r"model.encoder.pan_blocks.\1.csp_rep2.bottlenecks.\2.conv2.conv.weight",
|
||||
r"encoder.pan_blocks.(\d+).cv3.0.bottlenecks.(\d+).conv2.norm.(weight|bias|running_mean|running_var)": r"model.encoder.pan_blocks.\1.csp_rep2.bottlenecks.\2.conv2.norm.\3",
|
||||
# Downsample convolutions
|
||||
r"encoder.downsample_convs.(\d+).0.cv(\d+).conv.weight": r"model.encoder.downsample_convs.\1.conv\2.conv.weight",
|
||||
r"encoder.downsample_convs.(\d+).0.cv(\d+).norm.(weight|bias|running_mean|running_var)": r"model.encoder.downsample_convs.\1.conv\2.norm.\3",
|
||||
# Decoder layers
|
||||
r"decoder.decoder.layers.(\d+).self_attn.out_proj.(weight|bias)": r"model.decoder.layers.\1.self_attn.out_proj.\2",
|
||||
r"decoder.decoder.layers.(\d+).cross_attn.sampling_offsets.(weight|bias)": r"model.decoder.layers.\1.encoder_attn.sampling_offsets.\2",
|
||||
r"decoder.decoder.layers.(\d+).cross_attn.attention_weights.(weight|bias)": r"model.decoder.layers.\1.encoder_attn.attention_weights.\2",
|
||||
r"decoder.decoder.layers.(\d+).cross_attn.value_proj.(weight|bias)": r"model.decoder.layers.\1.encoder_attn.value_proj.\2",
|
||||
r"decoder.decoder.layers.(\d+).cross_attn.output_proj.(weight|bias)": r"model.decoder.layers.\1.encoder_attn.output_proj.\2",
|
||||
r"decoder.decoder.layers.(\d+).cross_attn.num_points_scale": r"model.decoder.layers.\1.encoder_attn.num_points_scale",
|
||||
r"decoder.decoder.layers.(\d+).gateway.gate.(weight|bias)": r"model.decoder.layers.\1.gateway.gate.\2",
|
||||
r"decoder.decoder.layers.(\d+).gateway.norm.(weight|bias)": r"model.decoder.layers.\1.gateway.norm.\2",
|
||||
r"decoder.decoder.layers.(\d+).norm1.(weight|bias)": r"model.decoder.layers.\1.self_attn_layer_norm.\2",
|
||||
r"decoder.decoder.layers.(\d+).norm2.(weight|bias)": r"model.decoder.layers.\1.encoder_attn_layer_norm.\2",
|
||||
r"decoder.decoder.layers.(\d+).norm3.(weight|bias)": r"model.decoder.layers.\1.final_layer_norm.\2",
|
||||
r"decoder.decoder.layers.(\d+).linear1.(weight|bias)": r"model.decoder.layers.\1.fc1.\2",
|
||||
r"decoder.decoder.layers.(\d+).linear2.(weight|bias)": r"model.decoder.layers.\1.fc2.\2",
|
||||
# LQE layers
|
||||
r"decoder.decoder.lqe_layers.(\d+).reg_conf.layers.(\d+).(weight|bias)": r"model.decoder.lqe_layers.\1.reg_conf.layers.\2.\3",
|
||||
# Decoder heads and projections
|
||||
r"decoder.dec_score_head.(\d+).(weight|bias)": r"model.decoder.class_embed.\1.\2",
|
||||
r"decoder.dec_bbox_head.(\d+).layers.(\d+).(weight|bias)": r"model.decoder.bbox_embed.\1.layers.\2.\3",
|
||||
r"decoder.pre_bbox_head.layers.(\d+).(weight|bias)": r"model.decoder.pre_bbox_head.layers.\1.\2",
|
||||
r"decoder.input_proj.(\d+).conv.weight": r"model.decoder_input_proj.\1.0.weight",
|
||||
r"decoder.input_proj.(\d+).norm.(weight|bias|running_mean|running_var)": r"model.decoder_input_proj.\1.1.\2",
|
||||
# Other decoder components
|
||||
r"decoder.denoising_class_embed.weight": r"model.denoising_class_embed.weight",
|
||||
r"decoder.query_pos_head.layers.(\d+).(weight|bias)": r"model.decoder.query_pos_head.layers.\1.\2",
|
||||
r"decoder.enc_output.proj.(weight|bias)": r"model.enc_output.0.\1",
|
||||
r"decoder.enc_output.norm.(weight|bias)": r"model.enc_output.1.\1",
|
||||
r"decoder.enc_score_head.(weight|bias)": r"model.enc_score_head.\1",
|
||||
r"decoder.enc_bbox_head.layers.(\d+).(weight|bias)": r"model.enc_bbox_head.layers.\1.\2",
|
||||
}
|
||||
|
||||
|
||||
def convert_old_keys_to_new_keys(state_dict_keys: dict = None):
|
||||
# Use the mapping to rename keys
|
||||
for original_key, converted_key in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
|
||||
for key in list(state_dict_keys.keys()):
|
||||
new_key = re.sub(original_key, converted_key, key)
|
||||
if new_key != key:
|
||||
state_dict_keys[new_key] = state_dict_keys.pop(key)
|
||||
|
||||
return state_dict_keys
|
||||
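`convert_old_keys_to_new_keys` applies the regex table above in place; a toy sketch of the renaming behaviour (the key and tensor below are illustrative placeholders, not real checkpoint entries):

```python
import re

toy_mapping = {
    r"decoder.dec_score_head.(\d+).(weight|bias)": r"model.decoder.class_embed.\1.\2",
}
toy_state = {"decoder.dec_score_head.0.weight": "tensor-placeholder"}

# Same in-place rename loop as the function above
for pattern, replacement in toy_mapping.items():
    for key in list(toy_state.keys()):
        new_key = re.sub(pattern, replacement, key)
        if new_key != key:
            toy_state[new_key] = toy_state.pop(key)

print(toy_state)  # {'model.decoder.class_embed.0.weight': 'tensor-placeholder'}
```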
|
||||
|
||||
def read_in_q_k_v(state_dict, config, model_name):
|
||||
prefix = ""
|
||||
encoder_hidden_dim = config.encoder_hidden_dim
|
||||
|
||||
# first: transformer encoder
|
||||
for i in range(config.encoder_layers):
|
||||
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
|
||||
in_proj_weight = state_dict.pop(f"{prefix}encoder.encoder.{i}.layers.0.self_attn.in_proj_weight")
|
||||
in_proj_bias = state_dict.pop(f"{prefix}encoder.encoder.{i}.layers.0.self_attn.in_proj_bias")
|
||||
# next, add query, keys and values (in that order) to the state dict
|
||||
state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.q_proj.weight"] = in_proj_weight[
|
||||
:encoder_hidden_dim, :
|
||||
]
|
||||
state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.q_proj.bias"] = in_proj_bias[:encoder_hidden_dim]
|
||||
state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.k_proj.weight"] = in_proj_weight[
|
||||
encoder_hidden_dim : 2 * encoder_hidden_dim, :
|
||||
]
|
||||
state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.k_proj.bias"] = in_proj_bias[
|
||||
encoder_hidden_dim : 2 * encoder_hidden_dim
|
||||
]
|
||||
state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.v_proj.weight"] = in_proj_weight[
|
||||
-encoder_hidden_dim:, :
|
||||
]
|
||||
state_dict[f"model.encoder.encoder.{i}.layers.0.self_attn.v_proj.bias"] = in_proj_bias[-encoder_hidden_dim:]
|
||||
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
|
||||
for i in range(config.decoder_layers):
|
||||
# read in weights + bias of input projection layer of self-attention
|
||||
in_proj_weight = state_dict.pop(f"{prefix}decoder.decoder.layers.{i}.self_attn.in_proj_weight", None)
|
||||
in_proj_bias = state_dict.pop(f"{prefix}decoder.decoder.layers.{i}.self_attn.in_proj_bias", None)
|
||||
# next, add query, keys and values (in that order) to the state dict
|
||||
if model_name in ["dfine_n_coco", "dfine_n_obj2coco_e25", "dfine_n_obj365"]:
|
||||
state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:128, :]
|
||||
state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:128]
|
||||
state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:384, :]
|
||||
state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:384]
|
||||
state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-128:, :]
|
||||
state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-128:]
|
||||
else:
|
||||
state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
|
||||
state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
|
||||
state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
|
||||
state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
|
||||
state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
|
||||
state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
|
||||
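The slicing above mirrors how PyTorch's fused `MultiheadAttention` stacks the query, key and value projections into a single `in_proj_weight`; a toy sketch with hidden size 4 (shapes only, not real weights):

```python
import torch

hidden = 4
in_proj_weight = torch.randn(3 * hidden, hidden)  # fused [q; k; v] projection matrix

q_w = in_proj_weight[:hidden, :]                  # first block  -> q_proj.weight
k_w = in_proj_weight[hidden : 2 * hidden, :]      # middle block -> k_proj.weight
v_w = in_proj_weight[-hidden:, :]                 # last block   -> v_proj.weight
print(q_w.shape, k_w.shape, v_w.shape)            # three [4, 4] matrices
```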
|
||||
|
||||
# We will verify our results on an image of cute cats
|
||||
def prepare_img():
|
||||
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
||||
im = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
return im
|
||||
|
||||
|
||||
@torch.no_grad()
|
||||
def convert_deim_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, repo_id):
|
||||
"""
|
||||
Copy/paste/tweak model's weights to our deim structure.
|
||||
"""
|
||||
|
||||
# load default config
|
||||
config = get_deim_config(model_name)
|
||||
state_dict = load_original_state_dict(repo_id, model_name)
|
||||
state_dict.pop("decoder.valid_mask", None)
|
||||
state_dict.pop("decoder.anchors", None)
|
||||
model = DeimForObjectDetection(config)
|
||||
logger.info(f"Converting model {model_name}...")
|
||||
|
||||
state_dict = convert_old_keys_to_new_keys(state_dict)
|
||||
state_dict.pop("decoder.model.decoder.up", None)
|
||||
state_dict.pop("decoder.model.decoder.reg_scale", None)
|
||||
|
||||
# query, key and value matrices need special treatment
|
||||
read_in_q_k_v(state_dict, config, model_name)
|
||||
# clean up the state dict: drop batch-norm bookkeeping buffers and, for the two-stage heads, duplicate the box/class embeddings without the "model.decoder." prefix
|
||||
for key in state_dict.copy().keys():
|
||||
if key.endswith("num_batches_tracked"):
|
||||
del state_dict[key]
|
||||
# for two_stage
|
||||
if "bbox_embed" in key or ("class_embed" in key and "denoising_" not in key):
|
||||
state_dict[key.split("model.decoder.")[-1]] = state_dict[key]
|
||||
|
||||
# finally, create HuggingFace model and load state dict
|
||||
model.load_state_dict(state_dict)
|
||||
model.eval()
|
||||
|
||||
# load image processor
|
||||
image_processor = RTDetrImageProcessor()
|
||||
|
||||
# prepare image
|
||||
img = prepare_img()
|
||||
|
||||
# preprocess image
|
||||
transformations = transforms.Compose(
|
||||
[
|
||||
transforms.Resize([640, 640], interpolation=transforms.InterpolationMode.BILINEAR),
|
||||
transforms.ToTensor(),
|
||||
]
|
||||
)
|
||||
original_pixel_values = transformations(img).unsqueeze(0) # insert batch dimension
|
||||
|
||||
encoding = image_processor(images=img, return_tensors="pt")
|
||||
pixel_values = encoding["pixel_values"]
|
||||
|
||||
assert torch.allclose(original_pixel_values, pixel_values)
|
||||
|
||||
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
||||
model.to(device)
|
||||
pixel_values = pixel_values.to(device)
|
||||
|
||||
outputs = model(pixel_values)
|
||||
|
||||
    if model_name == "dfine_x_coco":
        expected_slice_logits = torch.tensor(
            [
                [-4.844723, -4.7293096, -4.5971327],
                [-4.554266, -4.61723, -4.627926],
                [-4.3934402, -4.6064143, -4.139952],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.2565248, 0.5477609, 0.47644863],
                [0.7690029, 0.41423926, 0.46148556],
                [0.1688096, 0.19923759, 0.21118002],
            ]
        )
    elif model_name == "dfine_x_obj2coco":
        expected_slice_logits = torch.tensor(
            [
                [-4.230433, -6.6295037, -4.8339615],
                [-4.085411, -6.3280816, -4.695468],
                [-3.8968022, -6.336813, -4.67051],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.25707328, 0.54842496, 0.47624254],
                [0.76967394, 0.41272867, 0.45970756],
                [0.16882066, 0.19918433, 0.2112098],
            ]
        )
    elif model_name == "dfine_x_obj365":
        expected_slice_logits = torch.tensor(
            [
                [-6.3844957, -3.7549126, -4.6873264],
                [-5.8433194, -3.4490552, -3.3228905],
                [-6.5314736, -3.7856622, -4.895984],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.7703046, 0.41329497, 0.45932162],
                [0.16898105, 0.19876392, 0.21050783],
                [0.25134972, 0.5517619, 0.4864124],
            ]
        )
    elif model_name == "dfine_m_coco":
        expected_slice_logits = torch.tensor(
            [
                [-4.5187078, -4.71708, -4.117749],
                [-4.513984, -4.937715, -3.829125],
                [-4.830042, -6.931682, -3.1740026],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.25851426, 0.5489963, 0.4757598],
                [0.769683, 0.41411665, 0.45988125],
                [0.16866133, 0.19921188, 0.21207744],
            ]
        )
    elif model_name == "dfine_m_obj2coco":
        expected_slice_logits = torch.tensor(
            [
                [-4.520666, -7.6678333, -5.739887],
                [-4.5053635, -7.510611, -5.452532],
                [-4.70348, -5.6098466, -5.0199957],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.2567608, 0.5485795, 0.4767465],
                [0.77035284, 0.41236404, 0.4580645],
                [0.5498525, 0.27548885, 0.05886984],
            ]
        )
    elif model_name == "dfine_m_obj365":
        expected_slice_logits = torch.tensor(
            [
                [-5.770525, -3.1610885, -5.2807794],
                [-5.7809954, -3.768266, -5.1146393],
                [-6.180705, -3.7357295, -3.1651964],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.2529114, 0.5526663, 0.48270613],
                [0.7712474, 0.41294736, 0.457174],
                [0.5497157, 0.27588123, 0.05813372],
            ]
        )
    elif model_name == "dfine_l_coco":
        expected_slice_logits = torch.tensor(
            [
                [-4.068779, -5.169955, -4.339212],
                [-3.9461594, -5.0279613, -4.0161457],
                [-4.218292, -6.196324, -5.175245],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.2564867, 0.5489948, 0.4748876],
                [0.7693534, 0.4138953, 0.4598034],
                [0.16875696, 0.19875404, 0.21196914],
            ]
        )
    elif model_name == "dfine_l_obj365":
        expected_slice_logits = torch.tensor(
            [
                [-5.7953215, -3.4901116, -5.4394145],
                [-5.7032104, -3.671125, -5.76121],
                [-6.09466, -3.1512096, -4.285499],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.7693825, 0.41265628, 0.4606362],
                [0.25306237, 0.55187637, 0.4832178],
                [0.16892478, 0.19880727, 0.21115331],
            ]
        )
    elif model_name == "dfine_l_obj2coco_e25":
        expected_slice_logits = torch.tensor(
            [
                [-3.6098495, -6.633563, -5.1227236],
                [-3.682696, -6.9178205, -5.414557],
                [-4.491674, -6.0823426, -4.5718226],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.7697078, 0.41368833, 0.45879585],
                [0.2573691, 0.54856044, 0.47715297],
                [0.16895264, 0.19871138, 0.2115552],
            ]
        )
    elif model_name == "dfine_n_coco":
        expected_slice_logits = torch.tensor(
            [
                [-3.7827945, -5.0889463, -4.8341026],
                [-5.3046904, -6.2801714, -2.9276395],
                [-4.497901, -5.2670407, -6.2380104],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.73334837, 0.4270624, 0.39424777],
                [0.1680235, 0.1988639, 0.21031213],
                [0.25370035, 0.5534435, 0.48496848],
            ]
        )
    elif model_name == "dfine_s_coco":
        expected_slice_logits = torch.tensor(
            [
                [-3.8097816, -4.7724586, -5.994499],
                [-5.2974715, -9.499067, -6.1653666],
                [-5.3502765, -3.9530406, -6.3630295],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.7677696, 0.41479152, 0.46441072],
                [0.16912134, 0.19869131, 0.2123824],
                [0.2581653, 0.54818195, 0.47512347],
            ]
        )
    elif model_name == "dfine_s_obj2coco":
        expected_slice_logits = torch.tensor(
            [
                [-6.0208125, -7.532673, -5.0572147],
                [-3.3595953, -9.057545, -6.376975],
                [-4.3203554, -9.546032, -6.075504],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.16901012, 0.19883151, 0.21121952],
                [0.76784194, 0.41266578, 0.46402973],
                [0.2563128, 0.54797643, 0.47937632],
            ]
        )
    elif model_name == "dfine_s_obj365":
        expected_slice_logits = torch.tensor(
            [
                [-6.3807316, -4.320986, -6.4775343],
                [-6.5818424, -3.5009093, -5.75824],
                [-5.748005, -4.3228016, -4.003726],
            ]
        )
        expected_slice_boxes = torch.tensor(
            [
                [0.2532072, 0.5491191, 0.48222217],
                [0.76586807, 0.41175705, 0.46789962],
                [0.169111, 0.19844547, 0.21069047],
            ]
        )
    else:
        raise ValueError(f"Unknown model_name: {model_name}")

    assert torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits.to(outputs.logits.device), atol=1e-3)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes.to(outputs.pred_boxes.device), atol=1e-4)

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model, image processor and config to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        config.push_to_hub(
            repo_id=repo_id,
            commit_message="Add config from convert_deim_original_pytorch_checkpoint_to_hf.py",
        )
        model.push_to_hub(
            repo_id=repo_id,
            commit_message="Add model from convert_deim_original_pytorch_checkpoint_to_hf.py",
        )
        image_processor.push_to_hub(
            repo_id=repo_id,
            commit_message="Add image processor from convert_deim_original_pytorch_checkpoint_to_hf.py",
        )

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="dfine_s_coco",
        type=str,
        help="model_name of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    parser.add_argument(
        "--repo_id",
        type=str,
        help="repo_id where the model will be pushed to.",
    )
    args = parser.parse_args()
    convert_deim_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.repo_id)
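For context, a hypothetical end-to-end use of this converter; the dump folder and repo id below are placeholders rather than values defined in this PR:

# python convert_deim_original_pytorch_checkpoint_to_hf.py \
#     --model_name dfine_s_coco \
#     --pytorch_dump_folder_path ./converted_deim \
#     --repo_id <repo-id-hosting-the-original-weights>
# The saved folder can then be reloaded with the classes already used in this script:
from transformers import DeimForObjectDetection, RTDetrImageProcessor

model = DeimForObjectDetection.from_pretrained("./converted_deim")  # placeholder local path
image_processor = RTDetrImageProcessor.from_pretrained("./converted_deim")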
2295  src/transformers/models/deim/modeling_deim.py  Normal file
File diff suppressed because it is too large
1207  src/transformers/models/deim/modular_deim.py  Normal file
File diff suppressed because it is too large
0  tests/models/deim/__init__.py  Normal file
805  tests/models/deim/test_modeling_deim.py  Normal file
@ -0,0 +1,805 @@
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch deim model."""

import inspect
import math
import tempfile
import unittest

from parameterized import parameterized

from transformers import (
    DeimConfig,
    HGNetV2Config,
    is_torch_available,
    is_vision_available,
)
from transformers.testing_utils import require_torch, require_torch_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property


if is_torch_available():
    import torch

    from transformers import DeimForObjectDetection, DeimModel

if is_vision_available():
    from PIL import Image

    from transformers import RTDetrImageProcessor

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


CHECKPOINT = "ustc-community/dfine_s_coco"

class DeimModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        is_training=True,
        use_labels=True,
        n_targets=3,
        num_labels=10,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        batch_norm_eps=1e-5,
        # backbone
        backbone_config=None,
        # encoder HybridEncoder
        encoder_hidden_dim=32,
        encoder_in_channels=[128, 256, 512],
        feat_strides=[8, 16, 32],
        encoder_layers=1,
        encoder_ffn_dim=64,
        encoder_attention_heads=2,
        dropout=0.0,
        activation_dropout=0.0,
        encode_proj_layers=[2],
        positional_encoding_temperature=10000,
        encoder_activation_function="gelu",
        activation_function="silu",
        eval_size=None,
        normalize_before=False,
        # decoder DeimTransformer
        d_model=32,
        num_queries=30,
        decoder_in_channels=[32, 32, 32],
        decoder_ffn_dim=64,
        num_feature_levels=3,
        decoder_n_points=[3, 6, 3],
        decoder_n_levels=3,
        decoder_layers=2,
        decoder_attention_heads=2,
        decoder_activation_function="relu",
        attention_dropout=0.0,
        num_denoising=0,
        label_noise_ratio=0.5,
        box_noise_scale=1.0,
        learn_initial_query=False,
        anchor_image_size=None,
        image_size=64,
        disable_custom_kernels=True,
        with_box_refine=True,
        decoder_offset_scale=0.5,
        eval_idx=-1,
        layer_scale=1,
        reg_max=32,
        reg_scale=4.0,
        depth_mult=0.34,
        hidden_expansion=0.5,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = 3
        self.is_training = is_training
        self.use_labels = use_labels
        self.n_targets = n_targets
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.batch_norm_eps = batch_norm_eps
        self.backbone_config = backbone_config
        self.encoder_hidden_dim = encoder_hidden_dim
        self.encoder_in_channels = encoder_in_channels
        self.feat_strides = feat_strides
        self.encoder_layers = encoder_layers
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_attention_heads = encoder_attention_heads
        self.dropout = dropout
        self.activation_dropout = activation_dropout
        self.encode_proj_layers = encode_proj_layers
        self.positional_encoding_temperature = positional_encoding_temperature
        self.encoder_activation_function = encoder_activation_function
        self.activation_function = activation_function
        self.eval_size = eval_size
        self.normalize_before = normalize_before
        self.d_model = d_model
        self.num_queries = num_queries
        self.decoder_in_channels = decoder_in_channels
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_feature_levels = num_feature_levels
        self.decoder_n_points = decoder_n_points
        self.decoder_n_levels = decoder_n_levels
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_activation_function = decoder_activation_function
        self.attention_dropout = attention_dropout
        self.decoder_offset_scale = decoder_offset_scale
        self.eval_idx = eval_idx
        self.layer_scale = layer_scale
        self.reg_max = reg_max
        self.reg_scale = reg_scale
        self.depth_mult = depth_mult
        self.num_denoising = num_denoising
        self.label_noise_ratio = label_noise_ratio
        self.box_noise_scale = box_noise_scale
        self.learn_initial_query = learn_initial_query
        self.anchor_image_size = anchor_image_size
        self.image_size = image_size
        self.disable_custom_kernels = disable_custom_kernels
        self.with_box_refine = with_box_refine
        self.hidden_expansion = hidden_expansion

        self.encoder_seq_length = math.ceil(self.image_size / 32) * math.ceil(self.image_size / 32)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device)

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()
        config.num_labels = self.num_labels
        return config, pixel_values, pixel_mask, labels

    def get_config(self):
        hidden_sizes = [64, 128, 256, 512]
        backbone_config = HGNetV2Config(
            stage_in_channels=[16, 64, 128, 256],
            stage_mid_channels=[16, 32, 64, 128],
            stage_out_channels=[64, 128, 256, 512],
            stage_num_blocks=[1, 1, 2, 1],
            stage_downsample=[False, True, True, True],
            stage_light_block=[False, False, True, True],
            stage_kernel_size=[3, 3, 5, 5],
            stage_numb_of_layers=[3, 3, 3, 3],
            embeddings_size=10,
            hidden_sizes=hidden_sizes,
            depths=[1, 1, 2, 1],
            out_features=["stage2", "stage3", "stage4"],
            out_indices=[2, 3, 4],
            stem_channels=[3, 16, 16],
            use_lab=True,
        )
        return DeimConfig.from_backbone_configs(
            backbone_config=backbone_config,
            encoder_hidden_dim=self.encoder_hidden_dim,
            encoder_in_channels=self.encoder_in_channels,
            feat_strides=self.feat_strides,
            encoder_layers=self.encoder_layers,
            encoder_ffn_dim=self.encoder_ffn_dim,
            encoder_attention_heads=self.encoder_attention_heads,
            dropout=self.dropout,
            activation_dropout=self.activation_dropout,
            encode_proj_layers=self.encode_proj_layers,
            positional_encoding_temperature=self.positional_encoding_temperature,
            encoder_activation_function=self.encoder_activation_function,
            activation_function=self.activation_function,
            eval_size=self.eval_size,
            normalize_before=self.normalize_before,
            d_model=self.d_model,
            num_queries=self.num_queries,
            decoder_in_channels=self.decoder_in_channels,
            decoder_ffn_dim=self.decoder_ffn_dim,
            num_feature_levels=self.num_feature_levels,
            decoder_n_points=self.decoder_n_points,
            decoder_n_levels=self.decoder_n_levels,
            decoder_layers=self.decoder_layers,
            decoder_attention_heads=self.decoder_attention_heads,
            decoder_activation_function=self.decoder_activation_function,
            decoder_offset_scale=self.decoder_offset_scale,
            eval_idx=self.eval_idx,
            layer_scale=self.layer_scale,
            reg_max=self.reg_max,
            reg_scale=self.reg_scale,
            depth_mult=self.depth_mult,
            attention_dropout=self.attention_dropout,
            num_denoising=self.num_denoising,
            label_noise_ratio=self.label_noise_ratio,
            box_noise_scale=self.box_noise_scale,
            learn_initial_query=self.learn_initial_query,
            anchor_image_size=self.anchor_image_size,
            image_size=self.image_size,
            disable_custom_kernels=self.disable_custom_kernels,
            with_box_refine=self.with_box_refine,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def create_and_check_deim_model(self, config, pixel_values, pixel_mask, labels):
        model = DeimModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
        result = model(pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.d_model))

    def create_and_check_deim_object_detection_head_model(self, config, pixel_values, pixel_mask, labels):
        model = DeimForObjectDetection(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))

        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))

@require_torch
class DeimModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DeimModel, DeimForObjectDetection) if is_torch_available() else ()
    is_encoder_decoder = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torch_exportable = True

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeimForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = DeimModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=DeimConfig,
            has_text_modality=False,
            common_properties=["hidden_size", "num_attention_heads"],
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deim_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deim_model(*config_and_inputs)

    def test_deim_object_detection_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deim_object_detection_head_model(*config_and_inputs)

    @unittest.skip(reason="Deim doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Deim does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Deim does not use test_inputs_embeds_matches_input_ids")
    def test_inputs_embeds_matches_input_ids(self):
        pass

    @unittest.skip(reason="Deim does not support input and output embeddings")
    def test_model_get_set_embeddings(self):
        pass

    @unittest.skip(reason="Deim does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Deim does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip(reason="Not relevant for the model")
    def test_can_init_all_missing_weights(self):
        pass

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.encoder_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.encoder_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [
                    self.model_tester.encoder_attention_heads,
                    self.model_tester.encoder_seq_length,
                    self.model_tester.encoder_seq_length,
                ],
            )
            out_len = len(outputs)

            correct_outlen = 15

            # loss is at first position
            if "labels" in inputs_dict:
                correct_outlen += 1  # loss is added to beginning
            # Object Detection model returns pred_logits and pred_boxes
            if model_class.__name__ == "DeimForObjectDetection":
                correct_outlen += 2

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.decoder_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [
                    self.model_tester.decoder_attention_heads,
                    self.model_tester.num_queries,
                    self.model_tester.num_queries,
                ],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.decoder_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [
                    self.model_tester.num_queries,
                    self.model_tester.decoder_attention_heads,
                    self.model_tester.decoder_n_levels * self.model_tester.decoder_n_points
                    if isinstance(self.model_tester.decoder_n_points, int)
                    else sum(self.model_tester.decoder_n_points),
                ],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # Deim should maintain encoder_hidden_states output
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.encoder_attentions

            self.assertEqual(len(self_attentions), self.model_tester.encoder_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [
                    self.model_tester.encoder_attention_heads,
                    self.model_tester.encoder_seq_length,
                    self.model_tester.encoder_seq_length,
                ],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", len(self.model_tester.encoder_in_channels) - 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            self.assertListEqual(
                list(hidden_states[1].shape[-2:]),
                [
                    self.model_tester.image_size // self.model_tester.feat_strides[-1],
                    self.model_tester.image_size // self.model_tester.feat_strides[-1],
                ],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.decoder_layers + 1
                )

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)

                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [self.model_tester.num_queries, self.model_tester.d_model],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        # we take the first output since last_hidden_state is the first item
        output = outputs[0]

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_attentions = outputs.encoder_attentions[0]
        encoder_hidden_states.retain_grad()
        encoder_attentions.retain_grad()

        decoder_attentions = outputs.decoder_attentions[0]
        decoder_attentions.retain_grad()

        cross_attentions = outputs.cross_attentions[0]
        cross_attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(encoder_attentions.grad)
        self.assertIsNotNone(decoder_attentions.grad)
        self.assertIsNotNone(cross_attentions.grad)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_different_timm_backbone(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # let's pick a random timm backbone
        config.encoder_in_channels = [24, 40, 432]
        config.backbone = "tf_mobilenetv3_small_075"
        config.backbone_config = None
        config.use_timm_backbone = True
        config.backbone_kwargs = {"out_indices": [2, 3, 4]}

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if model_class.__name__ == "DeimForObjectDetection":
                expected_shape = (
                    self.model_tester.batch_size,
                    self.model_tester.num_queries,
                    self.model_tester.num_labels,
                )
                self.assertEqual(outputs.logits.shape, expected_shape)
                # Confirm out_indices was propagated to backbone
                self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3)
            else:
                # Confirm out_indices was propagated to backbone
                self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3)

            self.assertTrue(outputs)

    def test_hf_backbone(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Load a pretrained HF checkpoint as backbone
        config.backbone = "microsoft/resnet-18"
        config.backbone_config = None
        config.use_timm_backbone = False
        config.use_pretrained_backbone = True
        config.backbone_kwargs = {"out_indices": [2, 3, 4]}

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if model_class.__name__ == "DeimForObjectDetection":
                expected_shape = (
                    self.model_tester.batch_size,
                    self.model_tester.num_queries,
                    self.model_tester.num_labels,
                )
                self.assertEqual(outputs.logits.shape, expected_shape)
                # Confirm out_indices was propagated to backbone
                self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3)
            else:
                # Confirm out_indices was propagated to backbone
                self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3)

            self.assertTrue(outputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.initializer_bias_prior_prob = 0.2
        bias_value = -1.3863  # -log_e((1 - 0.2) / 0.2)

        failed_cases = []

        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DeimConvEncoder":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if ("class_embed" in name and "bias" in name) or "enc_score_head.bias" in name:
                        bias_tensor = torch.full_like(param.data, bias_value)
                        try:
                            torch.testing.assert_close(param.data, bias_tensor, atol=1e-4, rtol=1e-4)
                        except AssertionError:
                            failed_cases.append(
                                f"Parameter {name} of model {model_class} seems not properly initialized. "
                                f"Biases should be initialized to {bias_value}, got {param.data}"
                            )
                    elif (
                        "level_embed" in name
                        or "sampling_offsets.bias" in name
                        or "value_proj" in name
                        or "output_proj" in name
                        or "reference_points" in name
                        or "enc_score_head.weight" in name
                        or ("class_embed" in name and "weight" in name)
                        or name in backbone_params
                    ):
                        continue
                    else:
                        mean = param.data.mean()
                        round_mean = (mean * 1e9).round() / 1e9
                        round_mean = round_mean.item()
                        if round_mean not in [0.0, 1.0]:
                            failed_cases.append(
                                f"Parameter {name} of model {model_class} seems not properly initialized. "
                                f"Mean is {round_mean}, but should be in [0, 1]"
                            )

        message = "\n" + "\n".join(failed_cases)
        self.assertTrue(not failed_cases, message)

    @parameterized.expand(["float32", "float16", "bfloat16"])
    @require_torch_gpu
    @slow
    def test_inference_with_different_dtypes(self, torch_dtype_str):
        torch_dtype = {
            "float32": torch.float32,
            "float16": torch.float16,
            "bfloat16": torch.bfloat16,
        }[torch_dtype_str]

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device).to(torch_dtype)
            model.eval()
            for key, tensor in inputs_dict.items():
                if tensor.dtype == torch.float32:
                    inputs_dict[key] = tensor.to(torch_dtype)
            with torch.no_grad():
                _ = model(**self._prepare_for_class(inputs_dict, model_class))

    @parameterized.expand(["float32", "float16", "bfloat16"])
    @require_torch_gpu
    @slow
    def test_inference_equivalence_for_static_and_dynamic_anchors(self, torch_dtype_str):
        torch_dtype = {
            "float32": torch.float32,
            "float16": torch.float16,
            "bfloat16": torch.bfloat16,
        }[torch_dtype_str]

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        h, w = inputs_dict["pixel_values"].shape[-2:]

        # convert inputs to the desired dtype
        for key, tensor in inputs_dict.items():
            if tensor.dtype == torch.float32:
                inputs_dict[key] = tensor.to(torch_dtype)

        for model_class in self.all_model_classes:
            with tempfile.TemporaryDirectory() as tmpdirname:
                model_class(config).save_pretrained(tmpdirname)
                model_static = model_class.from_pretrained(
                    tmpdirname, anchor_image_size=[h, w], device_map=torch_device, torch_dtype=torch_dtype
                ).eval()
                model_dynamic = model_class.from_pretrained(
                    tmpdirname, anchor_image_size=None, device_map=torch_device, torch_dtype=torch_dtype
                ).eval()

                self.assertIsNotNone(model_static.config.anchor_image_size)
                self.assertIsNone(model_dynamic.config.anchor_image_size)

                with torch.no_grad():
                    outputs_static = model_static(**self._prepare_for_class(inputs_dict, model_class))
                    outputs_dynamic = model_dynamic(**self._prepare_for_class(inputs_dict, model_class))

                torch.testing.assert_close(
                    outputs_static.last_hidden_state, outputs_dynamic.last_hidden_state, rtol=1e-4, atol=1e-4
                )

TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class DeimModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return RTDetrImageProcessor.from_pretrained(CHECKPOINT) if is_vision_available() else None

    def test_inference_object_detection_head(self):
        model = DeimForObjectDetection.from_pretrained(CHECKPOINT).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape_logits = torch.Size((1, 300, model.config.num_labels))
        self.assertEqual(outputs.logits.shape, expected_shape_logits)

        expected_logits = torch.tensor(
            [
                [-3.8097816, -4.7724586, -5.994499],
                [-5.2974715, -9.499067, -6.1653666],
                [-5.3502765, -3.9530406, -6.3630295],
            ]
        ).to(torch_device)
        expected_boxes = torch.tensor(
            [
                [0.7677696, 0.41479152, 0.46441072],
                [0.16912134, 0.19869131, 0.2123824],
                [0.2581653, 0.54818195, 0.47512347],
            ]
        ).to(torch_device)

        torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, atol=1e-4, rtol=1e-4)

        expected_shape_boxes = torch.Size((1, 300, 4))
        self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes)
        torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4, rtol=1e-4)

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.0, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9642, 0.9542, 0.9536, 0.8548], device=torch_device)
        expected_labels = [15, 65, 15, 57]
        expected_slice_boxes = torch.tensor(
            [
                [1.3186283e01, 5.4130211e01, 3.1726535e02, 4.7212445e02],
                [4.0275269e01, 7.2975174e01, 1.7620003e02, 1.1776848e02],
                [3.4276117e02, 2.3427944e01, 6.3998401e02, 3.7477191e02],
                [5.8418274e-01, 1.1794567e00, 6.3933154e02, 4.7485995e02],
            ],
            device=torch_device,
        )
        torch.testing.assert_close(results["scores"][:4], expected_scores, atol=1e-3, rtol=1e-4)
        self.assertSequenceEqual(results["labels"][:4].tolist(), expected_labels)
        torch.testing.assert_close(results["boxes"][:4], expected_slice_boxes[:4], atol=1e-3, rtol=1e-4)
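Outside the test harness, the integration path above maps onto a short inference sketch; the image path below is a placeholder, and the checkpoint id is simply the one the tests use:

import torch
from PIL import Image
from transformers import DeimForObjectDetection, RTDetrImageProcessor

checkpoint = "ustc-community/dfine_s_coco"  # same id as CHECKPOINT above
model = DeimForObjectDetection.from_pretrained(checkpoint)
image_processor = RTDetrImageProcessor.from_pretrained(checkpoint)

image = Image.open("path/to/image.jpg")  # placeholder local image path
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
results = image_processor.post_process_object_detection(
    outputs, threshold=0.5, target_sizes=[image.size[::-1]]
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), [round(v, 1) for v in box.tolist()])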