Rename detr targets to labels (#12280)
* Rename target to labels in DetrFeatureExtractor
* Update DetrFeatureExtractor tests accordingly
* Improve docs of DetrFeatureExtractor
* Improve docs
* Make style
parent 7682e97702
commit 1fc6817a30
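The user-visible effect of this commit is that annotation dictionaries returned by DetrFeatureExtractor now sit under the "labels" key instead of "target", matching the labels argument the DETR models consume. A minimal before/after sketch, assuming a local image file and a hand-written COCO-style annotation (the path and all values are illustrative):

    from PIL import Image
    from transformers import DetrFeatureExtractor

    image = Image.open("000000039769.jpg")  # illustrative path
    # a single COCO-style object annotation; field names follow the tests below
    annotations = [{"image_id": 39769, "category_id": 17, "bbox": [10.0, 20.0, 200.0, 150.0],
                    "area": 30000.0, "iscrowd": 0}]
    target = {"image_id": 39769, "annotations": annotations}

    feature_extractor = DetrFeatureExtractor.from_pretrained("facebook/detr-resnet-50")
    encoding = feature_extractor(images=image, annotations=target, return_tensors="pt")

    boxes = encoding["labels"][0]["boxes"]  # was encoding["target"][0]["boxes"] before this commit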
@@ -64,11 +64,6 @@ class DetrConfig(PretrainedConfig):
             The dropout ratio for the attention probabilities.
         activation_dropout (:obj:`float`, `optional`, defaults to 0.0):
             The dropout ratio for activations inside the fully connected layer.
-        classifier_dropout (:obj:`float`, `optional`, defaults to 0.0):
-            The dropout ratio for classifier.
-        max_position_embeddings (:obj:`int`, `optional`, defaults to 1024):
-            The maximum sequence length that this model might ever be used with. Typically set this to something large
-            just in case (e.g., 512 or 1024 or 2048).
         init_std (:obj:`float`, `optional`, defaults to 0.02):
             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
         init_xavier_std (:obj:`float`, `optional`, defaults to 1):
@@ -178,7 +173,6 @@ class DetrConfig(PretrainedConfig):
         self.init_xavier_std = init_xavier_std
         self.encoder_layerdrop = encoder_layerdrop
         self.decoder_layerdrop = decoder_layerdrop
-        self.classifier_dropout = classifier_dropout
         self.num_hidden_layers = encoder_layers
         self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
         self.auxiliary_loss = auxiliary_loss
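For context on the two configuration hunks above: they drop classifier_dropout and max_position_embeddings from the DetrConfig docstring and __init__; the DetrDecoder hunk further below removes the matching max_target_positions attribute. A minimal sketch of constructing the config after this change, spelling out the defaults documented above (all other parameters keep their own defaults):

    from transformers import DetrConfig

    # classifier_dropout and max_position_embeddings are no longer set;
    # the surviving dropout/init parameters keep their documented defaults
    config = DetrConfig(
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
    )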
@@ -440,7 +440,8 @@ class DetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
                 annotations.

             return_segmentation_masks (:obj:`Dict`, :obj:`List[Dict]`, `optional`, defaults to :obj:`False`):
-                Whether to also return instance segmentation masks in case :obj:`format = "coco_detection"`.
+                Whether to also include instance segmentation masks as part of the labels in case :obj:`format =
+                "coco_detection"`.

             masks_path (:obj:`pathlib.Path`, `optional`):
                 Path to the directory containing the PNG files that store the class-agnostic image segmentations. Only
@@ -465,6 +466,7 @@ class DetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
             - **pixel_values** -- Pixel values to be fed to a model.
             - **pixel_mask** -- Pixel mask to be fed to a model (when :obj:`pad_and_return_pixel_mask=True` or if
               `"pixel_mask"` is in :obj:`self.model_input_names`).
+            - **labels** -- Optional labels to be fed to a model (when :obj:`annotations` are provided)
         """
         # Input type checking for clearer error

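Per the updated return section, the encoding carries up to three entries. Continuing the sketch near the top of this page, a quick inspection (key order and exact shapes are indicative only):

    print(list(encoding.keys()))           # ['pixel_values', 'pixel_mask', 'labels']
    print(encoding["pixel_values"].shape)  # torch.Size([1, 3, H, W]) after resizing
    print(type(encoding["labels"]))        # list, one dict of tensors per image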
@@ -613,7 +615,7 @@ class DetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
             if not is_torch_available():
                 raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")

-            encoded_inputs["target"] = [
+            encoded_inputs["labels"] = [
                 {k: torch.from_numpy(v) for k, v in target.items()} for target in annotations
             ]

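The renamed assignment above is the point where each per-image annotation dict, holding numpy arrays at that stage, is converted to torch tensors. The same idiom in isolation, with toy data standing in for real annotations:

    import numpy as np
    import torch

    annotations = [{"boxes": np.zeros((6, 4), dtype=np.float32),
                    "class_labels": np.array([75, 75, 63, 65, 17, 17])}]
    labels = [{k: torch.from_numpy(v) for k, v in target.items()} for target in annotations]
    print(labels[0]["boxes"].shape)  # torch.Size([6, 4])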
@@ -828,8 +828,8 @@ DETR_INPUTS_DOCSTRING = r"""
         pixel_values (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_channels, height, width)`):
             Pixel values. Padding will be ignored by default should you provide it.

-            Pixel values can be obtained using :class:`~transformers.DetrTokenizer`. See
-            :meth:`transformers.DetrTokenizer.__call__` for details.
+            Pixel values can be obtained using :class:`~transformers.DetrFeatureExtractor`. See
+            :meth:`transformers.DetrFeatureExtractor.__call__` for details.

         pixel_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, height, width)`, `optional`):
             Mask to avoid performing attention on padding pixel values. Mask values selected in ``[0, 1]``:
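The docstring fix above matters because DETR ships no tokenizer: DetrTokenizer never existed, and pixel values come from the feature extractor. A minimal sketch of producing both documented inputs and feeding them to a model (DetrModel and the checkpoint are illustrative; image as in the first sketch):

    from transformers import DetrFeatureExtractor, DetrModel

    feature_extractor = DetrFeatureExtractor.from_pretrained("facebook/detr-resnet-50")
    model = DetrModel.from_pretrained("facebook/detr-resnet-50")

    inputs = feature_extractor(images=image, return_tensors="pt")
    outputs = model(pixel_values=inputs["pixel_values"], pixel_mask=inputs["pixel_mask"])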
@@ -990,7 +990,6 @@ class DetrDecoder(DetrPreTrainedModel):
         super().__init__(config)
         self.dropout = config.dropout
         self.layerdrop = config.decoder_layerdrop
-        self.max_target_positions = config.max_position_embeddings

         self.layers = nn.ModuleList([DetrDecoderLayer(config) for _ in range(config.decoder_layers)])
         # in DETR, the decoder uses layernorm after the last decoder layer output
@@ -253,8 +253,7 @@ class DetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
         target = {"image_id": 39769, "annotations": target}

         # encode them
-        # TODO replace by facebook/detr-resnet-50
-        feature_extractor = DetrFeatureExtractor.from_pretrained("nielsr/detr-resnet-50")
+        feature_extractor = DetrFeatureExtractor.from_pretrained("facebook/detr-resnet-50")
         encoding = feature_extractor(images=image, annotations=target, return_tensors="pt")

         # verify pixel values
@@ -266,27 +265,27 @@ class DetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):

         # verify area
         expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
-        assert torch.allclose(encoding["target"][0]["area"], expected_area)
+        assert torch.allclose(encoding["labels"][0]["area"], expected_area)
         # verify boxes
         expected_boxes_shape = torch.Size([6, 4])
-        self.assertEqual(encoding["target"][0]["boxes"].shape, expected_boxes_shape)
+        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
         expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
-        assert torch.allclose(encoding["target"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)
+        assert torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)
         # verify image_id
         expected_image_id = torch.tensor([39769])
-        assert torch.allclose(encoding["target"][0]["image_id"], expected_image_id)
+        assert torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)
         # verify is_crowd
         expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
-        assert torch.allclose(encoding["target"][0]["iscrowd"], expected_is_crowd)
+        assert torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)
         # verify class_labels
         expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
-        assert torch.allclose(encoding["target"][0]["class_labels"], expected_class_labels)
+        assert torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)
         # verify orig_size
         expected_orig_size = torch.tensor([480, 640])
-        assert torch.allclose(encoding["target"][0]["orig_size"], expected_orig_size)
+        assert torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)
         # verify size
         expected_size = torch.tensor([800, 1066])
-        assert torch.allclose(encoding["target"][0]["size"], expected_size)
+        assert torch.allclose(encoding["labels"][0]["size"], expected_size)

     @slow
     def test_call_pytorch_with_coco_panoptic_annotations(self):
@@ -313,27 +312,27 @@ class DetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):

         # verify area
         expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
-        assert torch.allclose(encoding["target"][0]["area"], expected_area)
+        assert torch.allclose(encoding["labels"][0]["area"], expected_area)
         # verify boxes
         expected_boxes_shape = torch.Size([6, 4])
-        self.assertEqual(encoding["target"][0]["boxes"].shape, expected_boxes_shape)
+        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
         expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
-        assert torch.allclose(encoding["target"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)
+        assert torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)
         # verify image_id
         expected_image_id = torch.tensor([39769])
-        assert torch.allclose(encoding["target"][0]["image_id"], expected_image_id)
+        assert torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)
         # verify is_crowd
         expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
-        assert torch.allclose(encoding["target"][0]["iscrowd"], expected_is_crowd)
+        assert torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)
         # verify class_labels
         expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
-        assert torch.allclose(encoding["target"][0]["class_labels"], expected_class_labels)
+        assert torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)
         # verify masks
         expected_masks_sum = 822338
-        self.assertEqual(encoding["target"][0]["masks"].sum().item(), expected_masks_sum)
+        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
         # verify orig_size
         expected_orig_size = torch.tensor([480, 640])
-        assert torch.allclose(encoding["target"][0]["orig_size"], expected_orig_size)
+        assert torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)
         # verify size
         expected_size = torch.tensor([800, 1066])
-        assert torch.allclose(encoding["target"][0]["size"], expected_size)
+        assert torch.allclose(encoding["labels"][0]["size"], expected_size)
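The panoptic test mirrors the detection one, additionally verifying masks. Per the masks_path documentation above, a panoptic call also needs the directory of class-agnostic PNG segmentations. A hedged sketch assuming the standard COCO panoptic annotation layout (checkpoint, paths, and the segments_info entry are illustrative):

    import pathlib
    from transformers import DetrFeatureExtractor

    feature_extractor = DetrFeatureExtractor.from_pretrained(
        "facebook/detr-resnet-50-panoptic", format="coco_panoptic"
    )
    target = {
        "image_id": 39769,
        "file_name": "000000039769.png",  # mask PNG located inside masks_path
        "segments_info": [{"id": 1, "category_id": 17, "iscrowd": 0,
                           "bbox": [10.0, 20.0, 200.0, 150.0], "area": 30000.0}],
    }
    encoding = feature_extractor(
        images=image,
        annotations=target,
        masks_path=pathlib.Path("path/to/coco_panoptic"),
        return_tensors="pt",
    )
    masks = encoding["labels"][0]["masks"]  # summed and checked in the test above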