Fix auxiliary loss related code in transformers (#28406)
* [DETA] fix freeze/unfreeze function
* Update src/transformers/models/deta/modeling_deta.py
  Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com>
* Update src/transformers/models/deta/modeling_deta.py
  Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com>
* add freeze/unfreeze test case in DETA
* fix typo
* fix typo 2
* fix: enable aux and enc loss in training pipeline
* Add unsynced variables from original DETA for training
* modification for passing CI test
* make style
* make fix
* manual make fix
* change deta_modeling_test of configuration 'two_stage' default to TRUE and minor change of dist checking
* remove print
* divide configuration in DetaModel and DetaForObjectDetection
* an image smaller than 224 will give a topk error
* pred_boxes and logits should be equivalent to two_stage_num_proposals
* add missing part in DetaConfig
* Update src/transformers/models/deta/modeling_deta.py
  Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>
* add docstring in configuration and prettify TODO part
* change distributed-related code to accelerate
* Update src/transformers/models/deta/configuration_deta.py
  Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>
* Update tests/models/deta/test_modeling_deta.py
  Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>
* protect importing accelerate
* change variable name to specific value
* wrong import
* fix aux_loss in conditional_detr
* add test aux_loss
* add aux_loss test in deta and table_transformer
* fix yolos since it doesn't have auxiliary function
* fix maskformer auxiliary_loss related code
* make style
* change param 'auxiliary_loss' to 'use_auxiliary_loss'
* change param 'auxiliary_loss' to 'use_auxiliary_loss' in tests
* make style & fix-copies, also revert yolos related parameter
* revert variable name 'use_auxiliary_loss' to 'auxiliary_loss' due to DetrConfig
* revert variable name in yolos
* revert maskformer
* add aux_loss test in maskformer
* make style
* Update src/transformers/models/yolos/configuration_yolos.py
  Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>

---------

Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com>
Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>
commit 3f69f415ad
parent 948ffff407
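For orientation before the diffs: in the DETR-family models touched here, enabling `config.auxiliary_loss` makes the model compute class and box predictions for every decoder layer and pair all but the last into a list of per-layer output dicts. A minimal sketch of that helper, following the names used in the hunks below (the exact implementation in transformers may differ slightly):

import torch

@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_coord):
    # outputs_class / outputs_coord stack one prediction per decoder layer;
    # the last layer's prediction is the model's main output, so only the
    # earlier layers become auxiliary outputs (hence the
    # `num_hidden_layers - 1` count expected by the tests below).
    return [
        {"logits": logits, "pred_boxes": boxes}
        for logits, boxes in zip(outputs_class[:-1], outputs_coord[:-1])
    ]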
src/transformers/models/conditional_detr/modeling_conditional_detr.py

@@ -1874,8 +1874,8 @@ class ConditionalDetrForObjectDetection(ConditionalDetrPreTrainedModel):
                 intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4]
                 outputs_class = self.class_labels_classifier(intermediate)
 
-                for lvl in range(hs.shape[0]):
-                    tmp = self.bbox_predictor(hs[lvl])
+                for lvl in range(intermediate.shape[0]):
+                    tmp = self.bbox_predictor(intermediate[lvl])
                     tmp[..., :2] += reference_before_sigmoid
                     outputs_coord = tmp.sigmoid()
                     outputs_coords.append(outputs_coord)
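The removed lines referenced `hs`, a name carried over from the upstream Conditional DETR code that is never defined in this method, so enabling `config.auxiliary_loss` previously raised a NameError here. A hedged sketch of the corrected per-layer box prediction, excerpted from forward() (shapes in the comments are assumptions):

# intermediate: (num_decoder_layers, batch_size, num_queries, d_model)
outputs_coords = []
for lvl in range(intermediate.shape[0]):
    tmp = self.bbox_predictor(intermediate[lvl])  # (batch_size, num_queries, 4) box logits
    tmp[..., :2] += reference_before_sigmoid      # offset (cx, cy) by the query reference points
    outputs_coord = tmp.sigmoid()                 # normalized (cx, cy, w, h)
    outputs_coords.append(outputs_coord)
outputs_coord = torch.stack(outputs_coords)       # stacked per layer for _set_aux_loss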
@@ -2118,9 +2118,9 @@ class ConditionalDetrForSegmentation(ConditionalDetrPreTrainedModel):
             outputs_loss["pred_masks"] = pred_masks
             if self.config.auxiliary_loss:
                 intermediate = decoder_outputs.intermediate_hidden_states if return_dict else decoder_outputs[-1]
-                outputs_class = self.class_labels_classifier(intermediate)
-                outputs_coord = self.bbox_predictor(intermediate).sigmoid()
-                auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord)
+                outputs_class = self.conditional_detr.class_labels_classifier(intermediate)
+                outputs_coord = self.conditional_detr.bbox_predictor(intermediate).sigmoid()
+                auxiliary_outputs = self.conditional_detr._set_aux_loss(outputs_class, outputs_coord)
                 outputs_loss["auxiliary_outputs"] = auxiliary_outputs
 
             loss_dict = criterion(outputs_loss, labels)
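This fix works because `ConditionalDetrForSegmentation` does not own the prediction heads itself: it wraps the detection model as a submodule, so `class_labels_classifier`, `bbox_predictor`, and `_set_aux_loss` must be reached through that attribute. A minimal structural sketch (constructor abridged):

class ConditionalDetrForSegmentation(ConditionalDetrPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # the detection model owns the heads used for the auxiliary loss
        self.conditional_detr = ConditionalDetrForObjectDetection(config)
        # ... mask head and remaining segmentation-specific modules ...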
tests/models/conditional_detr/test_modeling_conditional_detr.py

@@ -399,6 +399,22 @@ class ConditionalDetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
         self.assertIsNotNone(decoder_attentions.grad)
         self.assertIsNotNone(cross_attentions.grad)
 
+    def test_forward_auxiliary_loss(self):
+        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
+        config.auxiliary_loss = True
+
+        # only test for object detection and segmentation model
+        for model_class in self.all_model_classes[1:]:
+            model = model_class(config)
+            model.to(torch_device)
+
+            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
+
+            outputs = model(**inputs)
+
+            self.assertIsNotNone(outputs.auxiliary_outputs)
+            self.assertEqual(len(outputs.auxiliary_outputs), self.model_tester.num_hidden_layers - 1)
+
     def test_forward_signature(self):
         config, _ = self.model_tester.prepare_config_and_inputs_for_common()
 
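Outside the test suite, the same code path can be exercised on a released checkpoint. A hedged usage sketch (the checkpoint name is an assumption; labels must be supplied for the loss branch, and with it the auxiliary outputs, to be computed):

import torch
from transformers import ConditionalDetrForObjectDetection

# passing auxiliary_loss=True overrides the config flag the test toggles
model = ConditionalDetrForObjectDetection.from_pretrained(
    "microsoft/conditional-detr-resnet-50", auxiliary_loss=True
)
pixel_values = torch.randn(1, 3, 800, 800)
labels = [{"class_labels": torch.tensor([0]), "boxes": torch.tensor([[0.5, 0.5, 0.2, 0.2]])}]
outputs = model(pixel_values=pixel_values, labels=labels)
print(len(outputs.auxiliary_outputs))  # one entry per intermediate decoder layer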
tests/models/deformable_detr/test_modeling_deformable_detr.py

@@ -476,6 +476,22 @@ class DeformableDetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
         self.assertIsNotNone(decoder_attentions.grad)
         self.assertIsNotNone(cross_attentions.grad)
 
+    def test_forward_auxiliary_loss(self):
+        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
+        config.auxiliary_loss = True
+
+        # only test for object detection and segmentation model
+        for model_class in self.all_model_classes[1:]:
+            model = model_class(config)
+            model.to(torch_device)
+
+            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
+
+            outputs = model(**inputs)
+
+            self.assertIsNotNone(outputs.auxiliary_outputs)
+            self.assertEqual(len(outputs.auxiliary_outputs), self.model_tester.num_hidden_layers - 1)
+
     def test_forward_signature(self):
         config, _ = self.model_tester.prepare_config_and_inputs_for_common()
 
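As in the Conditional DETR test, `self.all_model_classes[1:]` deliberately skips the first entry: the bare base model exposes no class or box heads and therefore never produces `auxiliary_outputs`. A sketch of the ordering these testers rely on (tuple contents are an assumption based on the usual transformers test layout):

# base model first, task heads after; [1:] keeps only the head models
all_model_classes = (DeformableDetrModel, DeformableDetrForObjectDetection)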
tests/models/deta/test_modeling_deta.py

@@ -449,6 +449,22 @@ class DetaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
         self.assertIsNotNone(decoder_attentions.grad)
         self.assertIsNotNone(cross_attentions.grad)
 
+    def test_forward_auxiliary_loss(self):
+        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
+        config.auxiliary_loss = True
+
+        # only test for object detection and segmentation model
+        for model_class in self.all_model_classes[1:]:
+            model = model_class(config)
+            model.to(torch_device)
+
+            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
+
+            outputs = model(**inputs)
+
+            self.assertIsNotNone(outputs.auxiliary_outputs)
+            self.assertEqual(len(outputs.auxiliary_outputs), self.model_tester.num_hidden_layers - 1)
+
     def test_forward_signature(self):
         config, _ = self.model_tester.prepare_config_and_inputs_for_common()
 
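The DETA hunk mirrors the tests above. The commit message also mentions fixing DETA's freeze/unfreeze helpers; a hedged sketch of what such helpers typically look like in this codebase (the attribute path follows the DETR-family convention and is an assumption here):

def freeze_backbone(self):
    # stop gradients from flowing into the CNN backbone
    for name, param in self.backbone.conv_encoder.model.named_parameters():
        param.requires_grad_(False)

def unfreeze_backbone(self):
    for name, param in self.backbone.conv_encoder.model.named_parameters():
        param.requires_grad_(True)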
tests/models/maskformer/test_modeling_maskformer.py

@@ -362,6 +362,24 @@ class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         self.assertIsNotNone(transformer_decoder_hidden_states.grad)
         self.assertIsNotNone(attentions.grad)
 
+    def test_forward_auxiliary_loss(self):
+        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
+        config.use_auxiliary_loss = True
+        config.output_auxiliary_logits = True
+        config.output_hidden_states = True
+
+        # only test for object detection and segmentation model
+        for model_class in self.all_model_classes[1:]:
+            model = model_class(config)
+            model.to(torch_device)
+
+            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
+
+            outputs = model(**inputs)
+
+            self.assertIsNotNone(outputs.auxiliary_logits)
+            self.assertEqual(len(outputs.auxiliary_logits), self.model_tester.num_channels - 1)
+
 
 TOLERANCE = 1e-4
 
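Note the naming split the revert commits in the message left in place: MaskFormer keeps `use_auxiliary_loss` (plus `output_auxiliary_logits` to surface the logits), while the DETR family keeps `auxiliary_loss` for compatibility with DetrConfig. A minimal configuration sketch:

from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation

config = MaskFormerConfig(
    use_auxiliary_loss=True,       # score intermediate decoder predictions in the loss
    output_auxiliary_logits=True,  # expose outputs.auxiliary_logits to the caller
)
model = MaskFormerForInstanceSegmentation(config)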
tests/models/table_transformer/test_modeling_table_transformer.py

@@ -411,6 +411,22 @@ class TableTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
         self.assertIsNotNone(decoder_attentions.grad)
         self.assertIsNotNone(cross_attentions.grad)
 
+    def test_forward_auxiliary_loss(self):
+        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
+        config.auxiliary_loss = True
+
+        # only test for object detection and segmentation model
+        for model_class in self.all_model_classes[1:]:
+            model = model_class(config)
+            model.to(torch_device)
+
+            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
+
+            outputs = model(**inputs)
+
+            self.assertIsNotNone(outputs.auxiliary_outputs)
+            self.assertEqual(len(outputs.auxiliary_outputs), self.model_tester.num_hidden_layers - 1)
+
     def test_forward_signature(self):
         config, _ = self.model_tester.prepare_config_and_inputs_for_common()
 