enable d_fine finetuning properly (#37962)
add pre_output in the front

Co-authored-by: Pavel Iakubovskii <qubvel@gmail.com>
Parent: e021bf6bf8
Commit: aa27fa75cd
@@ -1248,6 +1248,10 @@ class DFineDecoder(DFinePreTrainedModel):
             if self.class_embed is not None and (self.training or i == self.eval_idx):
                 scores = self.class_embed[i](hidden_states)
+                # Add initial logits and reference points with pre-bbox head
+                if i == 0:
+                    intermediate_logits += (scores,)
+                    intermediate_reference_points += (new_reference_points,)
                 # Lqe does not affect the performance here.
                 scores = self.lqe_layers[i](scores, pred_corners)
                 intermediate_logits += (scores,)
@@ -803,6 +803,10 @@ class DFineDecoder(RTDetrDecoder):
             if self.class_embed is not None and (self.training or i == self.eval_idx):
                 scores = self.class_embed[i](hidden_states)
+                # Add initial logits and reference points with pre-bbox head
+                if i == 0:
+                    intermediate_logits += (scores,)
+                    intermediate_reference_points += (new_reference_points,)
                 # Lqe does not affect the performance here.
                 scores = self.lqe_layers[i](scores, pred_corners)
                 intermediate_logits += (scores,)
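The sketch below illustrates, outside of the Transformers codebase, what the added lines do: on the first decoder layer the raw class scores and the reference points are also recorded before the LQE refinement, presumably so this "pre-output" head can be supervised during fine-tuning. The toy `class_embed`, `lqe_layers`, tensor shapes, and the simplified LQE call are all illustrative stand-ins, not the real D-FINE modules.

    import torch
    from torch import nn

    num_layers, num_queries, hidden_dim, num_classes = 3, 5, 16, 4

    # Toy stand-ins for the decoder's per-layer heads (illustrative only).
    class_embed = nn.ModuleList(nn.Linear(hidden_dim, num_classes) for _ in range(num_layers))
    lqe_layers = nn.ModuleList(nn.Linear(num_classes, num_classes) for _ in range(num_layers))

    hidden_states = torch.randn(1, num_queries, hidden_dim)
    new_reference_points = torch.rand(1, num_queries, 4)

    intermediate_logits = ()
    intermediate_reference_points = ()

    training, eval_idx = True, num_layers - 1
    for i in range(num_layers):
        if training or i == eval_idx:
            scores = class_embed[i](hidden_states)
            # The lines added by this commit: also keep the first layer's
            # pre-LQE ("initial") logits and its reference points.
            if i == 0:
                intermediate_logits += (scores,)
                intermediate_reference_points += (new_reference_points,)
            scores = lqe_layers[i](scores)  # simplified LQE refinement
            intermediate_logits += (scores,)

    print(len(intermediate_logits), len(intermediate_reference_points))  # -> 4 1

With three decoder layers in training mode the loop now collects four logit tensors instead of three; the extra first-layer entry is the "pre_output in the front" that the commit message refers to.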