diff --git a/src/transformers/models/sam_hq/modeling_sam_hq.py b/src/transformers/models/sam_hq/modeling_sam_hq.py
index fd747dffd6e..84bca47c34c 100644
--- a/src/transformers/models/sam_hq/modeling_sam_hq.py
+++ b/src/transformers/models/sam_hq/modeling_sam_hq.py
@@ -1517,9 +1517,9 @@ class SamHQModel(SamHQPreTrainedModel):
         return SamHQImageSegmentationOutput(
             iou_scores=mask_decoder_output.iou_scores,
             pred_masks=mask_decoder_output.masks,
-            mask_decoder_attention=mask_decoder_output.mask_decoder_attentions,
+            mask_decoder_attentions=mask_decoder_output.mask_decoder_attentions,
             vision_hidden_states=vision_outputs.hidden_states,
-            vision_attentions=vision_outputs.vision_attentions,
+            vision_attentions=vision_outputs.attentions,
         )
diff --git a/src/transformers/models/sam_hq/modular_sam_hq.py b/src/transformers/models/sam_hq/modular_sam_hq.py
index d34f082bc2a..f0ab4a875a0 100644
--- a/src/transformers/models/sam_hq/modular_sam_hq.py
+++ b/src/transformers/models/sam_hq/modular_sam_hq.py
@@ -610,9 +610,9 @@ class SamHQModel(SamModel):
         return SamHQImageSegmentationOutput(
             iou_scores=mask_decoder_output.iou_scores,
             pred_masks=mask_decoder_output.masks,
-            mask_decoder_attention=mask_decoder_output.mask_decoder_attentions,
+            mask_decoder_attentions=mask_decoder_output.mask_decoder_attentions,
             vision_hidden_states=vision_outputs.hidden_states,
-            vision_attentions=vision_outputs.vision_attentions,
+            vision_attentions=vision_outputs.attentions,
         )
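Both hunks apply the same naming fixes to the generated modeling file and its modular source: the constructor keyword `mask_decoder_attention` is corrected to the plural `mask_decoder_attentions` that `SamHQImageSegmentationOutput` declares, and the attention maps are read from `vision_outputs.attentions`, since the diff indicates the vision encoder output has no `vision_attentions` attribute. A minimal sketch (not part of the PR; the tensor shapes are illustrative assumptions, and it assumes `SamHQImageSegmentationOutput` is importable from the modeling module) showing why the old keyword could not have worked:

```python
# Sketch only: SamHQImageSegmentationOutput is a dataclass-based ModelOutput,
# so its __init__ accepts only the declared field names. Shapes are illustrative.
import torch

from transformers.models.sam_hq.modeling_sam_hq import SamHQImageSegmentationOutput

output = SamHQImageSegmentationOutput(
    iou_scores=torch.rand(1, 1, 3),
    pred_masks=torch.rand(1, 1, 3, 256, 256),
    mask_decoder_attentions=None,  # plural field name; `mask_decoder_attention=` raises TypeError
    vision_hidden_states=None,
    vision_attentions=None,  # populated from vision_outputs.attentions after the fix
)
print(output.mask_decoder_attentions)  # None here; real attentions when requested via output_attentions=True
```

Because dataclass-generated initializers reject unknown keywords, the pre-fix return path could not succeed: `vision_outputs.vision_attentions` would fail with an AttributeError during argument evaluation, and the misspelled `mask_decoder_attention=` keyword would raise a TypeError even if it did not.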