Fix attention mask expansion when converting to executorch (#38637)

pweglik 2025-06-09 17:00:55 +02:00 committed by GitHub
parent 19224c3642
commit 282d6684dc


@@ -193,7 +193,7 @@ class AttentionMaskConverter:
         expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
-        inverted_mask = 1.0 - expanded_mask
+        inverted_mask = torch.tensor(1.0, dtype=dtype) - expanded_mask
         return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
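
For context, here is a minimal, self-contained sketch of the mask expansion this hunk patches. The function name `expand_attention_mask` and the example values are illustrative, not the library's API; the point of the fix is that building the constant as `torch.tensor(1.0, dtype=dtype)` rather than the Python float `1.0` keeps the subtraction dtype-stable when the graph is traced for ExecuTorch export.

```python
from typing import Optional

import torch


def expand_attention_mask(
    mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None
) -> torch.Tensor:
    """Expand a (bsz, src_len) padding mask to (bsz, 1, tgt_len, src_len),
    then invert it so masked positions hold the dtype's minimum value."""
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
    # Use a tensor constant with an explicit dtype instead of the Python
    # float 1.0, mirroring the fix above, so the subtraction stays
    # dtype-stable under torch.export tracing.
    inverted_mask = torch.tensor(1.0, dtype=dtype) - expanded_mask
    return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)


# Example: batch of two sequences, the second padded at its last position.
attn_mask = torch.tensor([[1, 1, 1], [1, 1, 0]])
out = expand_attention_mask(attn_mask, torch.float32)
print(out.shape)      # torch.Size([2, 1, 3, 3])
print(out[1, 0, 0])   # last entry holds torch.finfo(torch.float32).min
```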