Avoid all-zero attention mask used in testing (#26469)

fix

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
Yih-Dar 2023-09-29 11:06:06 +02:00 committed by GitHub
parent 9b23d0de0e
commit 391177441b

@@ -2960,7 +2960,8 @@ def ids_tensor(shape, vocab_size, rng=None, name=None):
 def random_attention_mask(shape, rng=None, name=None):
     attn_mask = ids_tensor(shape, vocab_size=2, rng=None, name=None)
     # make sure that at least one token is attended to for each batch
-    attn_mask[:, -1] = 1
+    # we choose the 1st token so this property of `at least one being non-zero` still holds after applying causal mask
+    attn_mask[:, 0] = 1
     return attn_mask
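
Below is a minimal sketch (not part of the commit, assuming PyTorch) illustrating why the mask forces position 0 rather than the last position: under a causal mask a token at position i can only attend to positions <= i, so a mask whose only non-zero entry is the last token leaves every earlier row with nothing to attend to, while a non-zero first token keeps at least one attendable position in every row.

    import torch

    seq_len = 4
    # lower-triangular causal mask: token i may attend to positions <= i
    causal = torch.tril(torch.ones(seq_len, seq_len, dtype=torch.long))

    # mask that only attends to the LAST token: rows 0..2 end up all-zero after causal masking
    last_only = torch.zeros(seq_len, dtype=torch.long)
    last_only[-1] = 1
    print((causal * last_only).sum(dim=-1))  # tensor([0, 0, 0, 1])

    # mask that attends to the FIRST token: every row keeps at least one non-zero entry
    first_only = torch.zeros(seq_len, dtype=torch.long)
    first_only[0] = 1
    print((causal * first_only).sum(dim=-1))  # tensor([1, 1, 1, 1])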