Fix TF Roberta for mixed precision training (#11675)

Julien Plu 2021-05-11 18:01:03 +02:00 committed by GitHub
parent a135f59536
commit d9b286272c

@@ -541,7 +541,9 @@ class TFRobertaMainLayer(tf.keras.layers.Layer):
         # Since we are adding it to the raw scores before the softmax, this is
         # effectively the same as removing these entirely.
         extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
-        extended_attention_mask = tf.multiply(tf.subtract(1.0, extended_attention_mask), -10000.0)
+        one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
+        ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
+        extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
 
         # Prepare head mask if needed
         # 1.0 in head_mask indicate we keep the head
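
The patch replaces the bare Python literals 1.0 and -10000.0 with tf.constant values built from embedding_output.dtype, so the additive attention mask follows the compute dtype (float16 under Keras mixed precision) instead of potentially clashing with float32 constants. Below is a minimal sketch of the same pattern outside the model; the tensors embedding_output and attention_mask are placeholders standing in for the real layer values, not part of the commit.

import tensorflow as tf

# Run Keras layers with a float16 compute dtype; in the actual model,
# embedding_output would come out of the embedding layer as float16 under this policy.
tf.keras.mixed_precision.set_global_policy("mixed_float16")

# Placeholder tensors standing in for the model's embedding output and attention mask.
embedding_output = tf.zeros((2, 4, 8), dtype=tf.float16)
attention_mask = tf.constant([[1, 1, 1, 0], [1, 1, 0, 0]])

# Broadcastable mask of shape (batch, 1, 1, seq_len), cast to the compute dtype.
extended_attention_mask = tf.cast(attention_mask[:, tf.newaxis, tf.newaxis, :], dtype=embedding_output.dtype)

# Build the constants in the same dtype as the embeddings, mirroring the patch,
# rather than relying on the untyped literals 1.0 and -10000.0.
one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)

print(extended_attention_mask.dtype)  # float16, matching embedding_output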