Mirror of https://github.com/huggingface/transformers.git (synced 2025-07-31 02:02:21 +06:00)
Parent: 1d69028989
Commit: a163c9ca5b
@@ -745,7 +745,7 @@ class T5Stack(T5PreTrainedModel):
             # layer_outputs = hidden-states, key-value-states (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
             position_bias = layer_outputs[3 if self.output_attentions else 2]
             if self.is_decoder and encoder_hidden_states is not None:
-                encoder_decoder_position_bias = layer_outputs[4 if self.output_attentions else 3]
+                encoder_decoder_position_bias = layer_outputs[5 if self.output_attentions else 3]
             # append next layer key value states
             present_key_value_states = present_key_value_states + (present_key_value_state,)
@@ -682,7 +682,7 @@ class TFT5MainLayer(tf.keras.layers.Layer):
             # layer_outputs = hidden-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
             position_bias = layer_outputs[3 if self.output_attentions else 2]
             if self.is_decoder and encoder_hidden_states is not None:
-                encoder_decoder_position_bias = layer_outputs[4 if self.output_attentions else 3]
+                encoder_decoder_position_bias = layer_outputs[5 if self.output_attentions else 3]
             # append next layer key value states
             present_key_value_states = present_key_value_states + (present_key_value_state,)
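Both hunks follow the ordering documented in the in-line comments: when output_attentions is enabled, each block's layer_outputs tuple also carries the self- and cross-attention weights, which pushes the cross-attention position bias one slot further out than the old index assumed. Under that ordering, the previous index of 4 would have picked up the cross-attention weights rather than the position bias whenever attentions were returned. A minimal sketch of the index arithmetic (a toy tuple with placeholder strings, not the actual T5 block output):

    # Illustrative only: a toy layer_outputs mirroring the order in the comments above
    # (hidden-states, key-value-states, self-attn weights, self-attn position bias,
    #  cross-attn weights, cross-attn position bias).
    def pick_cross_attn_position_bias(output_attentions: bool) -> str:
        if output_attentions:
            layer_outputs = (
                "hidden_states",             # 0
                "present_key_value_state",   # 1
                "self_attn_weights",         # 2
                "self_attn_position_bias",   # 3
                "cross_attn_weights",        # 4
                "cross_attn_position_bias",  # 5  -> hence "5 if output_attentions"
            )
        else:
            layer_outputs = (
                "hidden_states",             # 0
                "present_key_value_state",   # 1
                "self_attn_position_bias",   # 2
                "cross_attn_position_bias",  # 3  -> hence "else 3"
            )
        return layer_outputs[5 if output_attentions else 3]

    assert pick_cross_attn_position_bias(True) == "cross_attn_position_bias"
    assert pick_cross_attn_position_bias(False) == "cross_attn_position_bias"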