@@ -14,7 +14,7 @@
 from typing import Dict, Optional, Tuple
 
 import flax
-import jaxlib.xla_extension as jax_xla
+import jax.numpy as jnp
 
 from .file_utils import ModelOutput
 
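Why the type swap is safe, as a minimal sketch (illustrative only, not part of the patch): `jnp.ndarray` is JAX's public array alias, and the concrete arrays that models return satisfy it, so the annotations no longer need to reach into the private `jaxlib.xla_extension` module.

    import jax.numpy as jnp

    x = jnp.ones((2, 3))               # a concrete array produced by JAX
    print(isinstance(x, jnp.ndarray))  # True: the public alias covers it
    print(type(x))                     # the underlying implementation class is private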
@@ -25,24 +25,24 @@ class FlaxBaseModelOutput(ModelOutput):
     Base class for model's outputs, with potential hidden states and attentions.
 
     Args:
-        last_hidden_state (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
+        last_hidden_state (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
             Sequence of hidden-states at the output of the last layer of the model.
-        hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the model at the output of each layer plus the initial embedding outputs.
-        attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
             heads.
     """
 
-    last_hidden_state: jax_xla.DeviceArray = None
-    hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
+    last_hidden_state: jnp.ndarray = None
+    hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    attentions: Optional[Tuple[jnp.ndarray]] = None
 
 
 @flax.struct.dataclass
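A usage sketch for the class above (assuming a `transformers` install with Flax support; the values are made up for illustration). `ModelOutput` subclasses allow both attribute access and tuple-style indexing, and fields left as `None` are dropped from the tuple view:

    import jax.numpy as jnp
    from transformers.modeling_flax_outputs import FlaxBaseModelOutput

    out = FlaxBaseModelOutput(last_hidden_state=jnp.zeros((1, 4, 8)))
    print(out.last_hidden_state.shape)      # (1, 4, 8)
    print(out[0] is out.last_hidden_state)  # True: integer indexing mirrors a tuple
    print(out.to_tuple())                   # hidden_states and attentions are None, so omitted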
@@ -51,28 +51,28 @@ class FlaxBaseModelOutputWithPast(ModelOutput):
     Base class for model's outputs, with potential hidden states and attentions.
 
     Args:
-        last_hidden_state (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
+        last_hidden_state (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
             Sequence of hidden-states at the output of the last layer of the model.
-        past_key_values (:obj:`Dict[str, jax_xla.DeviceArray]`):
+        past_key_values (:obj:`Dict[str, jnp.ndarray]`):
             Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
             auto-regressive decoding. Pre-computed key and value hidden-states are of shape `[batch_size, max_length]`.
-        hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the model at the output of each layer plus the initial embedding outputs.
-        attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
             heads.
     """
 
-    last_hidden_state: jax_xla.DeviceArray = None
-    past_key_values: Optional[Dict[str, jax_xla.DeviceArray]] = None
-    hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
+    last_hidden_state: jnp.ndarray = None
+    past_key_values: Optional[Dict[str, jnp.ndarray]] = None
+    hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    attentions: Optional[Tuple[jnp.ndarray]] = None
 
 
 @flax.struct.dataclass
@@ -81,29 +81,29 @@ class FlaxBaseModelOutputWithPooling(ModelOutput):
     Base class for model's outputs that also contains a pooling of the last hidden states.
 
     Args:
-        last_hidden_state (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
+        last_hidden_state (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
             Sequence of hidden-states at the output of the last layer of the model.
-        pooler_output (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, hidden_size)`):
+        pooler_output (:obj:`jnp.ndarray` of shape :obj:`(batch_size, hidden_size)`):
             Last layer hidden-state of the first token of the sequence (classification token) further processed by a
             Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
             prediction (classification) objective during pretraining.
-        hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the model at the output of each layer plus the initial embedding outputs.
-        attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
             heads.
     """
 
-    last_hidden_state: jax_xla.DeviceArray = None
-    pooler_output: jax_xla.DeviceArray = None
-    hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
+    last_hidden_state: jnp.ndarray = None
+    pooler_output: jnp.ndarray = None
+    hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    attentions: Optional[Tuple[jnp.ndarray]] = None
 
 
 @flax.struct.dataclass
@@ -112,44 +112,44 @@ class FlaxBaseModelOutputWithPastAndCrossAttentions(ModelOutput):
     Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
 
     Args:
-        last_hidden_state (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
+        last_hidden_state (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
             Sequence of hidden-states at the output of the last layer of the model.
 
             If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
             1, hidden_size)` is output.
-        past_key_values (:obj:`tuple(tuple(jax_xla.DeviceArray))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
-            Tuple of :obj:`tuple(jax_xla.DeviceArray)` of length :obj:`config.n_layers`, with each tuple having 2
-            tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
+        past_key_values (:obj:`tuple(tuple(jnp.ndarray))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
+            Tuple of :obj:`tuple(jnp.ndarray)` of length :obj:`config.n_layers`, with each tuple having 2 tensors of
+            shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
             ``config.is_encoder_decoder=True`` 2 additional tensors of shape :obj:`(batch_size, num_heads,
             encoder_sequence_length, embed_size_per_head)`.
 
             Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
             ``config.is_encoder_decoder=True`` in the cross-attention blocks) that can be used (see
             :obj:`past_key_values` input) to speed up sequential decoding.
-        hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the model at the output of each layer plus the initial embedding outputs.
-        attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
             heads.
-        cross_attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` and ``config.add_cross_attention=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        cross_attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` and ``config.add_cross_attention=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
             weighted average in the cross-attention heads.
     """
 
-    last_hidden_state: jax_xla.DeviceArray = None
-    past_key_values: Optional[Tuple[Tuple[jax_xla.DeviceArray]]] = None
-    hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
-    cross_attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
+    last_hidden_state: jnp.ndarray = None
+    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
+    hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    attentions: Optional[Tuple[jnp.ndarray]] = None
+    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
 
 
 @flax.struct.dataclass
@@ -159,58 +159,58 @@ class FlaxSeq2SeqModelOutput(ModelOutput):
     decoding.
 
     Args:
-        last_hidden_state (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
+        last_hidden_state (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
             Sequence of hidden-states at the output of the last layer of the decoder of the model.
 
             If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
             1, hidden_size)` is output.
-        past_key_values (:obj:`tuple(tuple(jax_xla.DeviceArray))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
-            Tuple of :obj:`tuple(jax_xla.DeviceArray)` of length :obj:`config.n_layers`, with each tuple having 2
-            tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional
-            tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+        past_key_values (:obj:`tuple(tuple(jnp.ndarray))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
+            Tuple of :obj:`tuple(jnp.ndarray)` of length :obj:`config.n_layers`, with each tuple having 2 tensors of
+            shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
+            shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
 
             Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
             blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
-        decoder_hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        decoder_hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
-        decoder_attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        decoder_attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
             self-attention heads.
-        cross_attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        cross_attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
             weighted average in the cross-attention heads.
-        encoder_last_hidden_state (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
+        encoder_last_hidden_state (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
             Sequence of hidden-states at the output of the last layer of the encoder of the model.
-        encoder_hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        encoder_hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
-        encoder_attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        encoder_attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
             self-attention heads.
     """
 
-    last_hidden_state: jax_xla.DeviceArray = None
-    past_key_values: Optional[Tuple[Tuple[jax_xla.DeviceArray]]] = None
-    decoder_hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    decoder_attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
-    cross_attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
-    encoder_last_hidden_state: Optional[jax_xla.DeviceArray] = None
-    encoder_hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    encoder_attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
+    last_hidden_state: jnp.ndarray = None
+    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
+    decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
+    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
+    encoder_last_hidden_state: Optional[jnp.ndarray] = None
+    encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
 
 
 @flax.struct.dataclass
@@ -219,39 +219,39 @@ class FlaxCausalLMOutputWithCrossAttentions(ModelOutput):
     Base class for causal language model (or autoregressive) outputs.
 
     Args:
-        logits (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
+        logits (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
             Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
-        hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the model at the output of each layer plus the initial embedding outputs.
-        attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
             heads.
-        cross_attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        cross_attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Cross attentions weights after the attention softmax, used to compute the weighted average in the
             cross-attention heads.
-        past_key_values (:obj:`tuple(tuple(jax_xla.DeviceArray))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` tuples of length :obj:`config.n_layers`, with each tuple containing the
-            cached key, value states of the self-attention and the cross-attention layers if model is used in
-            encoder-decoder setting. Only relevant if ``config.is_decoder = True``.
+        past_key_values (:obj:`tuple(tuple(jnp.ndarray))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
+            Tuple of :obj:`jnp.ndarray` tuples of length :obj:`config.n_layers`, with each tuple containing the cached
+            key, value states of the self-attention and the cross-attention layers if model is used in encoder-decoder
+            setting. Only relevant if ``config.is_decoder = True``.
 
             Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
             :obj:`past_key_values` input) to speed up sequential decoding.
     """
 
-    logits: jax_xla.DeviceArray = None
-    past_key_values: Optional[Tuple[Tuple[jax_xla.DeviceArray]]] = None
-    hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
-    cross_attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
+    logits: jnp.ndarray = None
+    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
+    hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    attentions: Optional[Tuple[jnp.ndarray]] = None
+    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
 
 
 @flax.struct.dataclass
@@ -260,24 +260,24 @@ class FlaxMaskedLMOutput(ModelOutput):
     Base class for masked language models outputs.
 
     Args:
-        logits (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
+        logits (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
             Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
-        hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the model at the output of each layer plus the initial embedding outputs.
-        attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
             heads.
     """
 
-    logits: jax_xla.DeviceArray = None
-    hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
+    logits: jnp.ndarray = None
+    hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    attentions: Optional[Tuple[jnp.ndarray]] = None
 
 
 FlaxCausalLMOutput = FlaxMaskedLMOutput
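Because each of these classes is a `flax.struct.dataclass`, instances are registered JAX pytrees, so they can be returned from transformed functions. A sketch (the function body and shapes are invented for illustration):

    import jax
    import jax.numpy as jnp
    from transformers.modeling_flax_outputs import FlaxMaskedLMOutput

    @jax.jit
    def fake_forward(x):
        # stand-in for a real model body; returns a structured output
        return FlaxMaskedLMOutput(logits=x * 2.0)

    out = fake_forward(jnp.ones((1, 5)))
    print(out.logits.shape)  # (1, 5): the field holds a jnp.ndarray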
@@ -289,55 +289,55 @@ class FlaxSeq2SeqLMOutput(ModelOutput):
     Base class for sequence-to-sequence language models outputs.
 
     Args:
-        logits (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
+        logits (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
             Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
-        past_key_values (:obj:`tuple(tuple(jax_xla.DeviceArray))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
-            Tuple of :obj:`tuple(jax_xla.DeviceArray)` of length :obj:`config.n_layers`, with each tuple having 2
-            tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional
-            tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+        past_key_values (:obj:`tuple(tuple(jnp.ndarray))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
+            Tuple of :obj:`tuple(jnp.ndarray)` of length :obj:`config.n_layers`, with each tuple having 2 tensors of
+            shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
+            shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
 
             Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
             blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
-        decoder_hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        decoder_hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
-        decoder_attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        decoder_attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
             self-attention heads.
-        cross_attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        cross_attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
             weighted average in the cross-attention heads.
-        encoder_last_hidden_state (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
+        encoder_last_hidden_state (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
             Sequence of hidden-states at the output of the last layer of the encoder of the model.
-        encoder_hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        encoder_hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
-        encoder_attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        encoder_attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
             self-attention heads.
     """
 
-    logits: jax_xla.DeviceArray = None
-    past_key_values: Optional[Tuple[Tuple[jax_xla.DeviceArray]]] = None
-    decoder_hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    decoder_attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
-    cross_attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
-    encoder_last_hidden_state: Optional[jax_xla.DeviceArray] = None
-    encoder_hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    encoder_attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
+    logits: jnp.ndarray = None
+    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
+    decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
+    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
+    encoder_last_hidden_state: Optional[jnp.ndarray] = None
+    encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
 
 
 @flax.struct.dataclass
@@ -346,25 +346,25 @@ class FlaxNextSentencePredictorOutput(ModelOutput):
     Base class for outputs of models predicting if two sentences are consecutive or not.
 
     Args:
-        logits (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, 2)`):
+        logits (:obj:`jnp.ndarray` of shape :obj:`(batch_size, 2)`):
             Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
             before SoftMax).
-        hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the model at the output of each layer plus the initial embedding outputs.
-        attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
             heads.
     """
 
-    logits: jax_xla.DeviceArray = None
-    hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
+    logits: jnp.ndarray = None
+    hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    attentions: Optional[Tuple[jnp.ndarray]] = None
 
 
 @flax.struct.dataclass
@@ -373,24 +373,24 @@ class FlaxSequenceClassifierOutput(ModelOutput):
     Base class for outputs of sentence classification models.
 
     Args:
-        logits (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, config.num_labels)`):
+        logits (:obj:`jnp.ndarray` of shape :obj:`(batch_size, config.num_labels)`):
             Classification (or regression if config.num_labels==1) scores (before SoftMax).
-        hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the model at the output of each layer plus the initial embedding outputs.
-        attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
             heads.
     """
 
-    logits: jax_xla.DeviceArray = None
-    hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
+    logits: jnp.ndarray = None
+    hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    attentions: Optional[Tuple[jnp.ndarray]] = None
 
 
 @flax.struct.dataclass
@@ -399,55 +399,55 @@ class FlaxSeq2SeqSequenceClassifierOutput(ModelOutput):
     Base class for outputs of sequence-to-sequence sentence classification models.
 
     Args:
-        logits (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, config.num_labels)`):
+        logits (:obj:`jnp.ndarray` of shape :obj:`(batch_size, config.num_labels)`):
             Classification (or regression if config.num_labels==1) scores (before SoftMax).
-        past_key_values (:obj:`tuple(tuple(jax_xla.DeviceArray))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
-            Tuple of :obj:`tuple(jax_xla.DeviceArray)` of length :obj:`config.n_layers`, with each tuple having 2
-            tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional
-            tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+        past_key_values (:obj:`tuple(tuple(jnp.ndarray))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
+            Tuple of :obj:`tuple(jnp.ndarray)` of length :obj:`config.n_layers`, with each tuple having 2 tensors of
+            shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
+            shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
 
             Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
             blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
-        decoder_hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        decoder_hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
-        decoder_attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        decoder_attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
             self-attention heads.
-        cross_attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        cross_attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
             weighted average in the cross-attention heads.
-        encoder_last_hidden_state (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
+        encoder_last_hidden_state (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
             Sequence of hidden-states at the output of the last layer of the encoder of the model.
-        encoder_hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        encoder_hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
-        encoder_attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        encoder_attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
             self-attention heads.
     """
 
-    logits: jax_xla.DeviceArray = None
-    past_key_values: Optional[Tuple[Tuple[jax_xla.DeviceArray]]] = None
-    decoder_hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    decoder_attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
-    cross_attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
-    encoder_last_hidden_state: Optional[jax_xla.DeviceArray] = None
-    encoder_hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    encoder_attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
+    logits: jnp.ndarray = None
+    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
+    decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
+    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
+    encoder_last_hidden_state: Optional[jnp.ndarray] = None
+    encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
 
 
 @flax.struct.dataclass
@@ -456,26 +456,26 @@ class FlaxMultipleChoiceModelOutput(ModelOutput):
    Base class for outputs of multiple choice models.
 
     Args:
-        logits (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, num_choices)`):
+        logits (:obj:`jnp.ndarray` of shape :obj:`(batch_size, num_choices)`):
             `num_choices` is the second dimension of the input tensors. (see `input_ids` above).
 
             Classification scores (before SoftMax).
-        hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the model at the output of each layer plus the initial embedding outputs.
-        attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
             heads.
     """
 
-    logits: jax_xla.DeviceArray = None
-    hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
+    logits: jnp.ndarray = None
+    hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    attentions: Optional[Tuple[jnp.ndarray]] = None
 
 
 @flax.struct.dataclass
@@ -484,24 +484,24 @@ class FlaxTokenClassifierOutput(ModelOutput):
     Base class for outputs of token classification models.
 
     Args:
-        logits (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
+        logits (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
            Classification scores (before SoftMax).
-        hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the model at the output of each layer plus the initial embedding outputs.
-        attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
             heads.
     """
 
-    logits: jax_xla.DeviceArray = None
-    hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
+    logits: jnp.ndarray = None
+    hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    attentions: Optional[Tuple[jnp.ndarray]] = None
 
 
 @flax.struct.dataclass
@@ -510,27 +510,27 @@ class FlaxQuestionAnsweringModelOutput(ModelOutput):
     Base class for outputs of question answering models.
 
     Args:
-        start_logits (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length)`):
+        start_logits (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length)`):
             Span-start scores (before SoftMax).
-        end_logits (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length)`):
+        end_logits (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length)`):
             Span-end scores (before SoftMax).
-        hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the model at the output of each layer plus the initial embedding outputs.
-        attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
             heads.
     """
 
-    start_logits: jax_xla.DeviceArray = None
-    end_logits: jax_xla.DeviceArray = None
-    hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
+    start_logits: jnp.ndarray = None
+    end_logits: jnp.ndarray = None
+    hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    attentions: Optional[Tuple[jnp.ndarray]] = None
 
 
 @flax.struct.dataclass
@@ -539,55 +539,55 @@ class FlaxSeq2SeqQuestionAnsweringModelOutput(ModelOutput):
     Base class for outputs of sequence-to-sequence question answering models.
 
     Args:
-        start_logits (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length)`):
+        start_logits (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length)`):
             Span-start scores (before SoftMax).
-        end_logits (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length)`):
+        end_logits (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length)`):
             Span-end scores (before SoftMax).
-        past_key_values (:obj:`tuple(tuple(jax_xla.DeviceArray))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
-            Tuple of :obj:`tuple(jax_xla.DeviceArray)` of length :obj:`config.n_layers`, with each tuple having 2
-            tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional
-            tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+        past_key_values (:obj:`tuple(tuple(jnp.ndarray))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
+            Tuple of :obj:`tuple(jnp.ndarray)` of length :obj:`config.n_layers`, with each tuple having 2 tensors of
+            shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
+            shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
 
             Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
             blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
-        decoder_hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        decoder_hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
-        decoder_attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        decoder_attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
             self-attention heads.
-        cross_attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        cross_attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
             weighted average in the cross-attention heads.
-        encoder_last_hidden_state (:obj:`jax_xla.DeviceArray` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
+        encoder_last_hidden_state (:obj:`jnp.ndarray` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
             Sequence of hidden-states at the output of the last layer of the encoder of the model.
-        encoder_hidden_states (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for the output of the embeddings + one for the output of each
-            layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
+        encoder_hidden_states (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
+            Tuple of :obj:`jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of
+            shape :obj:`(batch_size, sequence_length, hidden_size)`.
 
             Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
-        encoder_attentions (:obj:`tuple(jax_xla.DeviceArray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
-            Tuple of :obj:`jax_xla.DeviceArray` (one for each layer) of shape :obj:`(batch_size, num_heads,
-            sequence_length, sequence_length)`.
+        encoder_attentions (:obj:`tuple(jnp.ndarray)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
+            Tuple of :obj:`jnp.ndarray` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
+            sequence_length)`.
 
             Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
             self-attention heads.
     """
 
-    start_logits: jax_xla.DeviceArray = None
-    end_logits: jax_xla.DeviceArray = None
-    past_key_values: Optional[Tuple[Tuple[jax_xla.DeviceArray]]] = None
-    decoder_hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    decoder_attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
-    cross_attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
-    encoder_last_hidden_state: Optional[jax_xla.DeviceArray] = None
-    encoder_hidden_states: Optional[Tuple[jax_xla.DeviceArray]] = None
-    encoder_attentions: Optional[Tuple[jax_xla.DeviceArray]] = None
+    start_logits: jnp.ndarray = None
+    end_logits: jnp.ndarray = None
+    past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None
+    decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    decoder_attentions: Optional[Tuple[jnp.ndarray]] = None
+    cross_attentions: Optional[Tuple[jnp.ndarray]] = None
+    encoder_last_hidden_state: Optional[jnp.ndarray] = None
+    encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None
+    encoder_attentions: Optional[Tuple[jnp.ndarray]] = None
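Finally, a sketch of consuming a question-answering output (the logits values are fabricated for illustration, and the import path assumes a `transformers` install with Flax support): the predicted span is simply the argmax of the start and end logits.

    import jax.numpy as jnp
    from transformers.modeling_flax_outputs import FlaxQuestionAnsweringModelOutput

    out = FlaxQuestionAnsweringModelOutput(
        start_logits=jnp.array([[0.1, 2.0, 0.3]]),
        end_logits=jnp.array([[0.0, 0.5, 3.0]]),
    )
    start = int(jnp.argmax(out.start_logits, axis=-1)[0])  # 1
    end = int(jnp.argmax(out.end_logits, axis=-1)[0])      # 2
    print((start, end))  # inclusive token indices of the predicted answer span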