added type hints for blenderbot and blenderbot_small (#16307)

Author: ivanllt
Date: 2022-03-22 03:13:58 +08:00
Committed by: GitHub
Parent: e226a24f84
Commit: 96cd5bcbb9
4 changed files with 135 additions and 135 deletions
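
Every hunk below applies the same pattern: each untyped `param=None` in a `forward`/`call` signature gains an `Optional[...]` annotation for the value it accepts, and each method gains a `Union` return annotation, because these methods return a plain tuple when `return_dict` resolves to `False` and a `ModelOutput` dataclass otherwise. A minimal sketch of the convention (a hypothetical standalone function, not a line from this diff):

```python
# Sketch of the annotation convention used throughout this commit. The
# function itself is hypothetical; only the annotation style mirrors the diff.
from typing import List, Optional, Tuple, Union

import torch

from transformers.modeling_outputs import Seq2SeqModelOutput


def forward(
    input_ids: Optional[torch.LongTensor] = None,  # token ids, or None
    past_key_values: Optional[List[torch.FloatTensor]] = None,  # cached decoder states
    return_dict: Optional[bool] = None,  # None falls back to config.use_return_dict
) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
    # A tuple is returned when return_dict resolves to False, a
    # Seq2SeqModelOutput otherwise; the Union covers both branches.
    ...
```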

src/transformers/models/blenderbot/modeling_blenderbot.py

@@ -1119,22 +1119,22 @@ class BlenderbotModel(BlenderbotPreTrainedModel):
     @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
-        encoder_outputs=None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        decoder_input_ids: Optional[torch.LongTensor] = None,
+        decoder_attention_mask: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        decoder_head_mask: Optional[torch.Tensor] = None,
+        cross_attn_head_mask: Optional[torch.Tensor] = None,
+        encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
         r"""
         Returns:
@@ -1275,23 +1275,23 @@ class BlenderbotForConditionalGeneration(BlenderbotPreTrainedModel):
     @add_end_docstrings(BLENDERBOT_GENERATION_EXAMPLE)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
-        encoder_outputs=None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        labels=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        decoder_input_ids: Optional[torch.LongTensor] = None,
+        decoder_attention_mask: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        decoder_head_mask: Optional[torch.Tensor] = None,
+        cross_attn_head_mask: Optional[torch.Tensor] = None,
+        encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,

src/transformers/models/blenderbot/modeling_tf_blenderbot.py

@@ -18,7 +18,7 @@
 import os
 import random
 import warnings
-from typing import Optional, Tuple, Union
+from typing import List, Optional, Tuple, Union

 import tensorflow as tf
@@ -1137,24 +1137,24 @@ class TFBlenderbotModel(TFBlenderbotPreTrainedModel):
     )
     def call(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
+        input_ids: Optional[tf.Tensor] = None,
+        attention_mask: Optional[tf.Tensor] = None,
+        decoder_input_ids: Optional[tf.Tensor] = None,
+        decoder_attention_mask: Optional[tf.Tensor] = None,
+        head_mask: Optional[tf.Tensor] = None,
+        decoder_head_mask: Optional[tf.Tensor] = None,
+        cross_attn_head_mask: Optional[tf.Tensor] = None,
         encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-        training=False,
+        past_key_values: Optional[List[tf.Tensor]] = None,
+        inputs_embeds: Optional[tf.Tensor] = None,
+        decoder_inputs_embeds: Optional[tf.Tensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        training: Optional[bool] = False,
         **kwargs
-    ):
+    ) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]:
         outputs = self.model(
             input_ids=input_ids,
             attention_mask=attention_mask,
@@ -1253,25 +1253,25 @@ class TFBlenderbotForConditionalGeneration(TFBlenderbotPreTrainedModel, TFCausal
     @add_end_docstrings(BLENDERBOT_GENERATION_EXAMPLE)
     def call(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
-        encoder_outputs: Optional[TFBaseModelOutput] = None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-        labels=None,
-        training=False,
+        input_ids: Optional[tf.Tensor] = None,
+        attention_mask: Optional[tf.Tensor] = None,
+        decoder_input_ids: Optional[tf.Tensor] = None,
+        decoder_attention_mask: Optional[tf.Tensor] = None,
+        head_mask: Optional[tf.Tensor] = None,
+        decoder_head_mask: Optional[tf.Tensor] = None,
+        cross_attn_head_mask: Optional[tf.Tensor] = None,
+        encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
+        past_key_values: Optional[List[tf.Tensor]] = None,
+        inputs_embeds: Optional[tf.Tensor] = None,
+        decoder_inputs_embeds: Optional[tf.Tensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        labels: Optional[tf.Tensor] = None,
+        training: Optional[bool] = False,
         **kwargs,
-    ):
+    ) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:
         r"""
         labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):
             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,

src/transformers/models/blenderbot_small/modeling_blenderbot_small.py

@@ -1102,22 +1102,22 @@ class BlenderbotSmallModel(BlenderbotSmallPreTrainedModel):
     @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
-        encoder_outputs=None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        decoder_input_ids: Optional[torch.LongTensor] = None,
+        decoder_attention_mask: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        decoder_head_mask: Optional[torch.Tensor] = None,
+        cross_attn_head_mask: Optional[torch.Tensor] = None,
+        encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
         r"""
         Returns:
@@ -1246,23 +1246,23 @@ class BlenderbotSmallForConditionalGeneration(BlenderbotSmallPreTrainedModel):
     @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE)
     def forward(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
-        encoder_outputs=None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        labels=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-    ):
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        decoder_input_ids: Optional[torch.LongTensor] = None,
+        decoder_attention_mask: Optional[torch.LongTensor] = None,
+        head_mask: Optional[torch.Tensor] = None,
+        decoder_head_mask: Optional[torch.Tensor] = None,
+        cross_attn_head_mask: Optional[torch.Tensor] = None,
+        encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
+        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,

src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py

@@ -16,7 +16,7 @@
 import random
-from typing import Optional, Tuple, Union
+from typing import List, Optional, Tuple, Union

 import numpy as np
 import tensorflow as tf
@@ -1132,24 +1132,24 @@ class TFBlenderbotSmallModel(TFBlenderbotSmallPreTrainedModel):
     )
     def call(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
+        input_ids: Optional[tf.Tensor] = None,
+        attention_mask: Optional[tf.Tensor] = None,
+        decoder_input_ids: Optional[tf.Tensor] = None,
+        decoder_attention_mask: Optional[tf.Tensor] = None,
+        head_mask: Optional[tf.Tensor] = None,
+        decoder_head_mask: Optional[tf.Tensor] = None,
+        cross_attn_head_mask: Optional[tf.Tensor] = None,
         encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-        training=False,
+        past_key_values: Optional[List[tf.Tensor]] = None,
+        inputs_embeds: Optional[tf.Tensor] = None,
+        decoder_inputs_embeds: Optional[tf.Tensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        training: Optional[bool] = False,
         **kwargs
-    ):
+    ) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]:
         outputs = self.model(
             input_ids=input_ids,
@@ -1236,25 +1236,25 @@ class TFBlenderbotSmallForConditionalGeneration(TFBlenderbotSmallPreTrainedModel
     @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE)
     def call(
         self,
-        input_ids=None,
-        attention_mask=None,
-        decoder_input_ids=None,
-        decoder_attention_mask=None,
-        head_mask=None,
-        decoder_head_mask=None,
-        cross_attn_head_mask=None,
+        input_ids: Optional[tf.Tensor] = None,
+        attention_mask: Optional[tf.Tensor] = None,
+        decoder_input_ids: Optional[tf.Tensor] = None,
+        decoder_attention_mask: Optional[tf.Tensor] = None,
+        head_mask: Optional[tf.Tensor] = None,
+        decoder_head_mask: Optional[tf.Tensor] = None,
+        cross_attn_head_mask: Optional[tf.Tensor] = None,
         encoder_outputs: Optional[TFBaseModelOutput] = None,
-        past_key_values=None,
-        inputs_embeds=None,
-        decoder_inputs_embeds=None,
-        use_cache=None,
-        output_attentions=None,
-        output_hidden_states=None,
-        return_dict=None,
-        labels=None,
-        training=False,
+        past_key_values: Optional[List[tf.Tensor]] = None,
+        inputs_embeds: Optional[tf.Tensor] = None,
+        decoder_inputs_embeds: Optional[tf.Tensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        labels: Optional[tf.Tensor] = None,
+        training: Optional[bool] = False,
         **kwargs,
-    ):
+    ) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:
         r"""
         labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):
             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
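
One asymmetry between the two TF files: here `encoder_outputs` keeps its existing `Optional[TFBaseModelOutput]` annotation in `TFBlenderbotSmallForConditionalGeneration.call`, while the Blenderbot counterpart above was widened to `Optional[Union[Tuple, TFBaseModelOutput]]`. To review a whole annotated signature at once, a sketch assuming `transformers` and TensorFlow are installed:

```python
# Dump every parameter annotation on TFBlenderbotSmallModel.call
# (illustrative only).
import inspect

from transformers import TFBlenderbotSmallModel

for name, param in inspect.signature(TFBlenderbotSmallModel.call).parameters.items():
    print(f"{name}: {param.annotation}")
```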