Mirror of https://github.com/huggingface/transformers.git (synced 2025-07-31 02:02:21 +06:00)
Add TF Funnel Transformer (#7029)

* Add TF Funnel Transformer
* Proper dummy input
* Formatting
* Update src/transformers/modeling_tf_funnel.py
* Address review comments
* One review comment forgotten

Co-authored-by: Lysandre Debut <lysandre@huggingface.co>

parent: 7fd1febf38
commit: 15a189049e
@@ -69,6 +69,9 @@ Funnel specific outputs
 .. autoclass:: transformers.modeling_funnel.FunnelForPreTrainingOutput
     :members:
 
+.. autoclass:: transformers.modeling_tf_funnel.TFFunnelForPreTrainingOutput
+    :members:
+
 
 FunnelBaseModel
 ~~~~~~~~~~~~~~~
@@ -124,3 +127,59 @@ FunnelForQuestionAnswering
 
 .. autoclass:: transformers.FunnelForQuestionAnswering
     :members:
+
+
+TFFunnelBaseModel
+~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.TFFunnelBaseModel
+    :members:
+
+
+TFFunnelModel
+~~~~~~~~~~~~~
+
+.. autoclass:: transformers.TFFunnelModel
+    :members:
+
+
+TFFunnelModelForPreTraining
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.TFFunnelForPreTraining
+    :members:
+
+
+TFFunnelForMaskedLM
+~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.TFFunnelForMaskedLM
+    :members:
+
+
+TFFunnelForSequenceClassification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.TFFunnelForSequenceClassification
+    :members:
+
+
+TFFunnelForMultipleChoice
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.TFFunnelForMultipleChoice
+    :members:
+
+
+TFFunnelForTokenClassification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.TFFunnelForTokenClassification
+    :members:
+
+
+TFFunnelForQuestionAnswering
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.TFFunnelForQuestionAnswering
+    :members:
@@ -592,6 +592,17 @@ if is_tf_available():
         TFFlaubertModel,
         TFFlaubertWithLMHeadModel,
     )
+    from .modeling_tf_funnel import (
+        TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
+        TFFunnelBaseModel,
+        TFFunnelForMaskedLM,
+        TFFunnelForMultipleChoice,
+        TFFunnelForPreTraining,
+        TFFunnelForQuestionAnswering,
+        TFFunnelForSequenceClassification,
+        TFFunnelForTokenClassification,
+        TFFunnelModel,
+    )
     from .modeling_tf_gpt2 import (
         TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
         TFGPT2DoubleHeadsModel,
@@ -133,7 +133,9 @@ CONFIG_NAME = "config.json"
 MODEL_CARD_NAME = "modelcard.json"
 
 
-MULTIPLE_CHOICE_DUMMY_INPUTS = [[[0], [1]], [[0], [1]]]
+MULTIPLE_CHOICE_DUMMY_INPUTS = [
+    [[0, 1, 0, 1], [1, 0, 0, 1]]
+]* 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
 DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
 DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
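The new dummy multiple-choice input deliberately keeps every value in {0, 1}: XLM reuses the same tensor for its `langs` argument, which indexes a small language-embedding table. A minimal sketch (not part of the commit) of the tensor this constant produces:

```python
# Minimal sketch (not part of the commit): shape of the multiple-choice dummy input.
# Values stay in {0, 1} because XLM reuses the same tensor for its `langs` ids.
import tensorflow as tf

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # the constant defined in the hunk above

dummy = tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)
print(dummy.shape)  # (2, 2, 4): batch of 2 examples, 2 choices, sequence length 4
```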
@@ -425,9 +425,9 @@ def _relative_shift_gather(positional_attn, context_len, shift):
     # max_rel_len = 2 * context_len + shift -1 is the numbers of possible relative positions i-j
 
     # What's next is the same as doing the following gather, which might be clearer code but less efficient.
-    # idxs = context_len + torch.arange(0, context_len).unsqueeze(0) - torch.arange(0, context_len).unsqueeze(1)
+    # idxs = context_len + torch.arange(0, context_len).unsqueeze(0) - torch.arange(0, seq_len).unsqueeze(1)
     # # matrix of context_len + i-j
-    # return positional_attn.gather(3, idxs.expand([bs, n_head, context_len, context_len]))
+    # return positional_attn.gather(3, idxs.expand([batch_size, n_head, context_len, context_len]))
 
     positional_attn = torch.reshape(positional_attn, [batch_size, n_head, max_rel_len, seq_len])
     positional_attn = positional_attn[:, :, shift:, :]
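The commented-out gather above is the readable reference for what the reshape/slice below computes. A hedged sketch (not part of the commit, simple case only: `seq_len == context_len` and `shift == 1`) checking that the two formulations pick the same entries; both helpers are illustrative, not the library's code:

```python
# Illustrative comparison of the gather formulation and the reshape/slice trick,
# restricted to the un-pooled case seq_len == context_len, shift == 1.
import torch


def gather_version(positional_attn, context_len):
    batch_size, n_head, seq_len, _ = positional_attn.shape
    idxs = context_len + torch.arange(0, context_len).unsqueeze(0) - torch.arange(0, seq_len).unsqueeze(1)
    return positional_attn.gather(3, idxs.expand([batch_size, n_head, seq_len, context_len]))


def reshape_version(positional_attn, context_len, shift):
    batch_size, n_head, seq_len, max_rel_len = positional_attn.shape
    positional_attn = torch.reshape(positional_attn, [batch_size, n_head, max_rel_len, seq_len])
    positional_attn = positional_attn[:, :, shift:, :]
    positional_attn = torch.reshape(positional_attn, [batch_size, n_head, seq_len, max_rel_len - shift])
    return positional_attn[..., :context_len]


context_len, shift = 5, 1
max_rel_len = 2 * context_len + shift - 1
x = torch.randn(2, 4, context_len, max_rel_len)  # batch_size=2, n_head=4, seq_len == context_len
assert torch.equal(gather_version(x, context_len), reshape_version(x, context_len, shift))
```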
@@ -526,9 +526,9 @@ class FunnelRelMultiheadAttention(nn.Module):
             token_type_attn *= cls_mask
         return token_type_attn
 
-    def forward(self, query, key, value, attention_inputs, head_mask=None, output_attentions=False):
-        # q has shape batch_size x seq_len x d_model
-        # k and v have shapes batch_size x context_len x d_model
+    def forward(self, query, key, value, attention_inputs, output_attentions=False):
+        # query has shape batch_size x seq_len x d_model
+        # key and value have shapes batch_size x context_len x d_model
         position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs
 
         batch_size, seq_len, _ = query.shape
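The renamed arguments make the shape comment easier to follow: in Funnel the query can come from a pooled sequence while key and value keep the full length, so `seq_len` and `context_len` may differ. A small illustration with assumed example sizes (not taken from the library):

```python
# Illustrative shapes only (assumed example values): pooled queries attend over
# full-length keys/values, giving seq_len x context_len attention scores.
import torch

batch_size, seq_len, context_len, d_model = 2, 4, 8, 32
query = torch.randn(batch_size, seq_len, d_model)      # pooled queries
key = torch.randn(batch_size, context_len, d_model)    # full-length keys
value = torch.randn(batch_size, context_len, d_model)  # full-length values

scores = torch.einsum("bid,bjd->bij", query, key)
print(scores.shape)  # torch.Size([2, 4, 8]): batch_size x seq_len x context_len
```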
@@ -598,8 +598,8 @@ class FunnelLayer(nn.Module):
         self.attention = FunnelRelMultiheadAttention(config, block_index)
         self.ffn = FunnelPositionwiseFFN(config)
 
-    def forward(self, q, k, v, attention_inputs, output_attentions=False):
-        attn = self.attention(q, k, v, attention_inputs, output_attentions=output_attentions)
+    def forward(self, query, key, value, attention_inputs, output_attentions=False):
+        attn = self.attention(query, key, value, attention_inputs, output_attentions=output_attentions)
         output = self.ffn(attn[0])
         return (output, attn[1]) if output_attentions else (output,)
 
@@ -792,7 +792,7 @@ class FunnelClassificationHead(nn.Module):
 
     def forward(self, hidden):
         hidden = self.linear_hidden(hidden)
-        hidden = F.tanh(hidden)
+        hidden = torch.tanh(hidden)
         hidden = self.dropout(hidden)
         return self.linear_out(hidden)
 
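`torch.nn.functional.tanh` is deprecated in favour of `torch.tanh`, so the swap above is behaviour-preserving. A quick check (not part of the commit):

```python
# torch.tanh matches the deprecated torch.nn.functional.tanh elementwise.
import torch
import torch.nn.functional as F

x = torch.randn(3, 5)
assert torch.allclose(torch.tanh(x), F.tanh(x))
```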
@@ -954,7 +954,7 @@ class FunnelBaseModel(FunnelPreTrainedModel):
 
 
 @add_start_docstrings(
-    "The bare base Funnel Transformer Model transformer outputting raw hidden-states without any specific head on top.",
+    "The bare Funnel Transformer Model transformer outputting raw hidden-states without any specific head on top.",
     FUNNEL_START_DOCSTRING,
 )
 class FunnelModel(FunnelPreTrainedModel):
@@ -1099,10 +1099,10 @@ class FunnelForPreTraining(FunnelPreTrainedModel):
         >>> import torch
 
         >>> tokenizer = FunnelTokenizer.from_pretrained('funnel-transformer/small')
-        >>> model = FunnelForPreTraining.from_pretrained('funnel-transformer/small')
+        >>> model = FunnelForPreTraining.from_pretrained('funnel-transformer/small', return_dict=True)
 
-        >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
-        >>> logits = model(input_ids).logits
+        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors= "pt")
+        >>> logits = model(**inputs).logits
         """
         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
 
@@ -27,6 +27,7 @@ from .configuration_auto import (
     DistilBertConfig,
     ElectraConfig,
     FlaubertConfig,
+    FunnelConfig,
     GPT2Config,
     LongformerConfig,
     MobileBertConfig,
@@ -92,6 +93,15 @@ from .modeling_tf_flaubert import (
     TFFlaubertModel,
     TFFlaubertWithLMHeadModel,
 )
+from .modeling_tf_funnel import (
+    TFFunnelForMaskedLM,
+    TFFunnelForMultipleChoice,
+    TFFunnelForPreTraining,
+    TFFunnelForQuestionAnswering,
+    TFFunnelForSequenceClassification,
+    TFFunnelForTokenClassification,
+    TFFunnelModel,
+)
 from .modeling_tf_gpt2 import TFGPT2LMHeadModel, TFGPT2Model
 from .modeling_tf_longformer import TFLongformerForMaskedLM, TFLongformerForQuestionAnswering, TFLongformerModel
 from .modeling_tf_mobilebert import (
@@ -163,6 +173,7 @@ TF_MODEL_MAPPING = OrderedDict(
         (XLMConfig, TFXLMModel),
         (CTRLConfig, TFCTRLModel),
         (ElectraConfig, TFElectraModel),
+        (FunnelConfig, TFFunnelModel),
     ]
 )
 
@@ -184,6 +195,7 @@ TF_MODEL_FOR_PRETRAINING_MAPPING = OrderedDict(
         (XLMConfig, TFXLMWithLMHeadModel),
         (CTRLConfig, TFCTRLLMHeadModel),
         (ElectraConfig, TFElectraForPreTraining),
+        (FunnelConfig, TFFunnelForPreTraining),
     ]
 )
 
@@ -206,6 +218,7 @@ TF_MODEL_WITH_LM_HEAD_MAPPING = OrderedDict(
         (XLMConfig, TFXLMWithLMHeadModel),
         (CTRLConfig, TFCTRLLMHeadModel),
         (ElectraConfig, TFElectraForMaskedLM),
+        (FunnelConfig, TFFunnelForMaskedLM),
     ]
 )
 
@@ -237,6 +250,7 @@ TF_MODEL_FOR_MASKED_LM_MAPPING = OrderedDict(
         (FlaubertConfig, TFFlaubertWithLMHeadModel),
         (XLMConfig, TFXLMWithLMHeadModel),
         (ElectraConfig, TFElectraForMaskedLM),
+        (FunnelConfig, TFFunnelForMaskedLM),
     ]
 )
 
@@ -255,6 +269,7 @@ TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = OrderedDict(
         (FlaubertConfig, TFFlaubertForSequenceClassification),
         (XLMConfig, TFXLMForSequenceClassification),
         (ElectraConfig, TFElectraForSequenceClassification),
+        (FunnelConfig, TFFunnelForSequenceClassification),
     ]
 )
 
@@ -272,6 +287,7 @@ TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = OrderedDict(
         (FlaubertConfig, TFFlaubertForQuestionAnsweringSimple),
         (XLMConfig, TFXLMForQuestionAnsweringSimple),
         (ElectraConfig, TFElectraForQuestionAnswering),
+        (FunnelConfig, TFFunnelForQuestionAnswering),
     ]
 )
 
@@ -288,6 +304,7 @@ TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = OrderedDict(
         (MobileBertConfig, TFMobileBertForTokenClassification),
         (XLNetConfig, TFXLNetForTokenClassification),
         (ElectraConfig, TFElectraForTokenClassification),
+        (FunnelConfig, TFFunnelForTokenClassification),
     ]
 )
 
@@ -304,6 +321,7 @@ TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = OrderedDict(
         (FlaubertConfig, TFFlaubertForMultipleChoice),
         (AlbertConfig, TFAlbertForMultipleChoice),
         (ElectraConfig, TFElectraForMultipleChoice),
+        (FunnelConfig, TFFunnelForMultipleChoice),
     ]
 )
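Registering the `(FunnelConfig, TFFunnel...)` pairs in these ordered mappings is what lets the TF auto classes resolve a Funnel checkpoint to the new TF implementations. A hedged usage sketch (not part of the commit; requires TensorFlow, network access, and TF weights for the checkpoint named in the docstring earlier):

```python
# Hedged sketch (not part of the commit): after this change, TFAutoModel should
# resolve a Funnel config to TFFunnelModel via TF_MODEL_MAPPING.
from transformers import TFAutoModel

model = TFAutoModel.from_pretrained("funnel-transformer/small")
print(type(model).__name__)  # expected: TFFunnelModel
```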
src/transformers/modeling_tf_funnel.py (new file, 1663 lines)
File diff suppressed because it is too large.
@@ -487,7 +487,10 @@ class TFModelTesterMixin:
             model = model_class(config)
             outputs = model(self._prepare_for_class(inputs_dict, model_class))
             hidden_states = [t.numpy() for t in outputs[-1]]
-            self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1)
+            expected_num_layers = getattr(
+                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
+            )
+            self.assertEqual(len(hidden_states), expected_num_layers)
             self.assertListEqual(
                 list(hidden_states[0].shape[-2:]),
                 [self.model_tester.seq_length, self.model_tester.hidden_size],
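The common test now reads an optional `expected_num_hidden_layers` attribute from the model tester and falls back to `num_hidden_layers + 1`, so testers like the Funnel one below can override the expectation. A minimal sketch of that `getattr` pattern with illustrative names:

```python
# Minimal sketch of the getattr-with-default pattern used above (illustrative names).
class DefaultTester:
    num_hidden_layers = 4

class FunnelLikeTester:
    num_hidden_layers = 4
    expected_num_hidden_layers = 6  # overrides the default expectation

for tester in (DefaultTester(), FunnelLikeTester()):
    expected = getattr(tester, "expected_num_hidden_layers", tester.num_hidden_layers + 1)
    print(type(tester).__name__, expected)  # DefaultTester 5, FunnelLikeTester 6
```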
tests/test_modeling_tf_funnel.py (new file, 394 lines)
@@ -0,0 +1,394 @@
# coding=utf-8
# Copyright 2020 HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf

from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor


if is_tf_available():
    import tensorflow as tf

    from transformers.modeling_tf_funnel import (
        TFFunnelBaseModel,
        TFFunnelForMaskedLM,
        TFFunnelForMultipleChoice,
        TFFunnelForPreTraining,
        TFFunnelForQuestionAnswering,
        TFFunnelForSequenceClassification,
        TFFunnelForTokenClassification,
        TFFunnelModel,
    )


class TFFunnelModelTester:
    """You can also import this e.g, from .test_modeling_funnel import FunnelModelTester """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            return_dict=True,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class FunnelModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)


@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
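Beyond the unit tests, the new classes can be exercised end to end. A hedged usage sketch (not part of the commit; requires TensorFlow, network access, and TF weights for the checkpoint), mirroring the `return_dict=True` pattern from the docstring change earlier in this commit:

```python
# Hedged usage sketch (not part of the commit): run TFFunnelModel end to end.
from transformers import FunnelTokenizer, TFFunnelModel

tokenizer = FunnelTokenizer.from_pretrained("funnel-transformer/small")
model = TFFunnelModel.from_pretrained("funnel-transformer/small", return_dict=True)

inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
outputs = model(inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, d_model)
```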