# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import unittest
import json
import random
import shutil
import pytest

import torch

from pytorch_transformers import (XLNetConfig, XLNetModel, XLNetLMHeadModel,
                                  XLNetForSequenceClassification, XLNetForQuestionAnswering)
from pytorch_transformers.modeling_xlnet import XLNET_PRETRAINED_MODEL_ARCHIVE_MAP

from .modeling_tests_commons import ConfigTester, create_and_check_commons, ids_tensor
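
# Note: these tests target the tuple-returning pytorch_transformers API, where a
# forward pass yields a plain tuple (e.g. "outputs, mems = model(input_ids)")
# rather than a structured output object.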


class XLNetModelTest(unittest.TestCase):
    class XLNetModelTester(object):

        def __init__(self,
                     parent,
                     batch_size=13,
                     seq_length=7,
                     mem_len=10,
                     clamp_len=-1,
                     reuse_len=15,
                     is_training=True,
                     use_labels=True,
                     vocab_size=99,
                     cutoffs=[10, 50, 80],
                     hidden_size=32,
                     num_attention_heads=4,
                     d_inner=128,
                     num_hidden_layers=5,
                     max_position_embeddings=10,
                     type_sequence_label_size=2,
                     untie_r=True,
                     bi_data=False,
                     same_length=False,
                     initializer_range=0.05,
                     seed=1,
                     type_vocab_size=2,
                     all_model_classes=(XLNetModel, XLNetLMHeadModel,
                                        XLNetForSequenceClassification, XLNetForQuestionAnswering),
                     ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.mem_len = mem_len
            # self.key_len = seq_length + mem_len
            self.clamp_len = clamp_len
            self.reuse_len = reuse_len
            self.is_training = is_training
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.cutoffs = cutoffs
            self.hidden_size = hidden_size
            self.num_attention_heads = num_attention_heads
            self.d_inner = d_inner
            self.num_hidden_layers = num_hidden_layers
            self.max_position_embeddings = max_position_embeddings
            self.bi_data = bi_data
            self.untie_r = untie_r
            self.same_length = same_length
            self.initializer_range = initializer_range
            self.seed = seed
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.all_model_classes = all_model_classes

        def prepare_config_and_inputs(self):
            input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            segment_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            input_mask = ids_tensor([self.batch_size, self.seq_length], 2).float()

            input_ids_q = ids_tensor([self.batch_size, self.seq_length + 1], self.vocab_size)
            perm_mask = torch.zeros(self.batch_size, self.seq_length + 1, self.seq_length + 1, dtype=torch.float)
            perm_mask[:, :, -1] = 1.0  # previous tokens don't see the last token
            target_mapping = torch.zeros(self.batch_size, 1, self.seq_length + 1, dtype=torch.float)
            target_mapping[:, 0, -1] = 1.0  # predict the last token
            inp_q = target_mapping[:, 0, :].clone()  # predict the last token

            sequence_labels = None
            lm_labels = None
            is_impossible_labels = None
            if self.use_labels:
                lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                is_impossible_labels = ids_tensor([self.batch_size], 2).float()

            config = XLNetConfig(
                vocab_size_or_config_json_file=self.vocab_size,
                d_model=self.hidden_size,
                n_head=self.num_attention_heads,
                d_inner=self.d_inner,
                n_layer=self.num_hidden_layers,
                untie_r=self.untie_r,
                max_position_embeddings=self.max_position_embeddings,
                mem_len=self.mem_len,
                clamp_len=self.clamp_len,
                same_length=self.same_length,
                reuse_len=self.reuse_len,
                bi_data=self.bi_data,
                initializer_range=self.initializer_range,
                num_labels=self.type_sequence_label_size)

            # This ordering is unpacked positionally by every create_and_check_* method below.
            return (config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask,
                    target_mapping, inp_q, segment_ids, lm_labels, sequence_labels, is_impossible_labels)

        def set_seed(self):
            random.seed(self.seed)
            torch.manual_seed(self.seed)

        def create_and_check_xlnet_base_model(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask,
                                              target_mapping, inp_q, segment_ids, lm_labels, sequence_labels, is_impossible_labels):
            model = XLNetModel(config)
            model.eval()

            _, _ = model(input_ids_1, input_mask=input_mask)
            _, _ = model(input_ids_1, attention_mask=input_mask)
            _, _ = model(input_ids_1, token_type_ids=segment_ids)
            outputs, mems_1 = model(input_ids_1)

            result = {
                "mems_1": mems_1,
                "outputs": outputs,
            }

            self.parent.assertListEqual(
                list(result["outputs"].size()),
                [self.batch_size, self.seq_length, self.hidden_size])
            self.parent.assertListEqual(
                list(list(mem.size()) for mem in result["mems_1"]),
                [[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers)

        def create_and_check_xlnet_lm_head(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask,
                                           target_mapping, inp_q, segment_ids, lm_labels, sequence_labels, is_impossible_labels):
            model = XLNetLMHeadModel(config)
            model.eval()

            loss_1, all_logits_1, mems_1 = model(input_ids_1, token_type_ids=segment_ids, labels=lm_labels)

            loss_2, all_logits_2, mems_2 = model(input_ids_2, token_type_ids=segment_ids, labels=lm_labels, mems=mems_1)

            logits, _ = model(input_ids_q, perm_mask=perm_mask, target_mapping=target_mapping, inp_q=inp_q)

            result = {
                "loss_1": loss_1,
                "mems_1": mems_1,
                "all_logits_1": all_logits_1,
                "loss_2": loss_2,
                "mems_2": mems_2,
                "all_logits_2": all_logits_2,
            }

            self.parent.assertListEqual(
                list(result["loss_1"].size()),
                [])
            self.parent.assertListEqual(
                list(result["all_logits_1"].size()),
                [self.batch_size, self.seq_length, self.vocab_size])
            # After the first pass the memory only holds the seq_length new hidden states.
            self.parent.assertListEqual(
                list(list(mem.size()) for mem in result["mems_1"]),
                [[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers)

            self.parent.assertListEqual(
                list(result["loss_2"].size()),
                [])
            self.parent.assertListEqual(
                list(result["all_logits_2"].size()),
                [self.batch_size, self.seq_length, self.vocab_size])
            # After the second pass the concatenated memory is truncated to mem_len.
            self.parent.assertListEqual(
                list(list(mem.size()) for mem in result["mems_2"]),
                [[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers)

        def create_and_check_xlnet_qa(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask,
                                      target_mapping, inp_q, segment_ids, lm_labels, sequence_labels, is_impossible_labels):
            model = XLNetForQuestionAnswering(config)
            model.eval()

            # Without labels, the model returns its top start/end candidates.
            outputs = model(input_ids_1)
            start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits, mems = outputs

            # With start/end positions (and optionally cls_index/is_impossible/p_mask),
            # the model returns the total loss instead.
            outputs = model(input_ids_1, start_positions=sequence_labels,
                            end_positions=sequence_labels,
                            cls_index=sequence_labels,
                            is_impossible=is_impossible_labels,
                            p_mask=input_mask)

            outputs = model(input_ids_1, start_positions=sequence_labels,
                            end_positions=sequence_labels,
                            cls_index=sequence_labels,
                            is_impossible=is_impossible_labels)

            total_loss, start_logits, end_logits, cls_logits, mems = outputs

            outputs = model(input_ids_1, start_positions=sequence_labels,
                            end_positions=sequence_labels)

            total_loss, start_logits, end_logits, mems = outputs

            result = {
                "loss": total_loss,
                "start_logits": start_logits,
                "end_logits": end_logits,
                "cls_logits": cls_logits,
                "mems": mems,
            }

            self.parent.assertListEqual(
                list(result["loss"].size()),
                [])
            self.parent.assertListEqual(
                list(result["start_logits"].size()),
                [self.batch_size, self.seq_length])
            self.parent.assertListEqual(
                list(result["end_logits"].size()),
                [self.batch_size, self.seq_length])
            self.parent.assertListEqual(
                list(result["cls_logits"].size()),
                [self.batch_size])
            self.parent.assertListEqual(
                list(list(mem.size()) for mem in result["mems"]),
                [[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers)

        def create_and_check_xlnet_sequence_classif(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask,
                                                    target_mapping, inp_q, segment_ids, lm_labels, sequence_labels, is_impossible_labels):
            model = XLNetForSequenceClassification(config)
            model.eval()

            logits, mems_1 = model(input_ids_1)
            loss, logits, mems_1 = model(input_ids_1, labels=sequence_labels)

            result = {
                "loss": loss,
                "mems_1": mems_1,
                "logits": logits,
            }

            self.parent.assertListEqual(
                list(result["loss"].size()),
                [])
            self.parent.assertListEqual(
                list(result["logits"].size()),
                [self.batch_size, self.type_sequence_label_size])
            self.parent.assertListEqual(
                list(list(mem.size()) for mem in result["mems_1"]),
                [[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers)

        def create_and_check_xlnet_commons(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask,
                                           target_mapping, inp_q, segment_ids, lm_labels, sequence_labels, is_impossible_labels):
            inputs_dict = {'input_ids': input_ids_1}
            create_and_check_commons(self, config, inputs_dict, test_pruning=False)
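
    # The test_* methods below drive the nested XLNetModelTester: each run
    # re-seeds the RNGs, builds fresh config/inputs, and checks one scenario.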

    def test_default(self):
        self.run_tester(XLNetModelTest.XLNetModelTester(self))

    def test_config(self):
        config_tester = ConfigTester(self, config_class=XLNetConfig, d_inner=37)
        config_tester.run_common_tests()

    @pytest.mark.slow
    def test_model_from_pretrained(self):
        cache_dir = "/tmp/pytorch_transformers_test/"
        for model_name in list(XLNET_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            model = XLNetModel.from_pretrained(model_name, cache_dir=cache_dir)
            shutil.rmtree(cache_dir)
            self.assertIsNotNone(model)

    def run_tester(self, tester):
        tester.set_seed()
        config_and_inputs = tester.prepare_config_and_inputs()
        tester.create_and_check_xlnet_base_model(*config_and_inputs)

        tester.set_seed()
        config_and_inputs = tester.prepare_config_and_inputs()
        tester.create_and_check_xlnet_lm_head(*config_and_inputs)

        tester.set_seed()
        config_and_inputs = tester.prepare_config_and_inputs()
        tester.create_and_check_xlnet_sequence_classif(*config_and_inputs)

        tester.set_seed()
        config_and_inputs = tester.prepare_config_and_inputs()
        tester.create_and_check_xlnet_qa(*config_and_inputs)

        tester.set_seed()
        config_and_inputs = tester.prepare_config_and_inputs()
        tester.create_and_check_xlnet_commons(*config_and_inputs)
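
# A minimal sketch of how to run this module with pytest, assuming it lives at
# tests/modeling_xlnet_test.py (the path is an assumption about the repo layout
# of this era); the slow download test can be deselected via its marker:
#     python -m pytest tests/modeling_xlnet_test.py -m "not slow"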

if __name__ == "__main__":
    unittest.main()