import json
import os
import unittest

from transformers import BartTokenizer, BartTokenizerFast, BatchEncoding
from transformers.file_utils import cached_property
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_roberta import VOCAB_FILES_NAMES

from .test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors


@require_tokenizers
class TestTokenizationBart(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartTokenizer
    rust_tokenizer_class = BartTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    # from_pretrained_kwargs = {'add_prefix_space': True}

    def setUp(self):
        super().setUp()
        # Minimal BPE vocabulary and merge rules ("\u0120" is the byte-level BPE marker for a leading space).
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return BartTokenizer.from_pretrained("facebook/bart-large")

    @cached_property
    def default_tokenizer_fast(self):
        return BartTokenizerFast.from_pretrained("facebook/bart-large")

    @require_torch
    def test_prepare_seq2seq_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer.prepare_seq2seq_batch(
                src_text, tgt_texts=tgt_text, max_length=len(expected_src_tokens), return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_seq2seq_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer.prepare_seq2seq_batch(src_text, return_tensors="pt")
            # check that input_ids and attention_mask are returned but no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_seq2seq_batch_max_target_length(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer.prepare_seq2seq_batch(
                src_text, tgt_texts=tgt_text, max_target_length=32, padding="max_length", return_tensors="pt"
            )
            self.assertEqual(32, batch["labels"].shape[1])

            # when max_target_length is left unset, the target length falls back to max_length
            batch = tokenizer.prepare_seq2seq_batch(
                src_text, tgt_texts=tgt_text, max_length=32, padding="max_length", return_tensors="pt"
            )
            self.assertEqual(32, batch["labels"].shape[1])

    @require_torch
    def test_seq2seq_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer.prepare_seq2seq_batch(
                ["I am a small frog" * 1024, "I am a small frog"], return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer.prepare_seq2seq_batch(src_text, tgt_texts=tgt_text, return_tensors="pt")
            input_ids = batch["input_ids"]
            labels = batch["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        # The common pretokenized-input test is intentionally skipped for BART.
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest("{} ({})".format(tokenizer.__class__.__name__, pretrained_name)):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so its average over the sequence length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
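
if __name__ == "__main__":
    # Illustrative sketch only, not part of the test suite: shows the call pattern that the
    # tests above exercise. It assumes the facebook/bart-large files can be downloaded and
    # that PyTorch is installed (needed for return_tensors="pt").
    tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-large")
    batch = tokenizer.prepare_seq2seq_batch(
        ["A long paragraph for summarization."],
        tgt_texts=["Summary of the text."],
        return_tensors="pt",
    )
    # input_ids/attention_mask encode the source text; labels encode the target text.
    print(batch.input_ids.shape, batch["labels"].shape)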