# coding=utf-8
# Copyright 2020 Huggingface
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from transformers import (
    DPRContextEncoderTokenizer,
    DPRContextEncoderTokenizerFast,
    DPRQuestionEncoderTokenizer,
    DPRQuestionEncoderTokenizerFast,
    DPRReaderOutput,
    DPRReaderTokenizer,
    DPRReaderTokenizerFast,
)
from transformers.testing_utils import require_tokenizers, slow
from transformers.tokenization_utils_base import BatchEncoding

from .test_tokenization_bert import BertTokenizationTest

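# The DPR tokenizers are BERT-based, so each test class below reuses the full
# BERT tokenization test suite and only swaps in the DPR tokenizer classes under test.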
@require_tokenizers
class DPRContextEncoderTokenizationTest(BertTokenizationTest):

    tokenizer_class = DPRContextEncoderTokenizer
    rust_tokenizer_class = DPRContextEncoderTokenizerFast
    test_rust_tokenizer = True

@require_tokenizers
class DPRQuestionEncoderTokenizationTest(BertTokenizationTest):

    tokenizer_class = DPRQuestionEncoderTokenizer
    rust_tokenizer_class = DPRQuestionEncoderTokenizerFast
    test_rust_tokenizer = True

@require_tokenizers
class DPRReaderTokenizationTest(BertTokenizationTest):

    tokenizer_class = DPRReaderTokenizer
    rust_tokenizer_class = DPRReaderTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_decode_best_spans(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

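        # Build one reader input laid out as [CLS] question [SEP] title [SEP] passage,
        # where 101 and 102 are bert-base-uncased's [CLS] and [SEP] token ids.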
        text_1 = tokenizer.encode("question sequence", add_special_tokens=False)
        text_2 = tokenizer.encode("title sequence", add_special_tokens=False)
        text_3 = tokenizer.encode("text sequence " * 4, add_special_tokens=False)
        input_ids = [[101] + text_1 + [102] + text_2 + [102] + text_3]
        reader_input = BatchEncoding({"input_ids": input_ids})

        start_logits = [[0] * len(input_ids[0])]
        end_logits = [[0] * len(input_ids[0])]
        relevance_logits = [0]
        reader_output = DPRReaderOutput(start_logits, end_logits, relevance_logits)

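        # Boost the logits at a single (start, end) pair so it becomes the best-scoring span.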
        start_index, end_index = 8, 9
        start_logits[0][start_index] = 10
        end_logits[0][end_index] = 10
        predicted_spans = tokenizer.decode_best_spans(reader_input, reader_output)
        self.assertEqual(predicted_spans[0].start_index, start_index)
        self.assertEqual(predicted_spans[0].end_index, end_index)
        self.assertEqual(predicted_spans[0].doc_id, 0)

    @slow
    def test_call(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text_1 = tokenizer.encode("question sequence", add_special_tokens=False)
        text_2 = tokenizer.encode("title sequence", add_special_tokens=False)
        text_3 = tokenizer.encode("text sequence", add_special_tokens=False)
        expected_input_ids = [101] + text_1 + [102] + text_2 + [102] + text_3
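        # Calling the reader tokenizer with questions/titles/texts should reproduce
        # the same [CLS] question [SEP] title [SEP] passage layout built manually above.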
        encoded_input = tokenizer(questions=["question sequence"], titles=["title sequence"], texts=["text sequence"])
        self.assertIn("input_ids", encoded_input)
        self.assertIn("attention_mask", encoded_input)
        self.assertListEqual(encoded_input["input_ids"][0], expected_input_ids)