mirror of
https://github.com/huggingface/transformers.git
synced 2025-07-06 14:20:04 +06:00

* first try
* remove old template
* finish bart
* finish mbart
* delete unnecessary line
* init pegasus
* save intermediate
* correct pegasus
* finish pegasus
* remove cookie cutter leftover
* add marian
* finish blenderbot
* replace in file
* correctly split blenderbot
* delete "old" folder
* correct "add statement"
* adapt config for tf comp
* correct configs for tf
* remove ipdb
* fix more stuff
* fix mbart
* push pegasus fix
* fix mbart
* more fixes
* fix research projects code
* finish docs for bart, mbart, and marian
* delete unnecessary file
* correct attn typo
* correct configs
* remove pegasus for seq class
* correct peg docs
* correct peg docs
* finish configs
* further improve docs
* add copied from statements to mbart
* fix copied from in mbart
* add copy statements to marian
* add copied from to marian
* add pegasus copied from
* finish pegasus
* finish copied from
* Apply suggestions from code review
* make style
* backward comp blenderbot
* apply lysandres and sylvains suggestions
* apply suggestions
* push last fixes
* fix docs
* fix tok tests
* fix imports code style
* fix doc
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Blenderbot Tokenizers, including common tests for BlenderbotSmallTokenizer."""

import unittest

from transformers.file_utils import cached_property
from transformers.models.blenderbot.tokenization_blenderbot import BlenderbotTokenizer


class Blenderbot3BTokenizerTests(unittest.TestCase):
    @cached_property
    def tokenizer_3b(self):
        # Load the 3B checkpoint's tokenizer once and cache it for all tests.
        return BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

    def test_encode_decode_cycle(self):
        # Encoding followed by decoding should reproduce the input text exactly,
        # including the leading space.
        tok = self.tokenizer_3b
        src_text = " I am a small frog."
        encoded = tok([src_text], padding=False, truncation=False)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        assert src_text == decoded

    def test_3B_tokenization_same_as_parlai(self):
        # ParlAI adds a prefix space before tokenizing, so " Sam" and "Sam"
        # must produce identical input ids.
        assert self.tokenizer_3b.add_prefix_space
        assert self.tokenizer_3b([" Sam", "Sam"]).input_ids == [[5502, 2], [5502, 2]]
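

# Optional convenience entry point, not part of the original test file: a minimal
# sketch (assumption) that lets the tests above be run directly with `python <this file>`.
# In the transformers repository these tests are normally collected and run by pytest.
if __name__ == "__main__":
    unittest.main()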