# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base classes common to both the slow and the fast tokenization classes: PreTrainedTokenizerBase (hosts all the
user-facing encoding methods), SpecialTokensMixin (hosts the special tokens logic) and BatchEncoding (wraps the
dictionary of outputs with special methods for the fast tokenizers).
"""

import copy
import json
import os
import warnings
from collections import OrderedDict, UserDict
from contextlib import contextmanager
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union

import numpy as np

import requests

from .file_utils import (
    ExplicitEnum,
    PaddingStrategy,
    PushToHubMixin,
    TensorType,
    _is_jax,
    _is_numpy,
    _is_tensorflow,
    _is_torch,
    _is_torch_device,
    add_end_docstrings,
    cached_path,
    hf_bucket_url,
    is_flax_available,
    is_offline_mode,
    is_remote_url,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    to_py_obj,
    torch_required,
)
from .utils import logging


if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf
    if is_flax_available():
        import jax.numpy as jnp  # noqa: F401


if is_tokenizers_available():
    from tokenizers import AddedToken
    from tokenizers import Encoding as EncodingFast
else:

    @dataclass(frozen=True, eq=True)
    class AddedToken:
        """
        AddedToken represents a token to be added to a Tokenizer. An AddedToken can have special options defining the
        way it should behave.
        """

        content: str = field(default_factory=str)
        single_word: bool = False
        lstrip: bool = False
        rstrip: bool = False
        normalized: bool = True

        def __getstate__(self):
            return self.__dict__

    @dataclass
    class EncodingFast:
        """This is a dummy class because without the `tokenizers` library we don't have these objects anyway."""

        pass

logger = logging.get_logger(__name__)

VERY_LARGE_INTEGER = int(1e30)  # This is used to set the max input length for a model with infinite size input
LARGE_INTEGER = int(1e20)  # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER

# Define type aliases and NamedTuples
TextInput = str
PreTokenizedInput = List[str]
EncodedInput = List[int]
TextInputPair = Tuple[str, str]
PreTokenizedInputPair = Tuple[List[str], List[str]]
EncodedInputPair = Tuple[List[int], List[int]]


# Slow tokenizers used to be saved in three separate files
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"

# Fast tokenizers (provided by HuggingFace's tokenizers library) can be saved in a single file
FULL_TOKENIZER_FILE = "tokenizer.json"


class TruncationStrategy(ExplicitEnum):
    """
    Possible values for the ``truncation`` argument in :meth:`PreTrainedTokenizerBase.__call__`. Useful for
    tab-completion in an IDE.
    """

    ONLY_FIRST = "only_first"
    ONLY_SECOND = "only_second"
    LONGEST_FIRST = "longest_first"
    DO_NOT_TRUNCATE = "do_not_truncate"


class CharSpan(NamedTuple):
    """
    Character span in the original string.

    Args:
        start (:obj:`int`): Index of the first character in the original string.
        end (:obj:`int`): Index of the character following the last character in the original string.
    """

    start: int
    end: int


class TokenSpan(NamedTuple):
    """
    Token span in an encoded string (list of tokens).

    Args:
        start (:obj:`int`): Index of the first token in the span.
        end (:obj:`int`): Index of the token following the last token in the span.
    """

    start: int
    end: int

class BatchEncoding(UserDict):
    """
    Holds the output of the :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.encode_plus` and
    :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.batch_encode_plus` methods (tokens,
    attention_masks, etc).

    This class is derived from a python dictionary and can be used as a dictionary. In addition, this class exposes
    utility methods to map from word/character space to token space.

    Args:
        data (:obj:`dict`):
            Dictionary of lists/arrays/tensors returned by the encode/batch_encode methods ('input_ids',
            'attention_mask', etc.).
        encoding (:obj:`tokenizers.Encoding` or :obj:`Sequence[tokenizers.Encoding]`, `optional`):
            If the tokenizer is a fast tokenizer which outputs additional information like mapping from word/character
            space to token space, the :obj:`tokenizers.Encoding` instance or list of instances (for batches) holds
            this information.
        tensor_type (:obj:`Union[None, str, TensorType]`, `optional`):
            You can give a tensor_type here to convert the lists of integers into PyTorch/TensorFlow/Numpy tensors at
            initialization.
        prepend_batch_axis (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to add a batch axis when converting to tensors (see :obj:`tensor_type` above).
        n_sequences (:obj:`Optional[int]`, `optional`):
            The number of sequences used to generate each sample in the batch (:obj:`1` for a single sentence,
            :obj:`2` for a pair of sentences). Inferred from :obj:`encoding` when not provided.
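
    Example (an illustrative sketch, not part of the API contract; it assumes a fast tokenizer checkpoint such as
    ``bert-base-uncased`` can be loaded)::

        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        encoding = tokenizer("Hello world!")

        # BatchEncoding behaves like a dict of lists/tensors ...
        input_ids = encoding["input_ids"]
        # ... and also exposes its keys as attributes
        attention_mask = encoding.attention_mask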
"""
|
|
|
|
def __init__(
|
|
self,
|
|
data: Optional[Dict[str, Any]] = None,
|
|
encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None,
|
|
tensor_type: Union[None, str, TensorType] = None,
|
|
prepend_batch_axis: bool = False,
|
|
n_sequences: Optional[int] = None,
|
|
):
|
|
super().__init__(data)
|
|
|
|
if isinstance(encoding, EncodingFast):
|
|
encoding = [encoding]
|
|
|
|
self._encodings = encoding
|
|
|
|
if n_sequences is None and encoding is not None and len(encoding):
|
|
n_sequences = encoding[0].n_sequences
|
|
|
|
self._n_sequences = n_sequences
|
|
|
|
self.convert_to_tensors(tensor_type=tensor_type, prepend_batch_axis=prepend_batch_axis)
|
|
|
|
@property
|
|
def n_sequences(self) -> Optional[int]:
|
|
"""
|
|
:obj:`Optional[int]`: The number of sequences used to generate each sample from the batch encoded in this
|
|
:class:`~transformers.BatchEncoding`. Currently can be one of :obj:`None` (unknown), :obj:`1` (a single
|
|
sentence) or :obj:`2` (a pair of sentences)
|
|
"""
|
|
return self._n_sequences
|
|
|
|
@property
|
|
def is_fast(self) -> bool:
|
|
"""
|
|
:obj:`bool`: Indicate whether this :class:`~transformers.BatchEncoding` was generated from the result of a
|
|
:class:`~transformers.PreTrainedTokenizerFast` or not.
|
|
"""
|
|
return self._encodings is not None
|
|
|
|
def __getitem__(self, item: Union[int, str]) -> Union[Any, EncodingFast]:
|
|
"""
|
|
If the key is a string, returns the value of the dict associated to :obj:`key` ('input_ids', 'attention_mask',
|
|
etc.).
|
|
|
|
If the key is an integer, get the :obj:`tokenizers.Encoding` for batch item with index :obj:`key`.
|
|
"""
|
|
if isinstance(item, str):
|
|
return self.data[item]
|
|
elif self._encodings is not None:
|
|
return self._encodings[item]
|
|
else:
|
|
raise KeyError(
|
|
"Indexing with integers (to access backend Encoding for a given batch index) "
|
|
"is not available when using Python based tokenizers"
|
|
)
|
|
|
|
def __getattr__(self, item: str):
|
|
try:
|
|
return self.data[item]
|
|
except KeyError:
|
|
raise AttributeError
|
|
|
|
def __getstate__(self):
|
|
return {"data": self.data, "encodings": self._encodings}
|
|
|
|
def __setstate__(self, state):
|
|
if "data" in state:
|
|
self.data = state["data"]
|
|
|
|
if "encodings" in state:
|
|
self._encodings = state["encodings"]
|
|
|
|
def keys(self):
|
|
return self.data.keys()
|
|
|
|
def values(self):
|
|
return self.data.values()
|
|
|
|
def items(self):
|
|
return self.data.items()
|
|
|
|
# After this point:
|
|
# Extended properties and methods only available for fast (Rust-based) tokenizers
|
|
# provided by HuggingFace tokenizers library.
|
|
|
|
@property
|
|
def encodings(self) -> Optional[List[EncodingFast]]:
|
|
"""
|
|
:obj:`Optional[List[tokenizers.Encoding]]`: The list all encodings from the tokenization process. Returns
|
|
:obj:`None` if the input was tokenized through Python (i.e., not a fast) tokenizer.
|
|
"""
|
|
return self._encodings
|
|
|
|
def tokens(self, batch_index: int = 0) -> List[str]:
|
|
"""
|
|
Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion to
|
|
integer indices) at a given batch index (only works for the output of a fast tokenizer).
|
|
|
|
Args:
|
|
batch_index (:obj:`int`, `optional`, defaults to 0): The index to access in the batch.
|
|
|
|
Returns:
|
|
:obj:`List[str]`: The list of tokens at that index.
|
|
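
        Example (illustrative sketch; assumes the ``bert-base-uncased`` fast tokenizer can be loaded)::

            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
            encoding = tokenizer("Hello world!")
            encoding.tokens()  # e.g. ['[CLS]', 'hello', 'world', '!', '[SEP]']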
"""
|
|
if not self._encodings:
|
|
raise ValueError("tokens() is not available when using Python-based tokenizers")
|
|
return self._encodings[batch_index].tokens
|
|
|
|
def sequence_ids(self, batch_index: int = 0) -> List[Optional[int]]:
|
|
"""
|
|
Return a list mapping the tokens to the id of their original sentences:
|
|
|
|
- :obj:`None` for special tokens added around or between sequences,
|
|
- :obj:`0` for tokens corresponding to words in the first sequence,
|
|
- :obj:`1` for tokens corresponding to words in the second sequence when a pair of sequences was jointly
|
|
encoded.
|
|
|
|
Args:
|
|
batch_index (:obj:`int`, `optional`, defaults to 0): The index to access in the batch.
|
|
|
|
Returns:
|
|
:obj:`List[Optional[int]]`: A list indicating the sequence id corresponding to each token. Special tokens
|
|
added by the tokenizer are mapped to :obj:`None` and other tokens are mapped to the index of their
|
|
corresponding sequence.
|
|
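
        Example (illustrative sketch; assumes the ``bert-base-uncased`` fast tokenizer can be loaded)::

            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
            encoding = tokenizer("How are you?", "I am fine.")
            encoding.sequence_ids()  # e.g. [None, 0, 0, 0, 0, None, 1, 1, 1, 1, None]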
"""
|
|
if not self._encodings:
|
|
raise ValueError("sequence_ids() is not available when using Python-based tokenizers")
|
|
return self._encodings[batch_index].sequence_ids
|
|
|
|
def words(self, batch_index: int = 0) -> List[Optional[int]]:
|
|
"""
|
|
Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.
|
|
|
|
Args:
|
|
batch_index (:obj:`int`, `optional`, defaults to 0): The index to access in the batch.
|
|
|
|
Returns:
|
|
:obj:`List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by
|
|
the tokenizer are mapped to :obj:`None` and other tokens are mapped to the index of their corresponding
|
|
word (several tokens will be mapped to the same word index if they are parts of that word).
|
|
"""
|
|
if not self._encodings:
|
|
raise ValueError("words() is not available when using Python-based tokenizers")
|
|
warnings.warn(
|
|
"`BatchEncoding.words()` property is deprecated and should be replaced with the identical, "
|
|
"but more self-explanatory `BatchEncoding.word_ids()` property.",
|
|
FutureWarning,
|
|
)
|
|
return self.word_ids(batch_index)
|
|
|
|
def word_ids(self, batch_index: int = 0) -> List[Optional[int]]:
|
|
"""
|
|
Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.
|
|
|
|
Args:
|
|
batch_index (:obj:`int`, `optional`, defaults to 0): The index to access in the batch.
|
|
|
|
Returns:
|
|
:obj:`List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by
|
|
the tokenizer are mapped to :obj:`None` and other tokens are mapped to the index of their corresponding
|
|
word (several tokens will be mapped to the same word index if they are parts of that word).
|
|
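
        Example (illustrative sketch; assumes the ``bert-base-uncased`` fast tokenizer can be loaded)::

            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
            encoding = tokenizer("Tokenizers rock")
            encoding.tokens()    # e.g. ['[CLS]', 'token', '##izer', '##s', 'rock', '[SEP]']
            encoding.word_ids()  # e.g. [None, 0, 0, 0, 1, None]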
"""
|
|
if not self._encodings:
|
|
raise ValueError("word_ids() is not available when using Python-based tokenizers")
|
|
return self._encodings[batch_index].word_ids
|
|
|
|
def token_to_sequence(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
|
|
"""
|
|
Get the index of the sequence represented by the given token. In the general use case, this method returns
|
|
:obj:`0` for a single sequence or the first sequence of a pair, and :obj:`1` for the second sequence of a pair
|
|
|
|
Can be called as:
|
|
|
|
- ``self.token_to_sequence(token_index)`` if batch size is 1
|
|
- ``self.token_to_sequence(batch_index, token_index)`` if batch size is greater than 1
|
|
|
|
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.,
|
|
words are defined by the user). In this case it allows to easily associate encoded tokens with provided
|
|
tokenized words.
|
|
|
|
Args:
|
|
batch_or_token_index (:obj:`int`):
|
|
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
|
|
the token in the sequence.
|
|
token_index (:obj:`int`, `optional`):
|
|
If a batch index is provided in `batch_or_token_index`, this can be the index of the token in the
|
|
sequence.
|
|
|
|
Returns:
|
|
:obj:`int`: Index of the word in the input sequence.
|
|
"""
|
|
|
|
if not self._encodings:
|
|
raise ValueError("token_to_sequence() is not available when using Python based tokenizers")
|
|
if token_index is not None:
|
|
batch_index = batch_or_token_index
|
|
else:
|
|
batch_index = 0
|
|
token_index = batch_or_token_index
|
|
if batch_index < 0:
|
|
batch_index = self._batch_size + batch_index
|
|
if token_index < 0:
|
|
token_index = self._seq_len + token_index
|
|
return self._encodings[batch_index].token_to_sequence(token_index)
|
|
|
|
def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
|
|
"""
|
|
Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch.
|
|
|
|
Can be called as:
|
|
|
|
- ``self.token_to_word(token_index)`` if batch size is 1
|
|
- ``self.token_to_word(batch_index, token_index)`` if batch size is greater than 1
|
|
|
|
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.,
|
|
words are defined by the user). In this case it allows to easily associate encoded tokens with provided
|
|
tokenized words.
|
|
|
|
Args:
|
|
batch_or_token_index (:obj:`int`):
|
|
Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of
|
|
the token in the sequence.
|
|
token_index (:obj:`int`, `optional`):
|
|
If a batch index is provided in `batch_or_token_index`, this can be the index of the token in the
|
|
sequence.
|
|
|
|
Returns:
|
|
:obj:`int`: Index of the word in the input sequence.
|
|
"""
|
|
|
|
if not self._encodings:
|
|
raise ValueError("token_to_word() is not available when using Python based tokenizers")
|
|
if token_index is not None:
|
|
batch_index = batch_or_token_index
|
|
else:
|
|
batch_index = 0
|
|
token_index = batch_or_token_index
|
|
if batch_index < 0:
|
|
batch_index = self._batch_size + batch_index
|
|
if token_index < 0:
|
|
token_index = self._seq_len + token_index
|
|
return self._encodings[batch_index].token_to_word(token_index)
|
|
|
|
def word_to_tokens(
|
|
self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0
|
|
) -> Optional[TokenSpan]:
|
|
"""
|
|
Get the encoded token span corresponding to a word in a sequence of the batch.
|
|
|
|
Token spans are returned as a :class:`~transformers.tokenization_utils_base.TokenSpan` with:
|
|
|
|
- **start** -- Index of the first token.
|
|
- **end** -- Index of the token following the last token.
|
|
|
|
Can be called as:
|
|
|
|
- ``self.word_to_tokens(word_index, sequence_index: int = 0)`` if batch size is 1
|
|
- ``self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)`` if batch size is greater or equal
|
|
to 1
|
|
|
|
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
|
|
are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized
|
|
words.
|
|
|
|
Args:
|
|
batch_or_word_index (:obj:`int`):
|
|
Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
|
|
the word in the sequence.
|
|
word_index (:obj:`int`, `optional`):
|
|
If a batch index is provided in `batch_or_token_index`, this can be the index of the word in the
|
|
sequence.
|
|
sequence_index (:obj:`int`, `optional`, defaults to 0):
|
|
If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
|
|
or 1) the provided word index belongs to.
|
|
|
|
Returns:
|
|
Optional :class:`~transformers.tokenization_utils_base.TokenSpan` Span of tokens in the encoded sequence.
|
|
Returns :obj:`None` if no tokens correspond to the word.
|
|
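
        Example (illustrative sketch; assumes the ``bert-base-uncased`` fast tokenizer can be loaded)::

            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
            encoding = tokenizer(["Hugging", "Face", "Transformers"], is_split_into_words=True)
            span = encoding.word_to_tokens(2)  # TokenSpan for the word "Transformers" (None if it has no tokens)
            if span is not None:
                tokens_for_word = encoding.tokens()[span.start : span.end]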
"""
|
|
|
|
if not self._encodings:
|
|
raise ValueError("word_to_tokens() is not available when using Python based tokenizers")
|
|
if word_index is not None:
|
|
batch_index = batch_or_word_index
|
|
else:
|
|
batch_index = 0
|
|
word_index = batch_or_word_index
|
|
if batch_index < 0:
|
|
batch_index = self._batch_size + batch_index
|
|
if word_index < 0:
|
|
word_index = self._seq_len + word_index
|
|
span = self._encodings[batch_index].word_to_tokens(word_index, sequence_index)
|
|
return TokenSpan(*span) if span is not None else None
|
|
|
|
def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan:
|
|
"""
|
|
Get the character span corresponding to an encoded token in a sequence of the batch.
|
|
|
|
Character spans are returned as a :class:`~transformers.tokenization_utils_base.CharSpan` with:
|
|
|
|
- **start** -- Index of the first character in the original string associated to the token.
|
|
- **end** -- Index of the character following the last character in the original string associated to the
|
|
token.
|
|
|
|
Can be called as:
|
|
|
|
- ``self.token_to_chars(token_index)`` if batch size is 1
|
|
- ``self.token_to_chars(batch_index, token_index)`` if batch size is greater or equal to 1
|
|
|
|
Args:
|
|
batch_or_token_index (:obj:`int`):
|
|
Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of
|
|
the token in the sequence.
|
|
token_index (:obj:`int`, `optional`):
|
|
If a batch index is provided in `batch_or_token_index`, this can be the index of the token or tokens in
|
|
the sequence.
|
|
|
|
Returns:
|
|
:class:`~transformers.tokenization_utils_base.CharSpan`: Span of characters in the original string.
|
|
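
        Example (illustrative sketch; assumes the ``bert-base-uncased`` fast tokenizer can be loaded)::

            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
            text = "Hello world!"
            encoding = tokenizer(text)
            span = encoding.token_to_chars(1)  # character span of the first non-special token
            text[span.start : span.end]  # e.g. 'Hello'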
"""
|
|
|
|
if not self._encodings:
|
|
raise ValueError("token_to_chars() is not available when using Python based tokenizers")
|
|
if token_index is not None:
|
|
batch_index = batch_or_token_index
|
|
else:
|
|
batch_index = 0
|
|
token_index = batch_or_token_index
|
|
return CharSpan(*(self._encodings[batch_index].token_to_chars(token_index)))
|
|
|
|
def char_to_token(
|
|
self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0
|
|
) -> int:
|
|
"""
|
|
Get the index of the token in the encoded output comprising a character in the original string for a sequence
|
|
of the batch.
|
|
|
|
Can be called as:
|
|
|
|
- ``self.char_to_token(char_index)`` if batch size is 1
|
|
- ``self.char_to_token(batch_index, char_index)`` if batch size is greater or equal to 1
|
|
|
|
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
|
|
are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized
|
|
words.
|
|
|
|
Args:
|
|
batch_or_char_index (:obj:`int`):
|
|
Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of
|
|
the word in the sequence
|
|
char_index (:obj:`int`, `optional`):
|
|
If a batch index is provided in `batch_or_token_index`, this can be the index of the word in the
|
|
sequence.
|
|
sequence_index (:obj:`int`, `optional`, defaults to 0):
|
|
If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
|
|
or 1) the provided character index belongs to.
|
|
|
|
|
|
Returns:
|
|
:obj:`int`: Index of the token.
|
|
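
        Example (illustrative sketch; assumes the ``bert-base-uncased`` fast tokenizer can be loaded)::

            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
            encoding = tokenizer("Hello world!")
            token_index = encoding.char_to_token(6)  # index of the token covering the character "w"
            encoding.tokens()[token_index]  # e.g. 'world'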
"""
|
|
|
|
if not self._encodings:
|
|
raise ValueError("char_to_token() is not available when using Python based tokenizers")
|
|
if char_index is not None:
|
|
batch_index = batch_or_char_index
|
|
else:
|
|
batch_index = 0
|
|
char_index = batch_or_char_index
|
|
return self._encodings[batch_index].char_to_token(char_index, sequence_index)
|
|
|
|
def word_to_chars(
|
|
self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0
|
|
) -> CharSpan:
|
|
"""
|
|
Get the character span in the original string corresponding to given word in a sequence of the batch.
|
|
|
|
Character spans are returned as a CharSpan NamedTuple with:
|
|
|
|
- start: index of the first character in the original string
|
|
- end: index of the character following the last character in the original string
|
|
|
|
Can be called as:
|
|
|
|
- ``self.word_to_chars(word_index)`` if batch size is 1
|
|
- ``self.word_to_chars(batch_index, word_index)`` if batch size is greater or equal to 1
|
|
|
|
Args:
|
|
batch_or_word_index (:obj:`int`):
|
|
Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of
|
|
the word in the sequence
|
|
word_index (:obj:`int`, `optional`):
|
|
If a batch index is provided in `batch_or_token_index`, this can be the index of the word in the
|
|
sequence.
|
|
sequence_index (:obj:`int`, `optional`, defaults to 0):
|
|
If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
|
|
or 1) the provided word index belongs to.
|
|
|
|
Returns:
|
|
:obj:`CharSpan` or :obj:`List[CharSpan]`: Span(s) of the associated character or characters in the string.
|
|
CharSpan are NamedTuple with:
|
|
|
|
- start: index of the first character associated to the token in the original string
|
|
- end: index of the character following the last character associated to the token in the original
|
|
string
|
|
"""
|
|
|
|
if not self._encodings:
|
|
raise ValueError("word_to_chars() is not available when using Python based tokenizers")
|
|
if word_index is not None:
|
|
batch_index = batch_or_word_index
|
|
else:
|
|
batch_index = 0
|
|
word_index = batch_or_word_index
|
|
return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index, sequence_index)))
|
|
|
|
def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0) -> int:
|
|
"""
|
|
Get the word in the original string corresponding to a character in the original string of a sequence of the
|
|
batch.
|
|
|
|
Can be called as:
|
|
|
|
- ``self.char_to_word(char_index)`` if batch size is 1
|
|
- ``self.char_to_word(batch_index, char_index)`` if batch size is greater than 1
|
|
|
|
This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
|
|
are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized
|
|
words.
|
|
|
|
Args:
|
|
batch_or_char_index (:obj:`int`):
|
|
Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of
|
|
the character in the original string.
|
|
char_index (:obj:`int`, `optional`):
|
|
If a batch index is provided in `batch_or_token_index`, this can be the index of the character in the
|
|
original string.
|
|
sequence_index (:obj:`int`, `optional`, defaults to 0):
|
|
If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
|
|
or 1) the provided character index belongs to.
|
|
|
|
|
|
Returns:
|
|
:obj:`int` or :obj:`List[int]`: Index or indices of the associated encoded token(s).
|
|
"""
|
|
|
|
if not self._encodings:
|
|
raise ValueError("char_to_word() is not available when using Python based tokenizers")
|
|
if char_index is not None:
|
|
batch_index = batch_or_char_index
|
|
else:
|
|
batch_index = 0
|
|
char_index = batch_or_char_index
|
|
return self._encodings[batch_index].char_to_word(char_index, sequence_index)
|
|
|
|
    def convert_to_tensors(
        self, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
    ):
        """
        Convert the inner content to tensors.

        Args:
            tensor_type (:obj:`str` or :class:`~transformers.file_utils.TensorType`, `optional`):
                The type of tensors to use. If :obj:`str`, should be one of the values of the enum
                :class:`~transformers.file_utils.TensorType`. If :obj:`None`, no modification is done.
            prepend_batch_axis (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to add the batch dimension during the conversion.
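
        Example (illustrative sketch; assumes the ``bert-base-uncased`` fast tokenizer and PyTorch are both
        available)::

            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
            encoding = tokenizer("Hello world!")
            encoding.convert_to_tensors(tensor_type="pt", prepend_batch_axis=True)
            encoding["input_ids"].shape  # e.g. torch.Size([1, 5])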
"""
|
|
if tensor_type is None:
|
|
return self
|
|
|
|
# Convert to TensorType
|
|
if not isinstance(tensor_type, TensorType):
|
|
tensor_type = TensorType(tensor_type)
|
|
|
|
# Get a function reference for the correct framework
|
|
if tensor_type == TensorType.TENSORFLOW:
|
|
if not is_tf_available():
|
|
raise ImportError(
|
|
"Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
|
|
)
|
|
import tensorflow as tf
|
|
|
|
as_tensor = tf.constant
|
|
is_tensor = tf.is_tensor
|
|
elif tensor_type == TensorType.PYTORCH:
|
|
if not is_torch_available():
|
|
raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
|
|
import torch
|
|
|
|
as_tensor = torch.tensor
|
|
is_tensor = torch.is_tensor
|
|
elif tensor_type == TensorType.JAX:
|
|
if not is_flax_available():
|
|
raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
|
|
import jax.numpy as jnp # noqa: F811
|
|
|
|
as_tensor = jnp.array
|
|
is_tensor = _is_jax
|
|
else:
|
|
as_tensor = np.asarray
|
|
is_tensor = _is_numpy
|
|
# (mfuntowicz: This code is unreachable)
|
|
# else:
|
|
# raise ImportError(
|
|
# f"Unable to convert output to tensors format {tensor_type}"
|
|
# )
|
|
|
|
# Do the tensor conversion in batch
|
|
for key, value in self.items():
|
|
try:
|
|
if prepend_batch_axis:
|
|
value = [value]
|
|
|
|
if not is_tensor(value):
|
|
tensor = as_tensor(value)
|
|
|
|
# Removing this for now in favor of controlling the shape with `prepend_batch_axis`
|
|
# # at-least2d
|
|
# if tensor.ndim > 2:
|
|
# tensor = tensor.squeeze(0)
|
|
# elif tensor.ndim < 2:
|
|
# tensor = tensor[None, :]
|
|
|
|
self[key] = tensor
|
|
except: # noqa E722
|
|
if key == "overflowing_tokens":
|
|
raise ValueError(
|
|
"Unable to create tensor returning overflowing tokens of different lengths. "
|
|
"Please see if a fast version of this tokenizer is available to have this feature available."
|
|
)
|
|
raise ValueError(
|
|
"Unable to create tensor, you should probably activate truncation and/or padding "
|
|
"with 'padding=True' 'truncation=True' to have batched tensors with the same length."
|
|
)
|
|
|
|
return self
|
|
|
|
@torch_required
|
|
def to(self, device: Union[str, "torch.device"]) -> "BatchEncoding":
|
|
"""
|
|
Send all values to device by calling :obj:`v.to(device)` (PyTorch only).
|
|
|
|
Args:
|
|
device (:obj:`str` or :obj:`torch.device`): The device to put the tensors on.
|
|
|
|
Returns:
|
|
:class:`~transformers.BatchEncoding`: The same instance after modification.
|
|
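
        Example (illustrative sketch; assumes PyTorch, a CUDA device and the ``bert-base-uncased`` fast tokenizer are
        all available)::

            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
            encoding = tokenizer("Hello world!", return_tensors="pt")
            encoding = encoding.to("cuda")  # every tensor in the BatchEncoding is now on the GPU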
"""
|
|
|
|
# This check catches things like APEX blindly calling "to" on all inputs to a module
|
|
# Otherwise it passes the casts down and casts the LongTensor containing the token idxs
|
|
# into a HalfTensor
|
|
if isinstance(device, str) or _is_torch_device(device) or isinstance(device, int):
|
|
self.data = {k: v.to(device=device) for k, v in self.data.items()}
|
|
else:
|
|
logger.warning(f"Attempting to cast a BatchEncoding to type {str(device)}. This is not supported.")
|
|
return self
|
|
|
|
|
|
class SpecialTokensMixin:
|
|
"""
|
|
A mixin derived by :class:`~transformers.PreTrainedTokenizer` and :class:`~transformers.PreTrainedTokenizerFast` to
|
|
handle specific behaviors related to special tokens. In particular, this class hold the attributes which can be
|
|
used to directly access these special tokens in a model-independent manner and allow to set and update the special
|
|
tokens.
|
|
|
|
Args:
|
|
bos_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
|
|
A special token representing the beginning of a sentence.
|
|
eos_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
|
|
A special token representing the end of a sentence.
|
|
unk_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
|
|
A special token representing an out-of-vocabulary token.
|
|
sep_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
|
|
A special token separating two different sentences in the same input (used by BERT for instance).
|
|
pad_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
|
|
A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
|
|
attention mechanisms or loss computation.
|
|
cls_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
|
|
A special token representing the class of the input (used by BERT for instance).
|
|
mask_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
|
|
A special token representing a masked token (used by masked-language modeling pretraining objectives, like
|
|
BERT).
|
|
additional_special_tokens (tuple or list of :obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
|
|
A tuple or a list of additional special tokens.
|
|
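
    Example (an illustrative sketch; it assumes the ``bert-base-uncased`` tokenizer can be loaded)::

        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        # Special tokens can be read in a model-independent way ...
        tokenizer.mask_token     # e.g. '[MASK]'
        tokenizer.mask_token_id  # e.g. 103
        # ... and set or extended through the same attributes
        tokenizer.add_special_tokens({"additional_special_tokens": ["<new_special>"]})
        # (remember to resize the model's token embeddings if new tokens were added)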
"""
|
|
|
|
SPECIAL_TOKENS_ATTRIBUTES = [
|
|
"bos_token",
|
|
"eos_token",
|
|
"unk_token",
|
|
"sep_token",
|
|
"pad_token",
|
|
"cls_token",
|
|
"mask_token",
|
|
"additional_special_tokens",
|
|
]
|
|
|
|
def __init__(self, verbose=True, **kwargs):
|
|
self._bos_token = None
|
|
self._eos_token = None
|
|
self._unk_token = None
|
|
self._sep_token = None
|
|
self._pad_token = None
|
|
self._cls_token = None
|
|
self._mask_token = None
|
|
self._pad_token_type_id = 0
|
|
self._additional_special_tokens = []
|
|
self.verbose = verbose
|
|
|
|
# We directly set the hidden value to allow initialization with special tokens
|
|
# which are not yet in the vocabulary. Necessary for serialization/de-serialization
|
|
# TODO clean this up at some point (probably by switching to fast tokenizers)
|
|
for key, value in kwargs.items():
|
|
if value is None:
|
|
continue
|
|
if key in self.SPECIAL_TOKENS_ATTRIBUTES:
|
|
if key == "additional_special_tokens":
|
|
assert isinstance(value, (list, tuple)), f"Value {value} is not a list or tuple"
|
|
assert all(isinstance(t, str) for t in value), "One of the tokens is not a string"
|
|
setattr(self, key, value)
|
|
elif isinstance(value, (str, AddedToken)):
|
|
setattr(self, key, value)
|
|
else:
|
|
raise TypeError(f"special token {key} has to be either str or AddedToken but got: {type(value)}")
|
|
|
|
def sanitize_special_tokens(self) -> int:
|
|
"""
|
|
Make sure that all the special tokens attributes of the tokenizer (:obj:`tokenizer.mask_token`,
|
|
:obj:`tokenizer.cls_token`, etc.) are in the vocabulary.
|
|
|
|
Add the missing ones to the vocabulary if needed.
|
|
|
|
Return:
|
|
:obj:`int`: The number of tokens added in the vocabulary during the operation.
|
|
"""
|
|
return self.add_tokens(self.all_special_tokens_extended, special_tokens=True)
|
|
|
|
def add_special_tokens(self, special_tokens_dict: Dict[str, Union[str, AddedToken]]) -> int:
|
|
"""
|
|
Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If
|
|
special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the
|
|
current vocabulary).
|
|
|
|
.. Note::
|
|
When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of
|
|
the model so that its embedding matrix matches the tokenizer.
|
|
|
|
In order to do that, please use the :meth:`~transformers.PreTrainedModel.resize_token_embeddings` method.
|
|
|
|
Using :obj:`add_special_tokens` will ensure your special tokens can be used in several ways:
|
|
|
|
- Special tokens are carefully handled by the tokenizer (they are never split).
|
|
- You can easily refer to special tokens using tokenizer class attributes like :obj:`tokenizer.cls_token`. This
|
|
makes it easy to develop model-agnostic training and fine-tuning scripts.
|
|
|
|
When possible, special tokens are already registered for provided pretrained models (for instance
|
|
:class:`~transformers.BertTokenizer` :obj:`cls_token` is already registered to be :obj`'[CLS]'` and XLM's one
|
|
is also registered to be :obj:`'</s>'`).
|
|
|
|
Args:
|
|
special_tokens_dict (dictionary `str` to `str` or :obj:`tokenizers.AddedToken`):
|
|
Keys should be in the list of predefined special attributes: [``bos_token``, ``eos_token``,
|
|
``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``,
|
|
``additional_special_tokens``].
|
|
|
|
Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer
|
|
assign the index of the ``unk_token`` to them).
|
|
|
|
Returns:
|
|
:obj:`int`: Number of tokens added to the vocabulary.
|
|
|
|
Examples::
|
|
|
|
# Let's see how to add a new classification token to GPT-2
|
|
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
|
|
model = GPT2Model.from_pretrained('gpt2')
|
|
|
|
special_tokens_dict = {'cls_token': '<CLS>'}
|
|
|
|
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
|
|
print('We have added', num_added_toks, 'tokens')
|
|
# Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
|
|
model.resize_token_embeddings(len(tokenizer))
|
|
|
|
assert tokenizer.cls_token == '<CLS>'
|
|
"""
|
|
if not special_tokens_dict:
|
|
return 0
|
|
|
|
added_tokens = 0
|
|
for key, value in special_tokens_dict.items():
|
|
assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f"Key {key} is not a special token"
|
|
|
|
if self.verbose:
|
|
logger.info(f"Assigning {value} to the {key} key of the tokenizer")
|
|
setattr(self, key, value)
|
|
|
|
if key == "additional_special_tokens":
|
|
assert isinstance(value, (list, tuple)) and all(
|
|
isinstance(t, (str, AddedToken)) for t in value
|
|
), f"Tokens {value} for key {key} should all be str or AddedToken instances"
|
|
added_tokens += self.add_tokens(value, special_tokens=True)
|
|
else:
|
|
assert isinstance(
|
|
value, (str, AddedToken)
|
|
), f"Token {value} for key {key} should be a str or an AddedToken instance"
|
|
added_tokens += self.add_tokens([value], special_tokens=True)
|
|
|
|
return added_tokens
|
|
|
|
    def add_tokens(
        self, new_tokens: Union[str, AddedToken, List[Union[str, AddedToken]]], special_tokens: bool = False
    ) -> int:
        """
        Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added
        to it with indices starting from the length of the current vocabulary.

        .. Note::
            When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix
            of the model so that its embedding matrix matches the tokenizer.

            In order to do that, please use the :meth:`~transformers.PreTrainedModel.resize_token_embeddings` method.

        Args:
            new_tokens (:obj:`str`, :obj:`tokenizers.AddedToken` or a list of `str` or :obj:`tokenizers.AddedToken`):
                Tokens are only added if they are not already in the vocabulary. :obj:`tokenizers.AddedToken` wraps a
                string token to let you personalize its behavior: whether this token should only match against a
                single word, whether this token should strip all potential whitespaces on the left side, whether this
                token should strip all potential whitespaces on the right side, etc.
            special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Can be used to specify if the token is a special token. This mostly changes the normalization behavior
                (special tokens like CLS or [MASK] are usually not lower-cased for instance).

                See details for :obj:`tokenizers.AddedToken` in HuggingFace tokenizers library.

        Returns:
            :obj:`int`: Number of tokens added to the vocabulary.

        Examples::

            # Let's see how to increase the vocabulary of Bert model and tokenizer
            tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
            model = BertModel.from_pretrained('bert-base-uncased')

            num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
            print('We have added', num_added_toks, 'tokens')
            # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
            model.resize_token_embeddings(len(tokenizer))
        """
        if not new_tokens:
            return 0

        if not isinstance(new_tokens, (list, tuple)):
            new_tokens = [new_tokens]

        return self._add_tokens(new_tokens, special_tokens=special_tokens)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        raise NotImplementedError

    @property
    def bos_token(self) -> str:
        """
        :obj:`str`: Beginning of sentence token. Log an error if used while not having been set.
        """
        if self._bos_token is None and self.verbose:
            logger.error("Using bos_token, but it is not set yet.")
            return None
        return str(self._bos_token)

    @property
    def eos_token(self) -> str:
        """
        :obj:`str`: End of sentence token. Log an error if used while not having been set.
        """
        if self._eos_token is None and self.verbose:
            logger.error("Using eos_token, but it is not set yet.")
            return None
        return str(self._eos_token)

    @property
    def unk_token(self) -> str:
        """
        :obj:`str`: Unknown token. Log an error if used while not having been set.
        """
        if self._unk_token is None and self.verbose:
            logger.error("Using unk_token, but it is not set yet.")
            return None
        return str(self._unk_token)

    @property
    def sep_token(self) -> str:
        """
        :obj:`str`: Separation token, to separate context and query in an input sequence. Log an error if used while
        not having been set.
        """
        if self._sep_token is None and self.verbose:
            logger.error("Using sep_token, but it is not set yet.")
            return None
        return str(self._sep_token)

    @property
    def pad_token(self) -> str:
        """
        :obj:`str`: Padding token. Log an error if used while not having been set.
        """
        if self._pad_token is None and self.verbose:
            logger.error("Using pad_token, but it is not set yet.")
            return None
        return str(self._pad_token)

    @property
    def cls_token(self) -> str:
        """
        :obj:`str`: Classification token, to extract a summary of an input sequence leveraging self-attention along
        the full depth of the model. Log an error if used while not having been set.
        """
        if self._cls_token is None and self.verbose:
            logger.error("Using cls_token, but it is not set yet.")
            return None
        return str(self._cls_token)

    @property
    def mask_token(self) -> str:
        """
        :obj:`str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while
        not having been set.
        """
        if self._mask_token is None and self.verbose:
            logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @property
    def additional_special_tokens(self) -> List[str]:
        """
        :obj:`List[str]`: All the additional special tokens you may want to use. Log an error if used while not having
        been set.
        """
        if self._additional_special_tokens is None and self.verbose:
            logger.error("Using additional_special_tokens, but it is not set yet.")
            return None
        return [str(tok) for tok in self._additional_special_tokens]

    @bos_token.setter
    def bos_token(self, value):
        self._bos_token = value

    @eos_token.setter
    def eos_token(self, value):
        self._eos_token = value

    @unk_token.setter
    def unk_token(self, value):
        self._unk_token = value

    @sep_token.setter
    def sep_token(self, value):
        self._sep_token = value

    @pad_token.setter
    def pad_token(self, value):
        self._pad_token = value

    @cls_token.setter
    def cls_token(self, value):
        self._cls_token = value

    @mask_token.setter
    def mask_token(self, value):
        self._mask_token = value

    @additional_special_tokens.setter
    def additional_special_tokens(self, value):
        self._additional_special_tokens = value

    @property
    def bos_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the beginning of sentence token in the vocabulary. Returns :obj:`None` if the
        token has not been set.
        """
        if self._bos_token is None:
            return None
        return self.convert_tokens_to_ids(self.bos_token)

    @property
    def eos_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the end of sentence token in the vocabulary. Returns :obj:`None` if the token has
        not been set.
        """
        if self._eos_token is None:
            return None
        return self.convert_tokens_to_ids(self.eos_token)

    @property
    def unk_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the unknown token in the vocabulary. Returns :obj:`None` if the token has not been
        set.
        """
        if self._unk_token is None:
            return None
        return self.convert_tokens_to_ids(self.unk_token)

    @property
    def sep_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the separation token in the vocabulary, to separate context and query in an input
        sequence. Returns :obj:`None` if the token has not been set.
        """
        if self._sep_token is None:
            return None
        return self.convert_tokens_to_ids(self.sep_token)

    @property
    def pad_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the padding token in the vocabulary. Returns :obj:`None` if the token has not been
        set.
        """
        if self._pad_token is None:
            return None
        return self.convert_tokens_to_ids(self.pad_token)

    @property
    def pad_token_type_id(self) -> int:
        """
        :obj:`int`: Id of the padding token type in the vocabulary.
        """
        return self._pad_token_type_id

    @property
    def cls_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the classification token in the vocabulary, to extract a summary of an input
        sequence leveraging self-attention along the full depth of the model.

        Returns :obj:`None` if the token has not been set.
        """
        if self._cls_token is None:
            return None
        return self.convert_tokens_to_ids(self.cls_token)

    @property
    def mask_token_id(self) -> Optional[int]:
        """
        :obj:`Optional[int]`: Id of the mask token in the vocabulary, used when training a model with masked-language
        modeling. Returns :obj:`None` if the token has not been set.
        """
        if self._mask_token is None:
            return None
        return self.convert_tokens_to_ids(self.mask_token)

    @property
    def additional_special_tokens_ids(self) -> List[int]:
        """
        :obj:`List[int]`: Ids of all the additional special tokens in the vocabulary. Log an error if used while not
        having been set.
        """
        return self.convert_tokens_to_ids(self.additional_special_tokens)

    @bos_token_id.setter
    def bos_token_id(self, value):
        self._bos_token = self.convert_tokens_to_ids(value)

    @eos_token_id.setter
    def eos_token_id(self, value):
        self._eos_token = self.convert_tokens_to_ids(value)

    @unk_token_id.setter
    def unk_token_id(self, value):
        self._unk_token = self.convert_tokens_to_ids(value)

    @sep_token_id.setter
    def sep_token_id(self, value):
        self._sep_token = self.convert_tokens_to_ids(value)

    @pad_token_id.setter
    def pad_token_id(self, value):
        self._pad_token = self.convert_tokens_to_ids(value)

    @cls_token_id.setter
    def cls_token_id(self, value):
        self._cls_token = self.convert_tokens_to_ids(value)

    @mask_token_id.setter
    def mask_token_id(self, value):
        self._mask_token = self.convert_tokens_to_ids(value)

    @additional_special_tokens_ids.setter
    def additional_special_tokens_ids(self, values):
        self._additional_special_tokens = [self.convert_tokens_to_ids(value) for value in values]

    @property
    def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]:
        """
        :obj:`Dict[str, Union[str, List[str]]]`: A dictionary mapping special token class attributes
        (:obj:`cls_token`, :obj:`unk_token`, etc.) to their values (:obj:`'<unk>'`, :obj:`'<cls>'`, etc.).

        Convert potential tokens of :obj:`tokenizers.AddedToken` type to string.
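
        Example (illustrative sketch; the exact content depends on the loaded checkpoint, here assumed to be
        ``bert-base-uncased``)::

            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
            tokenizer.special_tokens_map
            # e.g. {'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]',
            #       'cls_token': '[CLS]', 'mask_token': '[MASK]'}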
"""
|
|
set_attr = {}
|
|
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
|
|
attr_value = getattr(self, "_" + attr)
|
|
if attr_value:
|
|
set_attr[attr] = str(attr_value)
|
|
return set_attr
|
|
|
|
@property
|
|
def special_tokens_map_extended(self) -> Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]:
|
|
"""
|
|
:obj:`Dict[str, Union[str, tokenizers.AddedToken, List[Union[str, tokenizers.AddedToken]]]]`: A dictionary
|
|
mapping special token class attributes (:obj:`cls_token`, :obj:`unk_token`, etc.) to their values
|
|
(:obj:`'<unk>'`, :obj:`'<cls>'`, etc.).
|
|
|
|
Don't convert tokens of :obj:`tokenizers.AddedToken` type to string so they can be used to control more finely
|
|
how special tokens are tokenized.
|
|
"""
|
|
set_attr = {}
|
|
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
|
|
attr_value = getattr(self, "_" + attr)
|
|
if attr_value:
|
|
set_attr[attr] = attr_value
|
|
return set_attr
|
|
|
|
@property
|
|
def all_special_tokens(self) -> List[str]:
|
|
"""
|
|
:obj:`List[str]`: All the special tokens (:obj:`'<unk>'`, :obj:`'<cls>'`, etc.) mapped to class attributes.
|
|
|
|
Convert tokens of :obj:`tokenizers.AddedToken` type to string.
|
|
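
        Example (illustrative sketch; the exact list depends on the loaded checkpoint, here assumed to be
        ``bert-base-uncased``)::

            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
            tokenizer.all_special_tokens  # e.g. ['[UNK]', '[SEP]', '[PAD]', '[CLS]', '[MASK]']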
"""
|
|
all_toks = [str(s) for s in self.all_special_tokens_extended]
|
|
return all_toks
|
|
|
|
@property
|
|
def all_special_tokens_extended(self) -> List[Union[str, AddedToken]]:
|
|
"""
|
|
:obj:`List[Union[str, tokenizers.AddedToken]]`: All the special tokens (:obj:`'<unk>'`, :obj:`'<cls>'`, etc.)
|
|
mapped to class attributes.
|
|
|
|
Don't convert tokens of :obj:`tokenizers.AddedToken` type to string so they can be used to control more finely
|
|
how special tokens are tokenized.
|
|
"""
|
|
all_toks = []
|
|
set_attr = self.special_tokens_map_extended
|
|
for attr_value in set_attr.values():
|
|
all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value])
|
|
all_toks = list(OrderedDict.fromkeys(all_toks))
|
|
return all_toks
|
|
|
|
@property
|
|
def all_special_ids(self) -> List[int]:
|
|
"""
|
|
:obj:`List[int]`: List the ids of the special tokens(:obj:`'<unk>'`, :obj:`'<cls>'`, etc.) mapped to class
|
|
attributes.
|
|
"""
|
|
all_toks = self.all_special_tokens
|
|
all_ids = self.convert_tokens_to_ids(all_toks)
|
|
return all_ids
|
|
|
|
|
|
ENCODE_KWARGS_DOCSTRING = r"""
|
|
add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
|
Whether or not to encode the sequences with the special tokens relative to their model.
|
|
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`False`):
|
|
Activates and controls padding. Accepts the following values:
|
|
|
|
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a
|
|
single sequence if provided).
|
|
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
|
|
maximum acceptable input length for the model if that argument is not provided.
|
|
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
|
|
different lengths).
|
|
truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`False`):
|
|
Activates and controls truncation. Accepts the following values:
|
|
|
|
* :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument
|
|
:obj:`max_length` or to the maximum acceptable input length for the model if that argument is not
|
|
provided. This will truncate token by token, removing a token from the longest sequence in the pair
|
|
if a pair of sequences (or a batch of pairs) is provided.
|
|
* :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to
|
|
the maximum acceptable input length for the model if that argument is not provided. This will only
|
|
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
|
|
* :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or
|
|
to the maximum acceptable input length for the model if that argument is not provided. This will only
|
|
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
|
|
* :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with
|
|
sequence lengths greater than the model maximum admissible input size).
|
|
max_length (:obj:`int`, `optional`):
|
|
Controls the maximum length to use by one of the truncation/padding parameters.
|
|
|
|
If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum
|
|
length is required by one of the truncation/padding parameters. If the model has no specific maximum
|
|
input length (like XLNet) truncation/padding to a maximum length will be deactivated.
|
|
stride (:obj:`int`, `optional`, defaults to 0):
|
|
If set to a number along with :obj:`max_length`, the overflowing tokens returned when
|
|
:obj:`return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
|
|
returned to provide some overlap between truncated and overflowing sequences. The value of this
|
|
argument defines the number of overlapping tokens.
|
|
is_split_into_words (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
|
Whether or not the input is already pre-tokenized (e.g., split into words). If set to :obj:`True`, the
|
|
tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
|
|
which it will tokenize. This is useful for NER or token classification.
|
|
pad_to_multiple_of (:obj:`int`, `optional`):
|
|
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
|
|
the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
|
|
return_tensors (:obj:`str` or :class:`~transformers.file_utils.TensorType`, `optional`):
|
|
If set, will return tensors instead of list of python integers. Acceptable values are:
|
|
|
|
* :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects.
|
|
* :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects.
|
|
* :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects.
|
|
"""
|
|
|
|
ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
|
|
return_token_type_ids (:obj:`bool`, `optional`):
|
|
Whether to return token type IDs. If left to the default, will return the token type IDs according to
|
|
the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
|
|
|
|
`What are token type IDs? <../glossary.html#token-type-ids>`__
|
|
return_attention_mask (:obj:`bool`, `optional`):
|
|
Whether to return the attention mask. If left to the default, will return the attention mask according
|
|
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
|
|
|
|
`What are attention masks? <../glossary.html#attention-mask>`__
|
|
return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
|
Whether or not to return overflowing token sequences.
|
|
return_special_tokens_mask (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
|
Whether or not to return special tokens mask information.
|
|
return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
|
Whether or not to return :obj:`(char_start, char_end)` for each token.
|
|
|
|
This is only available on fast tokenizers inheriting from
|
|
:class:`~transformers.PreTrainedTokenizerFast`, if using Python's tokenizer, this method will raise
|
|
:obj:`NotImplementedError`.
|
|
return_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
|
Whether or not to return the lengths of the encoded inputs.
|
|
verbose (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
|
Whether or not to print more information and warnings.
|
|
**kwargs: passed to the :obj:`self.tokenize()` method
|
|
|
|
Return:
|
|
:class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields:
|
|
|
|
- **input_ids** -- List of token ids to be fed to a model.
|
|
|
|
`What are input IDs? <../glossary.html#input-ids>`__
|
|
|
|
- **token_type_ids** -- List of token type ids to be fed to a model (when :obj:`return_token_type_ids=True`
|
|
or if `"token_type_ids"` is in :obj:`self.model_input_names`).
|
|
|
|
`What are token type IDs? <../glossary.html#token-type-ids>`__
|
|
|
|
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
|
|
:obj:`return_attention_mask=True` or if `"attention_mask"` is in :obj:`self.model_input_names`).
|
|
|
|
`What are attention masks? <../glossary.html#attention-mask>`__
|
|
|
|
- **overflowing_tokens** -- List of overflowing tokens sequences (when a :obj:`max_length` is specified and
|
|
:obj:`return_overflowing_tokens=True`).
|
|
- **num_truncated_tokens** -- Number of tokens truncated (when a :obj:`max_length` is specified and
|
|
:obj:`return_overflowing_tokens=True`).
|
|
- **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
|
|
regular sequence tokens (when :obj:`add_special_tokens=True` and :obj:`return_special_tokens_mask=True`).
|
|
- **length** -- The length of the inputs (when :obj:`return_length=True`)
|
|
"""
|
|
|
|
INIT_TOKENIZER_DOCSTRING = r"""
|
|
Class attributes (overridden by derived classes)
|
|
|
|
- **vocab_files_names** (:obj:`Dict[str, str]`) -- A dictionary with, as keys, the ``__init__`` keyword name of
|
|
each vocabulary file required by the model, and as associated values, the filename for saving the associated
|
|
file (string).
|
|
- **pretrained_vocab_files_map** (:obj:`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the
|
|
high-level keys being the ``__init__`` keyword name of each vocabulary file required by the model, the
|
|
low-level being the :obj:`short-cut-names` of the pretrained models with, as associated values, the
|
|
:obj:`url` to the associated pretrained vocabulary file.
|
|
- **max_model_input_sizes** (:obj:`Dict[str, Optinal[int]]`) -- A dictionary with, as keys, the
|
|
:obj:`short-cut-names` of the pretrained models, and as associated values, the maximum length of the sequence
|
|
inputs of this model, or :obj:`None` if the model has no maximum input size.
|
|
- **pretrained_init_configuration** (:obj:`Dict[str, Dict[str, Any]]`) -- A dictionary with, as keys, the
|
|
:obj:`short-cut-names` of the pretrained models, and as associated values, a dictionary of specific arguments
|
|
to pass to the ``__init__`` method of the tokenizer class for this pretrained model when loading the
|
|
tokenizer with the :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`
|
|
method.
|
|
- **model_input_names** (:obj:`List[str]`) -- A list of inputs expected in the forward pass of the model.
|
|
- **padding_side** (:obj:`str`) -- The default value for the side on which the model should have padding
|
|
applied. Should be :obj:`'right'` or :obj:`'left'`.
|
|
|
|
Args:
|
|
model_max_length (:obj:`int`, `optional`):
|
|
The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is
|
|
loaded with :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`, this
|
|
will be set to the value stored for the associated model in ``max_model_input_sizes`` (see above). If no
|
|
value is provided, will default to VERY_LARGE_INTEGER (:obj:`int(1e30)`).
|
|
padding_side (:obj:`str`, `optional`):
|
|
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
|
|
Default value is picked from the class attribute of the same name.
|
|
model_input_names (:obj:`List[string]`, `optional`):
|
|
The list of inputs accepted by the forward pass of the model (like :obj:`"token_type_ids"` or
|
|
:obj:`"attention_mask"`). Default value is picked from the class attribute of the same name.
|
|
bos_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
|
|
A special token representing the beginning of a sentence. Will be associated to ``self.bos_token`` and
|
|
``self.bos_token_id``.
|
|
eos_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
|
|
A special token representing the end of a sentence. Will be associated to ``self.eos_token`` and
|
|
``self.eos_token_id``.
|
|
unk_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
|
|
A special token representing an out-of-vocabulary token. Will be associated to ``self.unk_token`` and
|
|
``self.unk_token_id``.
|
|
sep_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
|
|
A special token separating two different sentences in the same input (used by BERT for instance). Will be
|
|
associated to ``self.sep_token`` and ``self.sep_token_id``.
|
|
pad_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
|
|
A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
|
|
attention mechanisms or loss computation. Will be associated to ``self.pad_token`` and
|
|
``self.pad_token_id``.
|
|
cls_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
|
|
A special token representing the class of the input (used by BERT for instance). Will be associated to
|
|
``self.cls_token`` and ``self.cls_token_id``.
|
|
mask_token (:obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
|
|
A special token representing a masked token (used by masked-language modeling pretraining objectives, like
|
|
BERT). Will be associated to ``self.mask_token`` and ``self.mask_token_id``.
|
|
additional_special_tokens (tuple or list of :obj:`str` or :obj:`tokenizers.AddedToken`, `optional`):
|
|
A tuple or a list of additional special tokens. Add them here to ensure they won't be split by the
|
|
tokenization process. Will be associated to ``self.additional_special_tokens`` and
|
|
``self.additional_special_tokens_ids``.
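Example: passing some of these arguments when loading a concrete tokenizer (a minimal sketch; the
checkpoint name and the extra token string are only illustrative)::

# Register an extra special token at load time so it will not be split by the tokenization process
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', additional_special_tokens=['<ent>'])
assert '<ent>' in tokenizer.additional_special_tokens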
|
|
"""
|
|
|
|
|
|
@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
|
|
class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
|
|
"""
|
|
Base class for :class:`~transformers.PreTrainedTokenizer` and :class:`~transformers.PreTrainedTokenizerFast`.
|
|
|
|
Handles the shared (mostly boilerplate) methods for those two classes.
|
|
"""
|
|
|
|
vocab_files_names: Dict[str, str] = {}
|
|
pretrained_vocab_files_map: Dict[str, Dict[str, str]] = {}
|
|
pretrained_init_configuration: Dict[str, Dict[str, Any]] = {}
|
|
max_model_input_sizes: Dict[str, Optional[int]] = {}
|
|
|
|
# first name has to correspond to main model input name
|
|
# to make sure `tokenizer.pad(...)` works correctly
|
|
model_input_names: List[str] = ["input_ids", "token_type_ids", "attention_mask"]
|
|
padding_side: str = "right"
|
|
slow_tokenizer_class = None
|
|
|
|
def __init__(self, **kwargs):
|
|
# inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
|
|
self.init_inputs = ()
|
|
self.init_kwargs = copy.deepcopy(kwargs)
|
|
self.name_or_path = kwargs.pop("name_or_path", "")
|
|
|
|
# For backward compatibility we fallback to set model_max_length from max_len if provided
|
|
model_max_length = kwargs.pop("model_max_length", kwargs.pop("max_len", None))
|
|
self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER
|
|
|
|
# Padding side is right by default and overridden in subclasses. If specified in the kwargs, it is changed.
|
|
self.padding_side = kwargs.pop("padding_side", self.padding_side)
|
|
assert self.padding_side in [
|
|
"right",
|
|
"left",
|
|
], f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}"
|
|
self.model_input_names = kwargs.pop("model_input_names", self.model_input_names)
|
|
|
|
self.deprecation_warnings = (
|
|
{}
|
|
) # Use to store when we have already noticed a deprecation warning (avoid overlogging).
|
|
|
|
super().__init__(**kwargs)
|
|
|
|
@property
|
|
def max_len_single_sentence(self) -> int:
|
|
"""
|
|
:obj:`int`: The maximum length of a sentence that can be fed to the model.
|
|
"""
|
|
return self.model_max_length - self.num_special_tokens_to_add(pair=False)
|
|
|
|
@property
|
|
def max_len_sentences_pair(self) -> int:
|
|
"""
|
|
:obj:`int`: The maximum combined length of a pair of sentences that can be fed to the model.
|
|
"""
|
|
return self.model_max_length - self.num_special_tokens_to_add(pair=True)
|
|
|
|
@max_len_single_sentence.setter
|
|
def max_len_single_sentence(self, value) -> int:
|
|
# For backward compatibility, allow to try to setup 'max_len_single_sentence'.
|
|
if value == self.model_max_length - self.num_special_tokens_to_add(pair=False) and self.verbose:
|
|
if not self.deprecation_warnings.get("max_len_single_sentence", False):
|
|
logger.warning(
|
|
"Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up."
|
|
)
|
|
self.deprecation_warnings["max_len_single_sentence"] = True
|
|
else:
|
|
raise ValueError(
|
|
"Setting 'max_len_single_sentence' is now deprecated. " "This value is automatically set up."
|
|
)
|
|
|
|
@max_len_sentences_pair.setter
|
|
def max_len_sentences_pair(self, value) -> int:
|
|
# For backward compatibility, allow to try to setup 'max_len_sentences_pair'.
|
|
if value == self.model_max_length - self.num_special_tokens_to_add(pair=True) and self.verbose:
|
|
if not self.deprecation_warnings.get("max_len_sentences_pair", False):
|
|
logger.warning(
|
|
"Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up."
|
|
)
|
|
self.deprecation_warnings["max_len_sentences_pair"] = True
|
|
else:
|
|
raise ValueError(
|
|
"Setting 'max_len_sentences_pair' is now deprecated. " "This value is automatically set up."
|
|
)
|
|
|
|
def __repr__(self) -> str:
|
|
return (
|
|
f"{'PreTrainedTokenizerFast' if self.is_fast else 'PreTrainedTokenizer'}(name_or_path='{self.name_or_path}', "
|
|
f"vocab_size={self.vocab_size}, model_max_len={self.model_max_length}, is_fast={self.is_fast}, "
|
|
f"padding_side='{self.padding_side}', special_tokens={self.special_tokens_map_extended})"
|
|
)
|
|
|
|
def get_vocab(self) -> Dict[str, int]:
|
|
"""
|
|
Returns the vocabulary as a dictionary of token to index.
|
|
|
|
:obj:`tokenizer.get_vocab()[token]` is equivalent to :obj:`tokenizer.convert_tokens_to_ids(token)` when
|
|
:obj:`token` is in the vocab.
|
|
|
|
Returns:
|
|
:obj:`Dict[str, int]`: The vocabulary.
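Example (a minimal sketch; assumes a tokenizer has already been instantiated, e.g. with
:meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`, and that the token
is present in its vocabulary)::

vocab = tokenizer.get_vocab()
# For an in-vocabulary token this matches tokenizer.convert_tokens_to_ids("hello")
token_id = vocab["hello"]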
|
|
"""
|
|
raise NotImplementedError()
|
|
|
|
@classmethod
|
|
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
|
|
r"""
|
|
Instantiate a :class:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase` (or a derived class) from
|
|
a predefined tokenizer.
|
|
|
|
Args:
|
|
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
|
|
Can be either:
|
|
|
|
- A string, the `model id` of a predefined tokenizer hosted inside a model repo on huggingface.co.
|
|
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under a
|
|
user or organization name, like ``dbmdz/bert-base-german-cased``.
|
|
- A path to a `directory` containing vocabulary files required by the tokenizer, for instance saved
|
|
using the :meth:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`
|
|
method, e.g., ``./my_model_directory/``.
|
|
- (**Deprecated**, not applicable to all derived classes) A path or url to a single saved vocabulary
|
|
file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g.,
|
|
``./my_model_directory/vocab.txt``.
|
|
cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
|
|
Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the
|
|
standard cache should not be used.
|
|
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
|
Whether or not to force the (re-)download of the vocabulary files and override the cached versions if they
|
|
exist.
|
|
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
|
Whether or not to delete incompletely received files. Attempts to resume the download if such a file
|
|
exists.
|
|
proxies (:obj:`Dict[str, str]`, `optional`):
|
|
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
|
|
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
|
|
use_auth_token (:obj:`str` or `bool`, `optional`):
|
|
The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
|
|
generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
|
|
revision (:obj:`str`, `optional`, defaults to :obj:`"main"`):
|
|
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
|
|
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
|
|
identifier allowed by git.
|
|
subfolder (:obj:`str`, `optional`):
|
|
In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
|
|
facebook/rag-token-base), specify it here.
|
|
inputs (additional positional arguments, `optional`):
|
|
Will be passed along to the Tokenizer ``__init__`` method.
|
|
kwargs (additional keyword arguments, `optional`):
|
|
Will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like
|
|
``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``,
|
|
``mask_token``, ``additional_special_tokens``. See parameters in the ``__init__`` for more details.
|
|
|
|
.. note::
|
|
|
|
Passing :obj:`use_auth_token=True` is required when you want to use a private model.
|
|
|
|
Examples::
|
|
|
|
# We can't instantiate directly the base class `PreTrainedTokenizerBase` so let's show our examples on a derived class: BertTokenizer
|
|
# Download vocabulary from huggingface.co and cache.
|
|
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
|
|
|
|
# Download vocabulary from huggingface.co (user-uploaded) and cache.
|
|
tokenizer = BertTokenizer.from_pretrained('dbmdz/bert-base-german-cased')
|
|
|
|
# If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
|
|
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/')
|
|
|
|
# If the tokenizer uses a single vocabulary file, you can point directly to this file
|
|
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt')
|
|
|
|
# You can link tokens to special vocabulary when instantiating
|
|
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>')
|
|
# You should be sure '<unk>' is in the vocabulary when doing that.
|
|
# Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead)
|
|
assert tokenizer.unk_token == '<unk>'
|
|
|
|
"""
|
|
cache_dir = kwargs.pop("cache_dir", None)
|
|
force_download = kwargs.pop("force_download", False)
|
|
resume_download = kwargs.pop("resume_download", False)
|
|
proxies = kwargs.pop("proxies", None)
|
|
local_files_only = kwargs.pop("local_files_only", False)
|
|
use_auth_token = kwargs.pop("use_auth_token", None)
|
|
revision = kwargs.pop("revision", None)
|
|
subfolder = kwargs.pop("subfolder", None)
|
|
from_pipeline = kwargs.pop("_from_pipeline", None)
|
|
from_auto_class = kwargs.pop("_from_auto", False)
|
|
|
|
user_agent = {"file_type": "tokenizer", "from_auto_class": from_auto_class, "is_fast": "Fast" in cls.__name__}
|
|
if from_pipeline is not None:
|
|
user_agent["using_pipeline"] = from_pipeline
|
|
|
|
if is_offline_mode() and not local_files_only:
|
|
logger.info("Offline mode: forcing local_files_only=True")
|
|
local_files_only = True
|
|
|
|
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
|
|
vocab_files = {}
|
|
init_configuration = {}
|
|
|
|
if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
|
|
if len(cls.vocab_files_names) > 1:
|
|
raise ValueError(
|
|
f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not "
|
|
"supported for this tokenizer. Use a model identifier or the path to a directory instead."
|
|
)
|
|
warnings.warn(
|
|
f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated and "
|
|
"won't be possible anymore in v5. Use a model identifier or the path to a directory instead.",
|
|
FutureWarning,
|
|
)
|
|
file_id = list(cls.vocab_files_names.keys())[0]
|
|
vocab_files[file_id] = pretrained_model_name_or_path
|
|
else:
|
|
# At this point pretrained_model_name_or_path is either a directory or a model identifier name
|
|
additional_files_names = {
|
|
"added_tokens_file": ADDED_TOKENS_FILE,
|
|
"special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE,
|
|
"tokenizer_config_file": TOKENIZER_CONFIG_FILE,
|
|
"tokenizer_file": FULL_TOKENIZER_FILE,
|
|
}
|
|
# Look for the tokenizer files
|
|
for file_id, file_name in {**cls.vocab_files_names, **additional_files_names}.items():
|
|
if os.path.isdir(pretrained_model_name_or_path):
|
|
if subfolder is not None:
|
|
full_file_name = os.path.join(pretrained_model_name_or_path, subfolder, file_name)
|
|
else:
|
|
full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
|
|
if not os.path.exists(full_file_name):
|
|
logger.info(f"Didn't find file {full_file_name}. We won't load it.")
|
|
full_file_name = None
|
|
else:
|
|
full_file_name = hf_bucket_url(
|
|
pretrained_model_name_or_path,
|
|
filename=file_name,
|
|
subfolder=subfolder,
|
|
revision=revision,
|
|
mirror=None,
|
|
)
|
|
|
|
vocab_files[file_id] = full_file_name
|
|
|
|
# Get files from url, cache, or disk depending on the case
|
|
resolved_vocab_files = {}
|
|
unresolved_files = []
|
|
for file_id, file_path in vocab_files.items():
|
|
if file_path is None:
|
|
resolved_vocab_files[file_id] = None
|
|
else:
|
|
try:
|
|
resolved_vocab_files[file_id] = cached_path(
|
|
file_path,
|
|
cache_dir=cache_dir,
|
|
force_download=force_download,
|
|
proxies=proxies,
|
|
resume_download=resume_download,
|
|
local_files_only=local_files_only,
|
|
use_auth_token=use_auth_token,
|
|
user_agent=user_agent,
|
|
)
|
|
|
|
except FileNotFoundError as error:
|
|
if local_files_only:
|
|
unresolved_files.append(file_id)
|
|
else:
|
|
raise error
|
|
|
|
except requests.exceptions.HTTPError as err:
|
|
if "404 Client Error" in str(err):
|
|
logger.debug(err)
|
|
resolved_vocab_files[file_id] = None
|
|
else:
|
|
raise err
|
|
|
|
if len(unresolved_files) > 0:
|
|
logger.info(
|
|
f"Can't load following files from cache: {unresolved_files} and cannot check if these "
|
|
"files are necessary for the tokenizer to operate."
|
|
)
|
|
|
|
if all(full_file_name is None for full_file_name in resolved_vocab_files.values()):
|
|
msg = (
|
|
f"Can't load tokenizer for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
|
|
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
|
|
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing relevant tokenizer files\n\n"
|
|
)
|
|
raise EnvironmentError(msg)
|
|
|
|
for file_id, file_path in vocab_files.items():
|
|
if file_id not in resolved_vocab_files:
|
|
continue
|
|
|
|
if file_path == resolved_vocab_files[file_id]:
|
|
logger.info(f"loading file {file_path}")
|
|
else:
|
|
logger.info(f"loading file {file_path} from cache at {resolved_vocab_files[file_id]}")
|
|
|
|
return cls._from_pretrained(
|
|
resolved_vocab_files, pretrained_model_name_or_path, init_configuration, *init_inputs, **kwargs
|
|
)
|
|
|
|
@classmethod
|
|
def _from_pretrained(
|
|
cls, resolved_vocab_files, pretrained_model_name_or_path, init_configuration, *init_inputs, **kwargs
|
|
):
|
|
# We instantiate fast tokenizers based on a slow tokenizer if we don't have access to the tokenizer.json
|
|
# file or if `from_slow` is set to True.
|
|
from_slow = kwargs.get("from_slow", False)
|
|
has_tokenizer_file = resolved_vocab_files.get("tokenizer_file", None) is not None
|
|
if (from_slow or not has_tokenizer_file) and cls.slow_tokenizer_class is not None:
|
|
slow_tokenizer = (cls.slow_tokenizer_class)._from_pretrained(
|
|
copy.deepcopy(resolved_vocab_files),
|
|
pretrained_model_name_or_path,
|
|
copy.deepcopy(init_configuration),
|
|
*init_inputs,
|
|
**(copy.deepcopy(kwargs)),
|
|
)
|
|
else:
|
|
slow_tokenizer = None
|
|
|
|
# Prepare tokenizer initialization kwargs
|
|
# Did we save some inputs and kwargs to reload?
|
|
tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None)
|
|
if tokenizer_config_file is not None:
|
|
with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle:
|
|
init_kwargs = json.load(tokenizer_config_handle)
|
|
saved_init_inputs = init_kwargs.pop("init_inputs", ())
|
|
if not init_inputs:
|
|
init_inputs = saved_init_inputs
|
|
else:
|
|
init_kwargs = init_configuration
|
|
|
|
# Update with newly provided kwargs
|
|
init_kwargs.update(kwargs)
|
|
|
|
# Convert AddedTokens serialized as dict to class instances
|
|
def convert_added_tokens(obj: Union[AddedToken, Any]):
|
|
if isinstance(obj, dict) and "__type" in obj and obj["__type"] == "AddedToken":
|
|
obj.pop("__type")
|
|
return AddedToken(**obj)
|
|
elif isinstance(obj, (list, tuple)):
|
|
return list(convert_added_tokens(o) for o in obj)
|
|
elif isinstance(obj, dict):
|
|
return {k: convert_added_tokens(v) for k, v in obj.items()}
|
|
return obj
|
|
|
|
init_kwargs = convert_added_tokens(init_kwargs)
|
|
|
|
# Set max length if needed
|
|
if pretrained_model_name_or_path in cls.max_model_input_sizes:
|
|
# if we're using a pretrained model, ensure the tokenizer
|
|
# won't index sequences longer than the number of positional embeddings
|
|
model_max_length = cls.max_model_input_sizes[pretrained_model_name_or_path]
|
|
if model_max_length is not None and isinstance(model_max_length, (int, float)):
|
|
init_kwargs["model_max_length"] = min(init_kwargs.get("model_max_length", int(1e30)), model_max_length)
|
|
|
|
# Merge resolved_vocab_files arguments in init_kwargs.
|
|
added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
|
|
for args_name, file_path in resolved_vocab_files.items():
|
|
if args_name not in init_kwargs:
|
|
init_kwargs[args_name] = file_path
|
|
|
|
if slow_tokenizer is not None:
|
|
init_kwargs["__slow_tokenizer"] = slow_tokenizer
|
|
|
|
init_kwargs["name_or_path"] = pretrained_model_name_or_path
|
|
|
|
# Instantiate tokenizer.
|
|
try:
|
|
tokenizer = cls(*init_inputs, **init_kwargs)
|
|
except OSError:
|
|
raise OSError(
|
|
"Unable to load vocabulary from file. "
|
|
"Please check that the provided vocabulary is accessible and not corrupted."
|
|
)
|
|
|
|
# Save inputs and kwargs for saving and re-loading with ``save_pretrained``
|
|
# Removed: Now done at the base class level
|
|
# tokenizer.init_inputs = init_inputs
|
|
# tokenizer.init_kwargs = init_kwargs
|
|
|
|
# If there is a complementary special token map, load it
|
|
special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None)
|
|
if special_tokens_map_file is not None:
|
|
with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle:
|
|
special_tokens_map = json.load(special_tokens_map_handle)
|
|
for key, value in special_tokens_map.items():
|
|
if isinstance(value, dict):
|
|
value = AddedToken(**value)
|
|
elif isinstance(value, list):
|
|
value = [AddedToken(**token) if isinstance(token, dict) else token for token in value]
|
|
setattr(tokenizer, key, value)
|
|
|
|
# Add supplementary tokens.
|
|
special_tokens = tokenizer.all_special_tokens
|
|
if added_tokens_file is not None:
|
|
with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
|
|
added_tok_encoder = json.load(added_tokens_handle)
|
|
|
|
# Sort added tokens by index
|
|
added_tok_encoder_sorted = list(sorted(added_tok_encoder.items(), key=lambda x: x[1]))
|
|
|
|
for token, index in added_tok_encoder_sorted:
|
|
if has_tokenizer_file and index != len(tokenizer) and tokenizer.convert_tokens_to_ids(token) != index:
|
|
# Tokenizer fast: added token needs to either be in the vocabulary with the proper index or the
|
|
# index is the current length of the tokenizer (not in vocabulary)
|
|
raise ValueError(
|
|
f"Wrong index found for {token}: should be {tokenizer.convert_tokens_to_ids(token)} but found "
|
|
f"{index}."
|
|
)
|
|
elif not has_tokenizer_file and index != len(tokenizer):
|
|
# Tokenizer slow: added token cannot already be in the vocabulary so its index needs to be the
|
|
# current length of the tokenizer.
|
|
raise ValueError(
|
|
f"Non-consecutive added token '{token}' found. "
|
|
f"Should have index {len(tokenizer)} but has index {index} in saved vocabulary."
|
|
)
|
|
|
|
# Safe to call on a tokenizer fast even if token already there.
|
|
tokenizer.add_tokens(token, special_tokens=bool(token in special_tokens))
|
|
|
|
# Check all our special tokens are registered as "no split" token (we don't cut them) and are in the vocab
|
|
added_tokens = tokenizer.sanitize_special_tokens()
|
|
if added_tokens:
|
|
logger.warning(
|
|
"Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained."
|
|
)
|
|
|
|
return tokenizer
|
|
|
|
def save_pretrained(
|
|
self,
|
|
save_directory: Union[str, os.PathLike],
|
|
legacy_format: Optional[bool] = None,
|
|
filename_prefix: Optional[str] = None,
|
|
push_to_hub: bool = False,
|
|
**kwargs,
|
|
) -> Tuple[str]:
|
|
"""
|
|
Save the full tokenizer state.
|
|
|
|
|
|
This method makes sure the full tokenizer can then be re-loaded using the
:meth:`~transformers.tokenization_utils_base.PreTrainedTokenizer.from_pretrained` class method.
|
|
|
|
.. Warning::
|
|
This won't save modifications you may have applied to the tokenizer after the instantiation (for instance,
|
|
modifying :obj:`tokenizer.do_lower_case` after creation).
|
|
|
|
Args:
|
|
save_directory (:obj:`str` or :obj:`os.PathLike`): The path to a directory where the tokenizer will be saved.
|
|
legacy_format (:obj:`bool`, `optional`):
|
|
Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON
|
|
format as well as in the legacy format, i.e. with tokenizer-specific vocabulary and a separate added_tokens
file.
|
|
|
|
If :obj:`False`, will only save the tokenizer in the unified JSON format. This format is incompatible
|
|
with "slow" tokenizers (not powered by the `tokenizers` library), so the tokenizer will not be able to
|
|
be loaded in the corresponding "slow" tokenizer.
|
|
|
|
If :obj:`True`, will save the tokenizer in legacy format.
|
|
filename_prefix (:obj:`str`, `optional`):
|
|
A prefix to add to the names of the files saved by the tokenizer.
|
|
|
|
Returns:
|
|
A tuple of :obj:`str`: The files saved.
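Example of a save/re-load round trip (a sketch; the directory name and the tokenizer class are only
illustrative)::

tokenizer.save_pretrained('./my_tokenizer/')
# Re-load later with the same tokenizer class
tokenizer = BertTokenizer.from_pretrained('./my_tokenizer/')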
|
|
"""
|
|
if os.path.isfile(save_directory):
|
|
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
|
|
return
|
|
os.makedirs(save_directory, exist_ok=True)
|
|
|
|
special_tokens_map_file = os.path.join(
|
|
save_directory, (filename_prefix + "-" if filename_prefix else "") + SPECIAL_TOKENS_MAP_FILE
|
|
)
|
|
tokenizer_config_file = os.path.join(
|
|
save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_CONFIG_FILE
|
|
)
|
|
|
|
tokenizer_config = copy.deepcopy(self.init_kwargs)
|
|
if len(self.init_inputs) > 0:
|
|
tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
|
|
for file_id in self.vocab_files_names.keys():
|
|
tokenizer_config.pop(file_id, None)
|
|
|
|
# Sanitize AddedTokens
|
|
def convert_added_tokens(obj: Union[AddedToken, Any], add_type_field=True):
|
|
if isinstance(obj, AddedToken):
|
|
out = obj.__getstate__()
|
|
if add_type_field:
|
|
out["__type"] = "AddedToken"
|
|
return out
|
|
elif isinstance(obj, (list, tuple)):
|
|
return list(convert_added_tokens(o, add_type_field=add_type_field) for o in obj)
|
|
elif isinstance(obj, dict):
|
|
return {k: convert_added_tokens(v, add_type_field=add_type_field) for k, v in obj.items()}
|
|
return obj
|
|
|
|
# add_type_field=True to allow dicts in the kwargs / differentiate from AddedToken serialization
|
|
tokenizer_config = convert_added_tokens(tokenizer_config, add_type_field=True)
|
|
with open(tokenizer_config_file, "w", encoding="utf-8") as f:
|
|
f.write(json.dumps(tokenizer_config, ensure_ascii=False))
|
|
logger.info(f"tokenizer config file saved in {tokenizer_config_file}")
|
|
|
|
# Sanitize AddedTokens in special_tokens_map
|
|
write_dict = convert_added_tokens(self.special_tokens_map_extended, add_type_field=False)
|
|
with open(special_tokens_map_file, "w", encoding="utf-8") as f:
|
|
f.write(json.dumps(write_dict, ensure_ascii=False))
|
|
logger.info(f"Special tokens file saved in {special_tokens_map_file}")
|
|
|
|
file_names = (tokenizer_config_file, special_tokens_map_file)
|
|
|
|
save_files = self._save_pretrained(
|
|
save_directory=save_directory,
|
|
file_names=file_names,
|
|
legacy_format=legacy_format,
|
|
filename_prefix=filename_prefix,
|
|
)
|
|
|
|
if push_to_hub:
|
|
# The returned tuple may list files that were not actually written; only push the ones that exist.
|
|
existing_files = [f for f in save_files if os.path.isfile(f)]
|
|
url = self._push_to_hub(save_files=existing_files, **kwargs)
|
|
logger.info(f"Tokenizer pushed to the hub in this commit: {url}")
|
|
|
|
return save_files
|
|
|
|
def _save_pretrained(
|
|
self,
|
|
save_directory: Union[str, os.PathLike],
|
|
file_names: Tuple[str],
|
|
legacy_format: Optional[bool] = None,
|
|
filename_prefix: Optional[str] = None,
|
|
) -> Tuple[str]:
|
|
"""
|
|
Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens.
|
|
|
|
Fast tokenizers can also be saved in a unique JSON file containing {config + vocab + added-tokens} using the
specific :meth:`~transformers.tokenization_utils_fast.PreTrainedTokenizerFast._save_pretrained` method.
|
|
"""
|
|
if legacy_format is False:
|
|
raise ValueError(
|
|
"Only fast tokenizers (instances of PreTrainedTokenizerFast) can be saved in non legacy format."
|
|
)
|
|
|
|
save_directory = str(save_directory)
|
|
|
|
added_tokens_file = os.path.join(
|
|
save_directory, (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE
|
|
)
|
|
added_vocab = self.get_added_vocab()
|
|
if added_vocab:
|
|
with open(added_tokens_file, "w", encoding="utf-8") as f:
|
|
out_str = json.dumps(added_vocab, ensure_ascii=False)
|
|
f.write(out_str)
|
|
logger.info(f"added tokens file saved in {added_tokens_file}")
|
|
|
|
vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix)
|
|
|
|
return file_names + vocab_files + (added_tokens_file,)
|
|
|
|
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
|
|
"""
|
|
Save only the vocabulary of the tokenizer (vocabulary + added tokens).
|
|
|
|
This method won't save the configuration and special token mappings of the tokenizer. Use
|
|
:meth:`~transformers.PreTrainedTokenizerFast._save_pretrained` to save the whole state of the tokenizer.
|
|
|
|
Args:
|
|
save_directory (:obj:`str`):
|
|
The directory in which to save the vocabulary.
|
|
filename_prefix (:obj:`str`, `optional`):
|
|
An optional prefix to add to the names of the saved files.
|
|
|
|
Returns:
|
|
:obj:`Tuple(str)`: Paths to the files saved.
|
|
"""
|
|
raise NotImplementedError
|
|
|
|
def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
|
|
"""
|
|
Converts a string into a sequence of tokens, replacing unknown tokens with the :obj:`unk_token`.
|
|
|
|
Args:
|
|
text (:obj:`str`):
|
|
The sequence to be encoded.
|
|
pair (:obj:`str`, `optional`):
|
|
A second sequence to be encoded with the first.
|
|
add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
|
Whether or not to add the special tokens associated with the corresponding model.
|
|
kwargs (additional keyword arguments, `optional`):
|
|
Will be passed to the underlying model specific encode method. See details in
|
|
:meth:`~transformers.PreTrainedTokenizerBase.__call__`
|
|
|
|
Returns:
|
|
:obj:`List[str]`: The list of tokens.
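Example (illustrative only; the exact sub-tokens depend on the vocabulary of the loaded tokenizer)::

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Returns a list of strings (no ids); out-of-vocabulary words are split into sub-word pieces
tokens = tokenizer.tokenize("Hello world!")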
|
|
"""
|
|
raise NotImplementedError
|
|
|
|
@add_end_docstrings(
|
|
ENCODE_KWARGS_DOCSTRING,
|
|
"""
|
|
**kwargs: Passed along to the `.tokenize()` method.
|
|
""",
|
|
"""
|
|
Returns:
|
|
:obj:`List[int]`, :obj:`torch.Tensor`, :obj:`tf.Tensor` or :obj:`np.ndarray`: The tokenized ids of the
|
|
text.
|
|
""",
|
|
)
|
|
def encode(
|
|
self,
|
|
text: Union[TextInput, PreTokenizedInput, EncodedInput],
|
|
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
|
|
add_special_tokens: bool = True,
|
|
padding: Union[bool, str, PaddingStrategy] = False,
|
|
truncation: Union[bool, str, TruncationStrategy] = False,
|
|
max_length: Optional[int] = None,
|
|
stride: int = 0,
|
|
return_tensors: Optional[Union[str, TensorType]] = None,
|
|
**kwargs
|
|
) -> List[int]:
|
|
"""
|
|
Converts a string to a sequence of ids (integers), using the tokenizer and vocabulary.
|
|
|
|
Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.
|
|
|
|
Args:
|
|
text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`):
|
|
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
|
|
``tokenize`` method) or a list of integers (tokenized string ids using the ``convert_tokens_to_ids``
|
|
method).
|
|
text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`):
|
|
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
|
|
the ``tokenize`` method) or a list of integers (tokenized string ids using the
|
|
``convert_tokens_to_ids`` method).
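Example (a minimal sketch; the checkpoint name is only illustrative)::

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# `ids` is a plain Python list of ints; it can be mapped back to text with tokenizer.decode(ids)
ids = tokenizer.encode("Hello world!", add_special_tokens=True)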
|
|
"""
|
|
encoded_inputs = self.encode_plus(
|
|
text,
|
|
text_pair=text_pair,
|
|
add_special_tokens=add_special_tokens,
|
|
padding=padding,
|
|
truncation=truncation,
|
|
max_length=max_length,
|
|
stride=stride,
|
|
return_tensors=return_tensors,
|
|
**kwargs,
|
|
)
|
|
|
|
return encoded_inputs["input_ids"]
|
|
|
|
def num_special_tokens_to_add(self, pair: bool = False) -> int:
|
|
raise NotImplementedError
|
|
|
|
def _get_padding_truncation_strategies(
|
|
self, padding=False, truncation=False, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs
|
|
):
|
|
"""
|
|
Find the correct padding/truncation strategy with backward compatibility for old arguments (truncation_strategy
|
|
and pad_to_max_length) and behaviors.
|
|
"""
|
|
old_truncation_strategy = kwargs.pop("truncation_strategy", "do_not_truncate")
|
|
old_pad_to_max_length = kwargs.pop("pad_to_max_length", False)
|
|
|
|
# Backward compatibility for previous behavior, maybe we should deprecate it:
|
|
# If you only set max_length, it activates truncation for max_length
|
|
if max_length is not None and padding is False and truncation is False:
|
|
if verbose:
|
|
if not self.deprecation_warnings.get("Truncation-not-explicitly-activated", False):
|
|
logger.warning(
|
|
"Truncation was not explicitly activated but `max_length` is provided a specific value, "
|
|
"please use `truncation=True` to explicitly truncate examples to max length. "
|
|
"Defaulting to 'longest_first' truncation strategy. "
|
|
"If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy "
|
|
"more precisely by providing a specific strategy to `truncation`."
|
|
)
|
|
self.deprecation_warnings["Truncation-not-explicitly-activated"] = True
|
|
truncation = "longest_first"
|
|
|
|
# Get padding strategy
|
|
if padding is False and old_pad_to_max_length:
|
|
if verbose:
|
|
warnings.warn(
|
|
"The `pad_to_max_length` argument is deprecated and will be removed in a future version, "
|
|
"use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or "
|
|
"use `padding='max_length'` to pad to a max length. In this case, you can give a specific "
|
|
"length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the "
|
|
"maximal input size of the model (e.g. 512 for Bert).",
|
|
FutureWarning,
|
|
)
|
|
if max_length is None:
|
|
padding_strategy = PaddingStrategy.LONGEST
|
|
else:
|
|
padding_strategy = PaddingStrategy.MAX_LENGTH
|
|
elif padding is not False:
|
|
if padding is True:
|
|
padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
|
|
elif not isinstance(padding, PaddingStrategy):
|
|
padding_strategy = PaddingStrategy(padding)
|
|
elif isinstance(padding, PaddingStrategy):
|
|
padding_strategy = padding
|
|
else:
|
|
padding_strategy = PaddingStrategy.DO_NOT_PAD
|
|
|
|
# Get truncation strategy
|
|
if truncation is False and old_truncation_strategy != "do_not_truncate":
|
|
if verbose:
|
|
warnings.warn(
|
|
"The `truncation_strategy` argument is deprecated and will be removed in a future version, "
|
|
"use `truncation=True` to truncate examples to a max length. You can give a specific "
|
|
"length with `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the "
|
|
"maximal input size of the model (e.g. 512 for Bert). "
|
|
" If you have pairs of inputs, you can give a specific truncation strategy selected among "
|
|
"`truncation='only_first'` (will only truncate the first sentence in the pairs) "
|
|
"`truncation='only_second'` (will only truncate the second sentence in the pairs) "
|
|
"or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence in the pairs).",
|
|
FutureWarning,
|
|
)
|
|
truncation_strategy = TruncationStrategy(old_truncation_strategy)
|
|
elif truncation is not False:
|
|
if truncation is True:
|
|
truncation_strategy = (
|
|
TruncationStrategy.LONGEST_FIRST
|
|
) # Default to truncate the longest sequences in pairs of inputs
|
|
elif not isinstance(truncation, TruncationStrategy):
|
|
truncation_strategy = TruncationStrategy(truncation)
|
|
elif isinstance(truncation, TruncationStrategy):
|
|
truncation_strategy = truncation
|
|
else:
|
|
truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
|
|
|
|
# Set max length if needed
|
|
if max_length is None:
|
|
if padding_strategy == PaddingStrategy.MAX_LENGTH:
|
|
if self.model_max_length > LARGE_INTEGER:
|
|
if verbose:
|
|
if not self.deprecation_warnings.get("Asking-to-pad-to-max_length", False):
|
|
logger.warning(
|
|
"Asking to pad to max_length but no maximum length is provided and the model has no predefined maximum length. "
|
|
"Default to no padding."
|
|
)
|
|
self.deprecation_warnings["Asking-to-pad-to-max_length"] = True
|
|
padding_strategy = PaddingStrategy.DO_NOT_PAD
|
|
else:
|
|
max_length = self.model_max_length
|
|
|
|
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE:
|
|
if self.model_max_length > LARGE_INTEGER:
|
|
if verbose:
|
|
if not self.deprecation_warnings.get("Asking-to-truncate-to-max_length", False):
|
|
logger.warning(
|
|
"Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. "
|
|
"Default to no truncation."
|
|
)
|
|
self.deprecation_warnings["Asking-to-truncate-to-max_length"] = True
|
|
truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
|
|
else:
|
|
max_length = self.model_max_length
|
|
|
|
# Test if we have a padding token
|
|
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (not self.pad_token or self.pad_token_id < 0):
|
|
raise ValueError(
|
|
"Asking to pad but the tokenizer does not have a padding token. "
|
|
"Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` "
|
|
"or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`."
|
|
)
|
|
|
|
# Check that we will truncate to a multiple of pad_to_multiple_of if both are provided
|
|
if (
|
|
truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE
|
|
and padding_strategy != PaddingStrategy.DO_NOT_PAD
|
|
and pad_to_multiple_of is not None
|
|
and max_length is not None
|
|
and (max_length % pad_to_multiple_of != 0)
|
|
):
|
|
raise ValueError(
|
|
f"Truncation and padding are both activated but "
|
|
f"truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of})."
|
|
)
|
|
|
|
return padding_strategy, truncation_strategy, max_length, kwargs
|
|
|
|
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
|
|
def __call__(
|
|
self,
|
|
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
|
|
text_pair: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
|
|
add_special_tokens: bool = True,
|
|
padding: Union[bool, str, PaddingStrategy] = False,
|
|
truncation: Union[bool, str, TruncationStrategy] = False,
|
|
max_length: Optional[int] = None,
|
|
stride: int = 0,
|
|
is_split_into_words: bool = False,
|
|
pad_to_multiple_of: Optional[int] = None,
|
|
return_tensors: Optional[Union[str, TensorType]] = None,
|
|
return_token_type_ids: Optional[bool] = None,
|
|
return_attention_mask: Optional[bool] = None,
|
|
return_overflowing_tokens: bool = False,
|
|
return_special_tokens_mask: bool = False,
|
|
return_offsets_mapping: bool = False,
|
|
return_length: bool = False,
|
|
verbose: bool = True,
|
|
**kwargs
|
|
) -> BatchEncoding:
|
|
"""
|
|
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
|
|
sequences.
|
|
|
|
Args:
|
|
text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`):
|
|
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
|
|
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
|
|
:obj:`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
|
|
text_pair (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`):
|
|
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
|
|
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
|
|
:obj:`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
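Example (a minimal sketch of single, pair and batch usage; the checkpoint name and texts are only
illustrative)::

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

# Single sequence -> dict-like BatchEncoding with input_ids, token_type_ids and attention_mask
encoding = tokenizer("Hello world!")

# Pair of sequences
encoding = tokenizer("What is the capital of France?", "Paris is the capital of France.")

# Batch of sequences, padded to the longest one and returned as PyTorch tensors
batch = tokenizer(["Hello world!", "How are you?"], padding=True, return_tensors="pt")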
|
|
"""
|
|
# Input type checking for clearer error
|
|
assert isinstance(text, str) or (
|
|
isinstance(text, (list, tuple))
|
|
and (
|
|
len(text) == 0
|
|
or (
|
|
isinstance(text[0], str)
|
|
or (isinstance(text[0], (list, tuple)) and (len(text[0]) == 0 or isinstance(text[0][0], str)))
|
|
)
|
|
)
|
|
), (
|
|
"text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
|
|
"or `List[List[str]]` (batch of pretokenized examples)."
|
|
)
|
|
|
|
assert (
|
|
text_pair is None
|
|
or isinstance(text_pair, str)
|
|
or (
|
|
isinstance(text_pair, (list, tuple))
|
|
and (
|
|
len(text_pair) == 0
|
|
or (
|
|
isinstance(text_pair[0], str)
|
|
or (
|
|
isinstance(text_pair[0], (list, tuple))
|
|
and (len(text_pair[0]) == 0 or isinstance(text_pair[0][0], str))
|
|
)
|
|
)
|
|
)
|
|
)
|
|
), (
|
|
"text_pair input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
|
|
"or `List[List[str]]` (batch of pretokenized examples)."
|
|
)
|
|
|
|
is_batched = bool(
|
|
(not is_split_into_words and isinstance(text, (list, tuple)))
|
|
or (
|
|
is_split_into_words and isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
|
|
)
|
|
)
|
|
|
|
if is_batched:
|
|
if isinstance(text_pair, str):
|
|
raise TypeError(
|
|
"when tokenizing batches of text, `text_pair` must be a list or tuple with the same length as `text`."
|
|
)
|
|
if text_pair is not None and len(text) != len(text_pair):
|
|
raise ValueError(
|
|
f"batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}."
|
|
)
|
|
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
|
|
return self.batch_encode_plus(
|
|
batch_text_or_text_pairs=batch_text_or_text_pairs,
|
|
add_special_tokens=add_special_tokens,
|
|
padding=padding,
|
|
truncation=truncation,
|
|
max_length=max_length,
|
|
stride=stride,
|
|
is_split_into_words=is_split_into_words,
|
|
pad_to_multiple_of=pad_to_multiple_of,
|
|
return_tensors=return_tensors,
|
|
return_token_type_ids=return_token_type_ids,
|
|
return_attention_mask=return_attention_mask,
|
|
return_overflowing_tokens=return_overflowing_tokens,
|
|
return_special_tokens_mask=return_special_tokens_mask,
|
|
return_offsets_mapping=return_offsets_mapping,
|
|
return_length=return_length,
|
|
verbose=verbose,
|
|
**kwargs,
|
|
)
|
|
else:
|
|
return self.encode_plus(
|
|
text=text,
|
|
text_pair=text_pair,
|
|
add_special_tokens=add_special_tokens,
|
|
padding=padding,
|
|
truncation=truncation,
|
|
max_length=max_length,
|
|
stride=stride,
|
|
is_split_into_words=is_split_into_words,
|
|
pad_to_multiple_of=pad_to_multiple_of,
|
|
return_tensors=return_tensors,
|
|
return_token_type_ids=return_token_type_ids,
|
|
return_attention_mask=return_attention_mask,
|
|
return_overflowing_tokens=return_overflowing_tokens,
|
|
return_special_tokens_mask=return_special_tokens_mask,
|
|
return_offsets_mapping=return_offsets_mapping,
|
|
return_length=return_length,
|
|
verbose=verbose,
|
|
**kwargs,
|
|
)
|
|
|
|
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
|
|
def encode_plus(
|
|
self,
|
|
text: Union[TextInput, PreTokenizedInput, EncodedInput],
|
|
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
|
|
add_special_tokens: bool = True,
|
|
padding: Union[bool, str, PaddingStrategy] = False,
|
|
truncation: Union[bool, str, TruncationStrategy] = False,
|
|
max_length: Optional[int] = None,
|
|
stride: int = 0,
|
|
is_split_into_words: bool = False,
|
|
pad_to_multiple_of: Optional[int] = None,
|
|
return_tensors: Optional[Union[str, TensorType]] = None,
|
|
return_token_type_ids: Optional[bool] = None,
|
|
return_attention_mask: Optional[bool] = None,
|
|
return_overflowing_tokens: bool = False,
|
|
return_special_tokens_mask: bool = False,
|
|
return_offsets_mapping: bool = False,
|
|
return_length: bool = False,
|
|
verbose: bool = True,
|
|
**kwargs
|
|
) -> BatchEncoding:
|
|
"""
|
|
Tokenize and prepare for the model a sequence or a pair of sequences.
|
|
|
|
.. warning::
|
|
This method is deprecated, ``__call__`` should be used instead.
|
|
|
|
Args:
|
|
text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]` (the latter only for not-fast tokenizers)):
|
|
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
|
|
``tokenize`` method) or a list of integers (tokenized string ids using the ``convert_tokens_to_ids``
|
|
method).
|
|
text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`):
|
|
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
|
|
the ``tokenize`` method) or a list of integers (tokenized string ids using the
|
|
``convert_tokens_to_ids`` method).
|
|
"""
|
|
|
|
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
|
|
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
|
|
padding=padding,
|
|
truncation=truncation,
|
|
max_length=max_length,
|
|
pad_to_multiple_of=pad_to_multiple_of,
|
|
verbose=verbose,
|
|
**kwargs,
|
|
)
|
|
|
|
return self._encode_plus(
|
|
text=text,
|
|
text_pair=text_pair,
|
|
add_special_tokens=add_special_tokens,
|
|
padding_strategy=padding_strategy,
|
|
truncation_strategy=truncation_strategy,
|
|
max_length=max_length,
|
|
stride=stride,
|
|
is_split_into_words=is_split_into_words,
|
|
pad_to_multiple_of=pad_to_multiple_of,
|
|
return_tensors=return_tensors,
|
|
return_token_type_ids=return_token_type_ids,
|
|
return_attention_mask=return_attention_mask,
|
|
return_overflowing_tokens=return_overflowing_tokens,
|
|
return_special_tokens_mask=return_special_tokens_mask,
|
|
return_offsets_mapping=return_offsets_mapping,
|
|
return_length=return_length,
|
|
verbose=verbose,
|
|
**kwargs,
|
|
)
|
|
|
|
def _encode_plus(
|
|
self,
|
|
text: Union[TextInput, PreTokenizedInput, EncodedInput],
|
|
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
|
|
add_special_tokens: bool = True,
|
|
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
|
|
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
|
|
max_length: Optional[int] = None,
|
|
stride: int = 0,
|
|
is_split_into_words: bool = False,
|
|
pad_to_multiple_of: Optional[int] = None,
|
|
return_tensors: Optional[Union[str, TensorType]] = None,
|
|
return_token_type_ids: Optional[bool] = None,
|
|
return_attention_mask: Optional[bool] = None,
|
|
return_overflowing_tokens: bool = False,
|
|
return_special_tokens_mask: bool = False,
|
|
return_offsets_mapping: bool = False,
|
|
return_length: bool = False,
|
|
verbose: bool = True,
|
|
**kwargs
|
|
) -> BatchEncoding:
|
|
raise NotImplementedError
|
|
|
|
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
|
|
def batch_encode_plus(
|
|
self,
|
|
batch_text_or_text_pairs: Union[
|
|
List[TextInput],
|
|
List[TextInputPair],
|
|
List[PreTokenizedInput],
|
|
List[PreTokenizedInputPair],
|
|
List[EncodedInput],
|
|
List[EncodedInputPair],
|
|
],
|
|
add_special_tokens: bool = True,
|
|
padding: Union[bool, str, PaddingStrategy] = False,
|
|
truncation: Union[bool, str, TruncationStrategy] = False,
|
|
max_length: Optional[int] = None,
|
|
stride: int = 0,
|
|
is_split_into_words: bool = False,
|
|
pad_to_multiple_of: Optional[int] = None,
|
|
return_tensors: Optional[Union[str, TensorType]] = None,
|
|
return_token_type_ids: Optional[bool] = None,
|
|
return_attention_mask: Optional[bool] = None,
|
|
return_overflowing_tokens: bool = False,
|
|
return_special_tokens_mask: bool = False,
|
|
return_offsets_mapping: bool = False,
|
|
return_length: bool = False,
|
|
verbose: bool = True,
|
|
**kwargs
|
|
) -> BatchEncoding:
|
|
"""
|
|
Tokenize and prepare for the model a list of sequences or a list of pairs of sequences.
|
|
|
|
.. warning::
|
|
This method is deprecated, ``__call__`` should be used instead.
|
|
|
|
Args:
|
|
batch_text_or_text_pairs (:obj:`List[str]`, :obj:`List[Tuple[str, str]]`, :obj:`List[List[str]]`, :obj:`List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also :obj:`List[List[int]]`, :obj:`List[Tuple[List[int], List[int]]]`):
|
|
Batch of sequences or pair of sequences to be encoded. This can be a list of
|
|
string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see
|
|
details in ``encode_plus``).
|
|
"""
|
|
|
|
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
|
|
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
|
|
padding=padding,
|
|
truncation=truncation,
|
|
max_length=max_length,
|
|
pad_to_multiple_of=pad_to_multiple_of,
|
|
verbose=verbose,
|
|
**kwargs,
|
|
)
|
|
|
|
return self._batch_encode_plus(
|
|
batch_text_or_text_pairs=batch_text_or_text_pairs,
|
|
add_special_tokens=add_special_tokens,
|
|
padding_strategy=padding_strategy,
|
|
truncation_strategy=truncation_strategy,
|
|
max_length=max_length,
|
|
stride=stride,
|
|
is_split_into_words=is_split_into_words,
|
|
pad_to_multiple_of=pad_to_multiple_of,
|
|
return_tensors=return_tensors,
|
|
return_token_type_ids=return_token_type_ids,
|
|
return_attention_mask=return_attention_mask,
|
|
return_overflowing_tokens=return_overflowing_tokens,
|
|
return_special_tokens_mask=return_special_tokens_mask,
|
|
return_offsets_mapping=return_offsets_mapping,
|
|
return_length=return_length,
|
|
verbose=verbose,
|
|
**kwargs,
|
|
)
|
|
|
|
def _batch_encode_plus(
|
|
self,
|
|
batch_text_or_text_pairs: Union[
|
|
List[TextInput],
|
|
List[TextInputPair],
|
|
List[PreTokenizedInput],
|
|
List[PreTokenizedInputPair],
|
|
List[EncodedInput],
|
|
List[EncodedInputPair],
|
|
],
|
|
add_special_tokens: bool = True,
|
|
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
|
|
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
|
|
max_length: Optional[int] = None,
|
|
stride: int = 0,
|
|
is_split_into_words: bool = False,
|
|
pad_to_multiple_of: Optional[int] = None,
|
|
return_tensors: Optional[Union[str, TensorType]] = None,
|
|
return_token_type_ids: Optional[bool] = None,
|
|
return_attention_mask: Optional[bool] = None,
|
|
return_overflowing_tokens: bool = False,
|
|
return_special_tokens_mask: bool = False,
|
|
return_offsets_mapping: bool = False,
|
|
return_length: bool = False,
|
|
verbose: bool = True,
|
|
**kwargs
|
|
) -> BatchEncoding:
|
|
raise NotImplementedError
|
|
|
|
def pad(
|
|
self,
|
|
encoded_inputs: Union[
|
|
BatchEncoding,
|
|
List[BatchEncoding],
|
|
Dict[str, EncodedInput],
|
|
Dict[str, List[EncodedInput]],
|
|
List[Dict[str, EncodedInput]],
|
|
],
|
|
padding: Union[bool, str, PaddingStrategy] = True,
|
|
max_length: Optional[int] = None,
|
|
pad_to_multiple_of: Optional[int] = None,
|
|
return_attention_mask: Optional[bool] = None,
|
|
return_tensors: Optional[Union[str, TensorType]] = None,
|
|
verbose: bool = True,
|
|
) -> BatchEncoding:
|
|
"""
|
|
Pad a single encoded input or a batch of encoded inputs up to a predefined length or up to the maximum
sequence length in the batch.
|
|
|
|
The padding side (left/right) and the padding token ids are defined at the tokenizer level (with
``self.padding_side``, ``self.pad_token_id`` and ``self.pad_token_type_id``).
|
|
|
|
.. note::
|
|
|
|
If the ``encoded_inputs`` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the
|
|
result will use the same type unless you provide a different tensor type with ``return_tensors``. In the
|
|
case of PyTorch tensors, you will lose the specific device of your tensors however.
|
|
|
|
Args:
|
|
encoded_inputs (:class:`~transformers.BatchEncoding`, list of :class:`~transformers.BatchEncoding`, :obj:`Dict[str, List[int]]`, :obj:`Dict[str, List[List[int]]]` or :obj:`List[Dict[str, List[int]]]`):
|
|
Tokenized inputs. Can represent one input (:class:`~transformers.BatchEncoding` or :obj:`Dict[str,
|
|
List[int]]`) or a batch of tokenized inputs (list of :class:`~transformers.BatchEncoding`, `Dict[str,
|
|
List[List[int]]]` or `List[Dict[str, List[int]]]`) so you can use this method during preprocessing as
|
|
well as in a PyTorch Dataloader collate function.
|
|
|
|
Instead of :obj:`List[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors),
|
|
see the note above for the return type.
|
|
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
|
|
Select a strategy to pad the returned sequences (according to the model's padding side and padding
|
|
index) among:
|
|
|
|
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a
|
|
single sequence is provided).
|
|
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
|
|
maximum acceptable input length for the model if that argument is not provided.
|
|
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
|
|
different lengths).
|
|
max_length (:obj:`int`, `optional`):
|
|
Maximum length of the returned list and optionally padding length (see above).
|
|
pad_to_multiple_of (:obj:`int`, `optional`):
|
|
If set will pad the sequence to a multiple of the provided value.
|
|
|
|
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
|
|
>= 7.5 (Volta).
|
|
return_attention_mask (:obj:`bool`, `optional`):
|
|
Whether to return the attention mask. If left to the default, will return the attention mask according
|
|
to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.
|
|
|
|
`What are attention masks? <../glossary.html#attention-mask>`__
|
|
return_tensors (:obj:`str` or :class:`~transformers.file_utils.TensorType`, `optional`):
|
|
If set, will return tensors instead of list of python integers. Acceptable values are:
|
|
|
|
* :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects.
|
|
* :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects.
|
|
* :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects.
|
|
verbose (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
|
Whether or not to print more information and warnings.
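Example of using this method as a dynamic-padding collate function (a sketch; the sample texts and the
surrounding Dataset/DataLoader setup are assumed, not shown)::

# Tokenize without padding first, then pad each batch to its own longest sequence
features = [tokenizer(text) for text in ["Hello world!", "How are you?"]]
batch = tokenizer.pad(features, padding=True, return_tensors="pt")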
|
|
"""
|
|
# If we have a list of dicts, let's convert it in a dict of lists
|
|
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
|
|
if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], (dict, BatchEncoding)):
|
|
encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}
|
|
|
|
# The model's main input name, usually `input_ids`, has to be passed for padding
|
|
if self.model_input_names[0] not in encoded_inputs:
|
|
raise ValueError(
|
|
"You should supply an encoding or a list of encodings to this method "
|
|
f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
|
|
)
|
|
|
|
required_input = encoded_inputs[self.model_input_names[0]]
|
|
|
|
if not required_input:
|
|
if return_attention_mask:
|
|
encoded_inputs["attention_mask"] = []
|
|
return encoded_inputs
|
|
|
|
# If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects
|
|
# and rebuild them afterwards if no return_tensors is specified
|
|
# Note that we lose the specific device the tensor may be on for PyTorch
|
|
|
|
first_element = required_input[0]
|
|
if isinstance(first_element, (list, tuple)):
|
|
# first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
|
|
index = 0
|
|
while index < len(required_input) and len(required_input[index]) == 0:
|
|
index += 1
|
|
if index < len(required_input):
|
|
first_element = required_input[index][0]
|
|
# At this state, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do.
|
|
if not isinstance(first_element, (int, list, tuple)):
|
|
if is_tf_available() and _is_tensorflow(first_element):
|
|
return_tensors = "tf" if return_tensors is None else return_tensors
|
|
elif is_torch_available() and _is_torch(first_element):
|
|
return_tensors = "pt" if return_tensors is None else return_tensors
|
|
elif isinstance(first_element, np.ndarray):
|
|
return_tensors = "np" if return_tensors is None else return_tensors
|
|
else:
|
|
raise ValueError(
|
|
f"type of {first_element} unknown: {type(first_element)}. "
|
|
f"Should be one of a python, numpy, pytorch or tensorflow object."
|
|
)
|
|
|
|
for key, value in encoded_inputs.items():
|
|
encoded_inputs[key] = to_py_obj(value)
|
|
|
|
# Convert padding_strategy in PaddingStrategy
|
|
padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
|
|
padding=padding, max_length=max_length, verbose=verbose
|
|
)
|
|
|
|
required_input = encoded_inputs[self.model_input_names[0]]
|
|
if required_input and not isinstance(required_input[0], (list, tuple)):
|
|
encoded_inputs = self._pad(
|
|
encoded_inputs,
|
|
max_length=max_length,
|
|
padding_strategy=padding_strategy,
|
|
pad_to_multiple_of=pad_to_multiple_of,
|
|
return_attention_mask=return_attention_mask,
|
|
)
|
|
return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
|
|
|
|
batch_size = len(required_input)
|
|
        if any(len(v) != batch_size for v in encoded_inputs.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")
|
|
|
|
if padding_strategy == PaddingStrategy.LONGEST:
|
|
max_length = max(len(inputs) for inputs in required_input)
|
|
padding_strategy = PaddingStrategy.MAX_LENGTH
|
|
|
|
batch_outputs = {}
|
|
for i in range(batch_size):
|
|
            inputs = {k: v[i] for k, v in encoded_inputs.items()}
|
|
outputs = self._pad(
|
|
inputs,
|
|
max_length=max_length,
|
|
padding_strategy=padding_strategy,
|
|
pad_to_multiple_of=pad_to_multiple_of,
|
|
return_attention_mask=return_attention_mask,
|
|
)
|
|
|
|
for key, value in outputs.items():
|
|
if key not in batch_outputs:
|
|
batch_outputs[key] = []
|
|
batch_outputs[key].append(value)
|
|
|
|
return BatchEncoding(batch_outputs, tensor_type=return_tensors)
|
|
|
|
def create_token_type_ids_from_sequences(
|
|
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
|
|
) -> List[int]:
|
|
"""
|
|
Create the token type IDs corresponding to the sequences passed. `What are token type IDs?
|
|
<../glossary.html#token-type-ids>`__
|
|
|
|
Should be overridden in a subclass if the model has a special way of building those.
|
|
|
|
Args:
|
|
token_ids_0 (:obj:`List[int]`): The first tokenized sequence.
|
|
token_ids_1 (:obj:`List[int]`, `optional`): The second tokenized sequence.
|
|
|
|
Returns:
|
|
:obj:`List[int]`: The token type ids.
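
        A small sketch of the base behaviour on hypothetical token ids (``tokenizer`` stands for any
        instantiated tokenizer; subclasses such as BERT-style tokenizers also account for their special
        tokens, so the exact output is model specific)::

            ids_0 = [5, 6]      # hypothetical ids of the first sequence
            ids_1 = [7, 8, 9]   # hypothetical ids of the second sequence
            tokenizer.create_token_type_ids_from_sequences(ids_0, ids_1)
            # base implementation -> [0, 0, 1, 1, 1]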
"""
|
|
if token_ids_1 is None:
|
|
return len(token_ids_0) * [0]
|
|
return [0] * len(token_ids_0) + [1] * len(token_ids_1)
|
|
|
|
def build_inputs_with_special_tokens(
|
|
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
|
|
) -> List[int]:
|
|
"""
|
|
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
|
|
adding special tokens.
|
|
|
|
This implementation does not add special tokens and this method should be overridden in a subclass.
|
|
|
|
Args:
|
|
token_ids_0 (:obj:`List[int]`): The first tokenized sequence.
|
|
token_ids_1 (:obj:`List[int]`, `optional`): The second tokenized sequence.
|
|
|
|
Returns:
|
|
:obj:`List[int]`: The model input with special tokens.
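
        A short sketch contrasting this base implementation with a typical subclass (the BERT-style layout is
        only an illustrative example)::

            ids_0, ids_1 = [5, 6], [7, 8]   # hypothetical token ids
            # base implementation: plain concatenation -> [5, 6, 7, 8]
            # a BERT-like subclass would instead return the equivalent of [CLS] + ids_0 + [SEP] + ids_1 + [SEP]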
"""
|
|
if token_ids_1 is None:
|
|
return token_ids_0
|
|
return token_ids_0 + token_ids_1
|
|
|
|
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
|
|
def prepare_for_model(
|
|
self,
|
|
ids: List[int],
|
|
pair_ids: Optional[List[int]] = None,
|
|
add_special_tokens: bool = True,
|
|
padding: Union[bool, str, PaddingStrategy] = False,
|
|
truncation: Union[bool, str, TruncationStrategy] = False,
|
|
max_length: Optional[int] = None,
|
|
stride: int = 0,
|
|
pad_to_multiple_of: Optional[int] = None,
|
|
return_tensors: Optional[Union[str, TensorType]] = None,
|
|
return_token_type_ids: Optional[bool] = None,
|
|
return_attention_mask: Optional[bool] = None,
|
|
return_overflowing_tokens: bool = False,
|
|
return_special_tokens_mask: bool = False,
|
|
return_offsets_mapping: bool = False,
|
|
return_length: bool = False,
|
|
verbose: bool = True,
|
|
prepend_batch_axis: bool = False,
|
|
**kwargs
|
|
) -> BatchEncoding:
|
|
"""
|
|
        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
        adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user defined stride) for overflowing tokens.
|
|
|
|
Args:
|
|
ids (:obj:`List[int]`):
|
|
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the ``tokenize``
|
|
and ``convert_tokens_to_ids`` methods.
|
|
pair_ids (:obj:`List[int]`, `optional`):
|
|
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the ``tokenize``
|
|
and ``convert_tokens_to_ids`` methods.
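
        A minimal usage sketch (illustrative only; the checkpoint name is an assumed example)::

            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
            ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))
            encoded = tokenizer.prepare_for_model(ids, add_special_tokens=True)
            # encoded["input_ids"] now includes the model's special tokens, e.g. [CLS] ... [SEP] for BERT-like models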
"""
|
|
|
|
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
|
|
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
|
|
padding=padding,
|
|
truncation=truncation,
|
|
max_length=max_length,
|
|
pad_to_multiple_of=pad_to_multiple_of,
|
|
verbose=verbose,
|
|
**kwargs,
|
|
)
|
|
|
|
        pair = pair_ids is not None
|
|
len_ids = len(ids)
|
|
len_pair_ids = len(pair_ids) if pair else 0
|
|
|
|
if return_token_type_ids and not add_special_tokens:
|
|
raise ValueError(
|
|
"Asking to return token_type_ids while setting add_special_tokens to False "
|
|
"results in an undefined behavior. Please set add_special_tokens to True or "
|
|
"set return_token_type_ids to None."
|
|
)
|
|
|
|
# Load from model defaults
|
|
if return_token_type_ids is None:
|
|
return_token_type_ids = "token_type_ids" in self.model_input_names
|
|
if return_attention_mask is None:
|
|
return_attention_mask = "attention_mask" in self.model_input_names
|
|
|
|
encoded_inputs = {}
|
|
|
|
# Compute the total size of the returned encodings
|
|
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
|
|
|
|
# Truncation: Handle max sequence length
|
|
overflowing_tokens = []
|
|
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
|
|
ids, pair_ids, overflowing_tokens = self.truncate_sequences(
|
|
ids,
|
|
pair_ids=pair_ids,
|
|
num_tokens_to_remove=total_len - max_length,
|
|
truncation_strategy=truncation_strategy,
|
|
stride=stride,
|
|
)
|
|
|
|
if return_overflowing_tokens:
|
|
encoded_inputs["overflowing_tokens"] = overflowing_tokens
|
|
encoded_inputs["num_truncated_tokens"] = total_len - max_length
|
|
|
|
# Add special tokens
|
|
if add_special_tokens:
|
|
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
|
|
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
|
|
else:
|
|
sequence = ids + pair_ids if pair else ids
|
|
token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
|
|
|
|
# Build output dictionary
|
|
encoded_inputs["input_ids"] = sequence
|
|
if return_token_type_ids:
|
|
encoded_inputs["token_type_ids"] = token_type_ids
|
|
if return_special_tokens_mask:
|
|
if add_special_tokens:
|
|
encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
|
|
else:
|
|
encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
|
|
|
|
# Check lengths
|
|
self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
|
|
|
|
# Padding
|
|
if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
|
|
encoded_inputs = self.pad(
|
|
encoded_inputs,
|
|
max_length=max_length,
|
|
padding=padding_strategy.value,
|
|
pad_to_multiple_of=pad_to_multiple_of,
|
|
return_attention_mask=return_attention_mask,
|
|
)
|
|
|
|
if return_length:
|
|
encoded_inputs["length"] = len(encoded_inputs["input_ids"])
|
|
|
|
batch_outputs = BatchEncoding(
|
|
encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
|
|
)
|
|
|
|
return batch_outputs
|
|
|
|
def truncate_sequences(
|
|
self,
|
|
ids: List[int],
|
|
pair_ids: Optional[List[int]] = None,
|
|
num_tokens_to_remove: int = 0,
|
|
truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
|
|
stride: int = 0,
|
|
) -> Tuple[List[int], List[int], List[int]]:
|
|
"""
|
|
        Truncates a sequence pair following the strategy and returns the truncated sequences and overflowing tokens.
|
|
|
|
Args:
|
|
ids (:obj:`List[int]`):
|
|
Tokenized input ids of the first sequence. Can be obtained from a string by chaining the ``tokenize``
|
|
and ``convert_tokens_to_ids`` methods.
|
|
pair_ids (:obj:`List[int]`, `optional`):
|
|
Tokenized input ids of the second sequence. Can be obtained from a string by chaining the ``tokenize``
|
|
and ``convert_tokens_to_ids`` methods.
|
|
num_tokens_to_remove (:obj:`int`, `optional`, defaults to 0):
|
|
Number of tokens to remove using the truncation strategy.
|
|
            truncation_strategy (:obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`'longest_first'`):
|
|
The strategy to follow for truncation. Can be:
|
|
|
|
* :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or
|
|
to the maximum acceptable input length for the model if that argument is not provided. This will
|
|
truncate token by token, removing a token from the longest sequence in the pair if a pair of
|
|
sequences (or a batch of pairs) is provided.
|
|
* :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to
|
|
the maximum acceptable input length for the model if that argument is not provided. This will only
|
|
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
|
|
* :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or
|
|
to the maximum acceptable input length for the model if that argument is not provided. This will only
|
|
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
|
|
                * :obj:`'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths
|
|
greater than the model maximum admissible input size).
|
|
stride (:obj:`int`, `optional`, defaults to 0):
|
|
If set to a positive number, the overflowing tokens returned will contain some tokens from the main
|
|
sequence returned. The value of this argument defines the number of additional tokens.
|
|
|
|
Returns:
|
|
:obj:`Tuple[List[int], List[int], List[int]]`: The truncated ``ids``, the truncated ``pair_ids`` and the
|
|
list of overflowing tokens.
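
        A worked sketch of the ``'only_first'`` strategy on hypothetical ids (``tokenizer`` stands for any
        instantiated tokenizer; the numbers are arbitrary)::

            ids = [1, 2, 3, 4, 5]
            tokenizer.truncate_sequences(ids, num_tokens_to_remove=2, truncation_strategy="only_first", stride=1)
            # -> ([1, 2, 3], None, [3, 4, 5])  (stride keeps one extra token in the overflow window)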
"""
|
|
if num_tokens_to_remove <= 0:
|
|
return ids, pair_ids, []
|
|
|
|
if not isinstance(truncation_strategy, TruncationStrategy):
|
|
truncation_strategy = TruncationStrategy(truncation_strategy)
|
|
|
|
overflowing_tokens = []
|
|
if truncation_strategy == TruncationStrategy.LONGEST_FIRST:
|
|
for _ in range(num_tokens_to_remove):
|
|
if pair_ids is None or len(ids) > len(pair_ids):
|
|
if not overflowing_tokens:
|
|
window_len = min(len(ids), stride + 1)
|
|
else:
|
|
window_len = 1
|
|
overflowing_tokens.extend(ids[-window_len:])
|
|
ids = ids[:-1]
|
|
else:
|
|
if not overflowing_tokens:
|
|
window_len = min(len(pair_ids), stride + 1)
|
|
else:
|
|
window_len = 1
|
|
overflowing_tokens.extend(pair_ids[-window_len:])
|
|
pair_ids = pair_ids[:-1]
|
|
elif truncation_strategy == TruncationStrategy.ONLY_FIRST:
|
|
if len(ids) > num_tokens_to_remove:
|
|
window_len = min(len(ids), stride + num_tokens_to_remove)
|
|
overflowing_tokens = ids[-window_len:]
|
|
ids = ids[:-num_tokens_to_remove]
|
|
else:
|
|
logger.error(
|
|
f"We need to remove {num_tokens_to_remove} to truncate the input"
|
|
f"but the first sequence has a length {len(ids)}. "
|
|
f"Please select another truncation strategy than {truncation_strategy}, "
|
|
f"for instance 'longest_first' or 'only_second'."
|
|
)
|
|
elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
|
|
if len(pair_ids) > num_tokens_to_remove:
|
|
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
|
|
overflowing_tokens = pair_ids[-window_len:]
|
|
pair_ids = pair_ids[:-num_tokens_to_remove]
|
|
else:
|
|
logger.error(
|
|
f"We need to remove {num_tokens_to_remove} to truncate the input"
|
|
f"but the second sequence has a length {len(pair_ids)}. "
|
|
f"Please select another truncation strategy than {truncation_strategy}, "
|
|
f"for instance 'longest_first' or 'only_first'."
|
|
)
|
|
|
|
return (ids, pair_ids, overflowing_tokens)
|
|
|
|
def _pad(
|
|
self,
|
|
encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
|
|
max_length: Optional[int] = None,
|
|
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
|
|
pad_to_multiple_of: Optional[int] = None,
|
|
return_attention_mask: Optional[bool] = None,
|
|
) -> dict:
|
|
"""
|
|
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
|
|
|
|
Args:
|
|
encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
|
|
max_length: maximum length of the returned list and optionally padding length (see below).
|
|
Will truncate by taking into account the special tokens.
|
|
padding_strategy: PaddingStrategy to use for padding.
|
|
|
|
                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
|
|
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
|
|
- PaddingStrategy.DO_NOT_PAD: Do not pad
|
|
The tokenizer padding sides are defined in self.padding_side:
|
|
|
|
- 'left': pads on the left of the sequences
|
|
- 'right': pads on the right of the sequences
|
|
            pad_to_multiple_of: (optional) Integer; if set, will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
                >= 7.5 (Volta).
|
|
return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics)
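
        A quick sketch of the ``pad_to_multiple_of`` rounding applied below (numbers are arbitrary)::

            max_length, pad_to_multiple_of = 10, 8
            # 10 is not a multiple of 8, so the target length is rounded up
            ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of  # == 16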
"""
|
|
# Load from model defaults
|
|
if return_attention_mask is None:
|
|
return_attention_mask = "attention_mask" in self.model_input_names
|
|
|
|
required_input = encoded_inputs[self.model_input_names[0]]
|
|
|
|
if padding_strategy == PaddingStrategy.LONGEST:
|
|
max_length = len(required_input)
|
|
|
|
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
|
|
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
|
|
|
|
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
|
|
|
|
if needs_to_be_padded:
|
|
difference = max_length - len(required_input)
|
|
if self.padding_side == "right":
|
|
if return_attention_mask:
|
|
encoded_inputs["attention_mask"] = [1] * len(required_input) + [0] * difference
|
|
if "token_type_ids" in encoded_inputs:
|
|
encoded_inputs["token_type_ids"] = (
|
|
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
|
|
)
|
|
if "special_tokens_mask" in encoded_inputs:
|
|
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
|
|
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
|
|
elif self.padding_side == "left":
|
|
if return_attention_mask:
|
|
encoded_inputs["attention_mask"] = [0] * difference + [1] * len(required_input)
|
|
if "token_type_ids" in encoded_inputs:
|
|
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
|
|
"token_type_ids"
|
|
]
|
|
if "special_tokens_mask" in encoded_inputs:
|
|
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
|
|
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
|
|
else:
|
|
raise ValueError("Invalid padding strategy:" + str(self.padding_side))
|
|
elif return_attention_mask and "attention_mask" not in encoded_inputs:
|
|
encoded_inputs["attention_mask"] = [1] * len(required_input)
|
|
|
|
return encoded_inputs
|
|
|
|
def convert_tokens_to_string(self, tokens: List[str]) -> str:
|
|
"""
|
|
        Converts a sequence of tokens into a single string. The simplest way to do it is ``" ".join(tokens)`` but we
|
|
often want to remove sub-word tokenization artifacts at the same time.
|
|
|
|
Args:
|
|
            tokens (:obj:`List[str]`): The tokens to join into a string.
|
|
|
|
Returns:
|
|
:obj:`str`: The joined tokens.
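
        A sketch of why a plain join is usually not enough (the WordPiece-style pieces below are only an
        illustrative example)::

            tokens = ["hel", "##lo", "world"]   # hypothetical sub-word tokens
            " ".join(tokens)                    # -> "hel ##lo world", sub-word artifacts left in place
            # A BERT-like subclass would typically merge the pieces and return "hello world" instead.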
"""
|
|
raise NotImplementedError
|
|
|
|
def batch_decode(
|
|
self,
|
|
sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"],
|
|
skip_special_tokens: bool = False,
|
|
clean_up_tokenization_spaces: bool = True,
|
|
**kwargs
|
|
) -> List[str]:
|
|
"""
|
|
Convert a list of lists of token ids into a list of strings by calling decode.
|
|
|
|
Args:
|
|
sequences (:obj:`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`):
|
|
List of tokenized input ids. Can be obtained using the ``__call__`` method.
|
|
skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
|
Whether or not to remove special tokens in the decoding.
|
|
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
|
Whether or not to clean up the tokenization spaces.
|
|
kwargs (additional keyword arguments, `optional`):
|
|
Will be passed to the underlying model specific decode method.
|
|
|
|
Returns:
|
|
:obj:`List[str]`: The list of decoded sentences.
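
        A minimal usage sketch (the checkpoint name is only an assumed example)::

            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
            encoded = tokenizer(["hello world", "how are you"])
            tokenizer.batch_decode(encoded["input_ids"], skip_special_tokens=True)
            # -> ["hello world", "how are you"]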
"""
|
|
return [
|
|
self.decode(
|
|
seq,
|
|
skip_special_tokens=skip_special_tokens,
|
|
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
|
|
**kwargs,
|
|
)
|
|
for seq in sequences
|
|
]
|
|
|
|
def decode(
|
|
self,
|
|
token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
|
|
skip_special_tokens: bool = False,
|
|
clean_up_tokenization_spaces: bool = True,
|
|
**kwargs
|
|
) -> str:
|
|
"""
|
|
        Converts a sequence of ids into a string, using the tokenizer and vocabulary, with options to remove special
|
|
tokens and clean up tokenization spaces.
|
|
|
|
Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.
|
|
|
|
Args:
|
|
token_ids (:obj:`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
|
|
List of tokenized input ids. Can be obtained using the ``__call__`` method.
|
|
skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
|
Whether or not to remove special tokens in the decoding.
|
|
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`):
|
|
Whether or not to clean up the tokenization spaces.
|
|
kwargs (additional keyword arguments, `optional`):
|
|
Will be passed to the underlying model specific decode method.
|
|
|
|
Returns:
|
|
:obj:`str`: The decoded sentence.
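
        A minimal usage sketch (the checkpoint name is only an assumed example)::

            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
            ids = tokenizer("hello world")["input_ids"]
            tokenizer.decode(ids)                            # keeps special tokens, e.g. "[CLS] hello world [SEP]"
            tokenizer.decode(ids, skip_special_tokens=True)  # -> "hello world"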
"""
|
|
# Convert inputs to python lists
|
|
token_ids = to_py_obj(token_ids)
|
|
|
|
return self._decode(
|
|
token_ids=token_ids,
|
|
skip_special_tokens=skip_special_tokens,
|
|
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
|
|
**kwargs,
|
|
)
|
|
|
|
def _decode(
|
|
self,
|
|
token_ids: Union[int, List[int]],
|
|
skip_special_tokens: bool = False,
|
|
clean_up_tokenization_spaces: bool = True,
|
|
**kwargs
|
|
) -> str:
|
|
raise NotImplementedError
|
|
|
|
def get_special_tokens_mask(
|
|
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
|
|
) -> List[int]:
|
|
"""
|
|
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
|
|
special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
|
|
|
|
Args:
|
|
token_ids_0 (:obj:`List[int]`):
|
|
List of ids of the first sequence.
|
|
token_ids_1 (:obj:`List[int]`, `optional`):
|
|
List of ids of the second sequence.
|
|
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
|
Whether or not the token list is already formatted with special tokens for the model.
|
|
|
|
Returns:
|
|
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
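
        A small sketch for a BERT-like tokenizer (illustrative only; ``tokenizer`` is assumed to be an
        instantiated tokenizer and the exact positions depend on the model)::

            ids = tokenizer("hello world")["input_ids"]   # e.g. [CLS] hello world [SEP]
            tokenizer.get_special_tokens_mask(ids, already_has_special_tokens=True)
            # -> [1, 0, 0, 1]  (1 marks the special tokens, 0 the regular tokens)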
"""
|
|
assert already_has_special_tokens and token_ids_1 is None, (
|
|
"You cannot use ``already_has_special_tokens=False`` with this tokenizer. "
|
|
"Please use a slow (full python) tokenizer to activate this argument."
|
|
"Or set `return_special_tokens_mask=True` when calling the encoding method "
|
|
"to get the special tokens mask in any tokenizer. "
|
|
)
|
|
|
|
all_special_ids = self.all_special_ids # cache the property
|
|
|
|
special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0]
|
|
|
|
return special_tokens_mask
|
|
|
|
@staticmethod
|
|
def clean_up_tokenization(out_string: str) -> str:
|
|
"""
|
|
Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms.
|
|
|
|
Args:
|
|
out_string (:obj:`str`): The text to clean up.
|
|
|
|
Returns:
|
|
:obj:`str`: The cleaned-up string.
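
        A short sketch of the kind of artifacts this removes (it is a static method, so it can be called on
        the class directly)::

            PreTrainedTokenizerBase.clean_up_tokenization("do n't stop , please !")
            # -> "don't stop, please!"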
"""
|
|
out_string = (
|
|
out_string.replace(" .", ".")
|
|
.replace(" ?", "?")
|
|
.replace(" !", "!")
|
|
.replace(" ,", ",")
|
|
.replace(" ' ", "'")
|
|
.replace(" n't", "n't")
|
|
.replace(" 'm", "'m")
|
|
.replace(" 's", "'s")
|
|
.replace(" 've", "'ve")
|
|
.replace(" 're", "'re")
|
|
)
|
|
return out_string
|
|
|
|
def _eventual_warn_about_too_long_sequence(self, ids: List[int], max_length: Optional[int], verbose: bool):
|
|
"""
|
|
        Depending on the input and internal state we might trigger a warning about a sequence that is too long for its
        corresponding model.
|
|
|
|
Args:
|
|
            ids (:obj:`List[int]`): The ids produced by the tokenization
|
|
max_length (:obj:`int`, `optional`): The max_length desired (does not trigger a warning if it is set)
|
|
verbose (:obj:`bool`): Whether or not to print more information and warnings.
|
|
|
|
"""
|
|
if max_length is None and len(ids) > self.model_max_length and verbose:
|
|
if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False):
|
|
logger.warning(
|
|
"Token indices sequence length is longer than the specified maximum sequence length "
|
|
f"for this model ({len(ids)} > {self.model_max_length}). Running this sequence through the model "
|
|
"will result in indexing errors"
|
|
)
|
|
self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True
|
|
|
|
@contextmanager
|
|
def as_target_tokenizer(self):
|
|
"""
|
|
Temporarily sets the tokenizer for encoding the targets. Useful for tokenizer associated to
|
|
sequence-to-sequence models that need a slightly different processing for the labels.
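
        A minimal sketch of the intended usage (the ``t5-small`` checkpoint is just an assumed example)::

            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained("t5-small")
            inputs = tokenizer(["translate English to German: hello"], return_tensors="pt")
            with tokenizer.as_target_tokenizer():
                labels = tokenizer(["hallo"], return_tensors="pt")
            inputs["labels"] = labels["input_ids"]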
"""
|
|
yield
|
|
|
|
def prepare_seq2seq_batch(
|
|
self,
|
|
src_texts: List[str],
|
|
tgt_texts: Optional[List[str]] = None,
|
|
max_length: Optional[int] = None,
|
|
max_target_length: Optional[int] = None,
|
|
padding: str = "longest",
|
|
        return_tensors: Optional[str] = None,
|
|
truncation: bool = True,
|
|
**kwargs,
|
|
) -> BatchEncoding:
|
|
"""
|
|
Prepare model inputs for translation. For best performance, translate one sentence at a time.
|
|
|
|
Arguments:
|
|
src_texts (:obj:`List[str]`):
|
|
List of documents to summarize or source language texts.
|
|
tgt_texts (:obj:`list`, `optional`):
|
|
List of summaries or target language texts.
|
|
max_length (:obj:`int`, `optional`):
|
|
Controls the maximum length for encoder inputs (documents to summarize or source language texts) If
|
|
left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length
|
|
is required by one of the truncation/padding parameters. If the model has no specific maximum input
|
|
length (like XLNet) truncation/padding to a maximum length will be deactivated.
|
|
max_target_length (:obj:`int`, `optional`):
|
|
Controls the maximum length of decoder inputs (target language texts or summaries) If left unset or set
|
|
to :obj:`None`, this will use the max_length value.
|
|
            padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`'longest'`):
|
|
Activates and controls padding. Accepts the following values:
|
|
|
|
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a
|
|
single sequence if provided).
|
|
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
|
|
maximum acceptable input length for the model if that argument is not provided.
|
|
                * :obj:`False` or :obj:`'do_not_pad'`: No padding (i.e., can output a batch with sequences of
|
|
different lengths).
|
|
return_tensors (:obj:`str` or :class:`~transformers.file_utils.TensorType`, `optional`):
|
|
If set, will return tensors instead of list of python integers. Acceptable values are:
|
|
|
|
* :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects.
|
|
* :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects.
|
|
* :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects.
|
|
truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`True`):
|
|
Activates and controls truncation. Accepts the following values:
|
|
|
|
* :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument
|
|
:obj:`max_length` or to the maximum acceptable input length for the model if that argument is not
|
|
provided. This will truncate token by token, removing a token from the longest sequence in the pair
|
|
if a pair of sequences (or a batch of pairs) is provided.
|
|
* :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to
|
|
the maximum acceptable input length for the model if that argument is not provided. This will only
|
|
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
|
|
* :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or
|
|
to the maximum acceptable input length for the model if that argument is not provided. This will only
|
|
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
|
|
                * :obj:`False` or :obj:`'do_not_truncate'`: No truncation (i.e., can output batch with
|
|
sequence lengths greater than the model maximum admissible input size).
|
|
**kwargs:
|
|
Additional keyword arguments passed along to :obj:`self.__call__`.
|
|
|
|
Return:
|
|
:class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields:
|
|
|
|
- **input_ids** -- List of token ids to be fed to the encoder.
|
|
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model.
|
|
- **labels** -- List of token ids for tgt_texts.
|
|
|
|
        The full set of keys ``[input_ids, attention_mask, labels]`` will only be returned if tgt_texts is passed.
|
|
Otherwise, input_ids, attention_mask will be the only keys.
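
        A minimal (deprecated) usage sketch; prefer ``__call__`` together with :meth:`as_target_tokenizer`.
        The checkpoint name below is only an assumed example::

            from transformers import AutoTokenizer

            tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
            batch = tokenizer.prepare_seq2seq_batch(
                src_texts=["I love coffee."], tgt_texts=["Ich liebe Kaffee."], return_tensors="pt"
            )
            # batch now contains `input_ids`, `attention_mask` and `labels`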
"""
|
|
warnings.warn(
|
|
"`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
|
|
"regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
|
|
"context manager to prepare your targets. See the documentation of your specific tokenizer for more "
|
|
"details",
|
|
FutureWarning,
|
|
)
|
|
# mBART-specific kwargs that should be ignored by other models.
|
|
kwargs.pop("src_lang", None)
|
|
kwargs.pop("tgt_lang", None)
|
|
if max_length is None:
|
|
max_length = self.model_max_length
|
|
model_inputs = self(
|
|
src_texts,
|
|
add_special_tokens=True,
|
|
return_tensors=return_tensors,
|
|
max_length=max_length,
|
|
padding=padding,
|
|
truncation=truncation,
|
|
**kwargs,
|
|
)
|
|
if tgt_texts is None:
|
|
return model_inputs
|
|
# Process tgt_texts
|
|
if max_target_length is None:
|
|
max_target_length = max_length
|
|
with self.as_target_tokenizer():
|
|
labels = self(
|
|
tgt_texts,
|
|
add_special_tokens=True,
|
|
return_tensors=return_tensors,
|
|
padding=padding,
|
|
max_length=max_target_length,
|
|
truncation=truncation,
|
|
**kwargs,
|
|
)
|
|
model_inputs["labels"] = labels["input_ids"]
|
|
return model_inputs
|