Added documentation and changed parameters for special_tokens_sentences_pair.

LysandreJik 2019-08-12 15:13:53 -04:00
parent 912fdff899
commit 22ac004a7c
5 changed files with 50 additions and 18 deletions

pytorch_transformers/tokenization_bert.py

@@ -167,12 +167,20 @@ class BertTokenizer(PreTrainedTokenizer):
         return out_string

     def add_special_tokens_single_sentence(self, token_ids):
+        """
+        Adds special tokens to a sequence for sequence classification tasks.
+        A BERT sequence has the following format: [CLS] X [SEP]
+        """
         return [self._convert_token_to_id(self.cls_token)] + token_ids + [self._convert_token_to_id(self.sep_token)]

-    def add_special_tokens_sentences_pair(self, *token_ids):
+    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
+        """
+        Adds special tokens to a sequence pair for sequence classification tasks.
+        A BERT sequence pair has the following format: [CLS] A [SEP] B [SEP]
+        """
         sep = [self._convert_token_to_id(self.sep_token)]
         cls = [self._convert_token_to_id(self.cls_token)]
-        return cls + token_ids[0] + sep + token_ids[1] + sep
+        return cls + token_ids_0 + sep + token_ids_1 + sep

     def save_vocabulary(self, vocab_path):
         """Save the tokenizer vocabulary to a directory or file."""

pytorch_transformers/tokenization_roberta.py

@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""Tokenization classes for OpenAI GPT."""
+"""Tokenization classes for RoBERTa."""
 from __future__ import (absolute_import, division, print_function,
                         unicode_literals)
@@ -57,15 +57,15 @@ PRETRAINED_VOCAB_FILES_MAP = {
 }

 PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
-    'roberta-base': 1024,
-    'roberta-large': 1024,
-    'roberta-large-mnli': 1024,
+    'roberta-base': 512,
+    'roberta-large': 512,
+    'roberta-large-mnli': 512,
 }

 class RobertaTokenizer(PreTrainedTokenizer):
     """
-    GPT-2 BPE tokenizer. Peculiarities:
+    RoBERTa BPE tokenizer, derived from the GPT-2 tokenizer. Peculiarities:
         - Byte-level BPE
     """
     vocab_files_names = VOCAB_FILES_NAMES
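These sizes cap the input length the tokenizer accepts for each checkpoint, so this hunk limits RoBERTa inputs to 512 positions. A hedged sketch of how that surfaces, assuming the max_len attribute that pytorch_transformers sets from this table at load time:

from pytorch_transformers import RobertaTokenizer

tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
print(tokenizer.max_len)  # 512 after this change, 1024 before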
@@ -161,12 +161,20 @@ class RobertaTokenizer(PreTrainedTokenizer):
         return text

     def add_special_tokens_single_sentence(self, token_ids):
+        """
+        Adds special tokens to a sequence for sequence classification tasks.
+        A RoBERTa sequence has the following format: [CLS] X [SEP]
+        """
         return [self._convert_token_to_id(self.cls_token)] + token_ids + [self._convert_token_to_id(self.sep_token)]

-    def add_special_tokens_sentences_pair(self, *token_ids):
+    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
+        """
+        Adds special tokens to a sequence pair for sequence classification tasks.
+        A RoBERTa sequence pair has the following format: [CLS] A [SEP][SEP] B [SEP]
+        """
         sep = [self._convert_token_to_id(self.sep_token)]
         cls = [self._convert_token_to_id(self.cls_token)]
-        return cls + token_ids[0] + sep + sep + token_ids[1] + sep
+        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

     def save_vocabulary(self, save_directory):
         """Save the tokenizer vocabulary and merge files to a directory."""

pytorch_transformers/tokenization_utils.py

@@ -546,7 +546,7 @@ class PreTrainedTokenizer(object):
     def add_special_tokens_single_sentence(self, token_ids):
         raise NotImplementedError

-    def add_special_tokens_sentences_pair(self, *token_ids):
+    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
         raise NotImplementedError

     def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
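One practical effect of replacing the variadic *token_ids with two named parameters in the base class: a call with the wrong number of sequences now fails at the call site with a clear TypeError, instead of an IndexError from inside the method body. Continuing the BERT sketch above:

tokenizer.add_special_tokens_sentences_pair(ids_a)
# TypeError: add_special_tokens_sentences_pair() missing 1 required
# positional argument: 'token_ids_1'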

pytorch_transformers/tokenization_xlm.py

@@ -215,12 +215,20 @@ class XLMTokenizer(PreTrainedTokenizer):
         return out_string

     def add_special_tokens_single_sentence(self, token_ids):
+        """
+        Adds special tokens to a sequence for sequence classification tasks.
+        An XLM sequence has the following format: [CLS] X [SEP]
+        """
         return [self._convert_token_to_id(self.cls_token)] + token_ids + [self._convert_token_to_id(self.sep_token)]

-    def add_special_tokens_sentences_pair(self, *token_ids):
+    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
+        """
+        Adds special tokens to a sequence pair for sequence classification tasks.
+        An XLM sequence pair has the following format: [CLS] A [SEP] B [SEP]
+        """
         sep = [self._convert_token_to_id(self.sep_token)]
         cls = [self._convert_token_to_id(self.cls_token)]
-        return cls + token_ids[0] + sep + token_ids[1] + sep
+        return cls + token_ids_0 + sep + token_ids_1 + sep

     def save_vocabulary(self, save_directory):
         """Save the tokenizer vocabulary and merge files to a directory."""

pytorch_transformers/tokenization_xlnet.py

@@ -178,14 +178,22 @@ class XLNetTokenizer(PreTrainedTokenizer):
         return out_string

     def add_special_tokens_single_sentence(self, token_ids):
-        logger.warning("No method was defined for special tokens and single sentence streams in XLNet. "
-                       "Returning token_ids")
-        return token_ids
-
-    def add_special_tokens_sentences_pair(self, *token_ids):
+        """
+        Adds special tokens to a sequence for sequence classification tasks.
+        An XLNet sequence has the following format: X [SEP][CLS]
+        """
         sep = [self._convert_token_to_id(self.sep_token)]
         cls = [self._convert_token_to_id(self.cls_token)]
-        return token_ids[0] + sep + token_ids[1] + sep + cls
+        return token_ids + sep + cls
+
+    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
+        """
+        Adds special tokens to a sequence pair for sequence classification tasks.
+        An XLNet sequence pair has the following format: A [SEP] B [SEP][CLS]
+        """
+        sep = [self._convert_token_to_id(self.sep_token)]
+        cls = [self._convert_token_to_id(self.cls_token)]
+        return token_ids_0 + sep + token_ids_1 + sep + cls

     def save_vocabulary(self, save_directory):
         """ Save the sentencepiece vocabulary (copy original file) and special tokens file