create_mask_from_sequences -> create_token_type_ids_from_sequences

LysandreJik 2019-09-24 09:09:28 -04:00
parent c832f43a4d
commit d340e2329e
6 changed files with 6 additions and 17 deletions

@@ -204,7 +204,7 @@ class BertTokenizer(PreTrainedTokenizer):
         return cls + token_ids_0 + sep + token_ids_1 + sep
 
-    def create_mask_from_sequences(self, sequence_0, sequence_1):
+    def create_token_type_ids_from_sequences(self, sequence_0, sequence_1):
         """
         Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
         A BERT sequence pair mask has the following format:
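For reference, a minimal standalone sketch of the BERT-style pair mask the renamed method describes. This is an illustration only, not the library's implementation; the function name and raw id lists are hypothetical.

# Hypothetical sketch: a BERT pair mask is 0 for [CLS] + first sequence + [SEP],
# and 1 for the second sequence plus its trailing [SEP].
def bert_style_token_type_ids(ids_0, ids_1):
    return [0] * (1 + len(ids_0) + 1) + [1] * (len(ids_1) + 1)

print(bert_style_token_type_ids([7, 8, 9], [4, 5]))  # [0, 0, 0, 0, 0, 1, 1, 1]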

@@ -67,14 +67,3 @@ class DistilBertTokenizer(BertTokenizer):
     def add_special_tokens_sequence_pair(self, token_ids_0, token_ids_1):
         sep = [self.sep_token_id]
         return token_ids_0 + sep + token_ids_1
-
-    def create_mask_from_sequences(self, sequence_0, sequence_1):
-        """
-        Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
-        A BERT sequence pair mask has the following format:
-        0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1
-        | first sequence    | second sequence
-        """
-        sep = [self.sep_token_id]
-        return len(self.encode(sequence_0) + sep) * [0] + len(self.encode(sequence_1)) * [1]
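Deleting this override means DistilBertTokenizer now resolves the method on its parent, BertTokenizer. A minimal sketch of that fallback (the class names here are stand-ins, not library classes):

# Stand-in classes illustrating the inheritance fallback after the deletion.
class Parent:
    def create_token_type_ids_from_sequences(self, sequence_0, sequence_1):
        return "parent implementation"

class Child(Parent):  # like DistilBertTokenizer once its override is removed
    pass

print(Child().create_token_type_ids_from_sequences("a", "b"))  # parent implementation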

@@ -97,7 +97,7 @@ class RobertaTokenizer(GPT2Tokenizer):
         cls = [self.cls_token_id]
         return cls + token_ids_0 + sep + sep + token_ids_1 + sep
 
-    def create_mask_from_sequences(self, sequence_0, sequence_1):
+    def create_token_type_ids_from_sequences(self, sequence_0, sequence_1):
         """
         Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
         A RoBERTa sequence pair mask has the following format:
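RoBERTa's special-token layout above places a double separator between the two sequences (cls + ids_0 + sep + sep + ids_1 + sep). A hedged sketch of a 0/1 mask over that layout; the docstring's format lines are truncated in this diff, so the exact segment split is an assumption:

# Assumed 0/1 split over RoBERTa's <s> ids_0 </s> </s> ids_1 </s> layout;
# the real method may assign segments differently.
def roberta_style_token_type_ids(ids_0, ids_1):
    return [0] * (1 + len(ids_0) + 1) + [1] * (1 + len(ids_1) + 1)

print(roberta_style_token_type_ids([3, 4], [5]))  # [0, 0, 0, 0, 1, 1, 1]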

@ -780,7 +780,7 @@ class PreTrainedTokenizer(object):
)
if output_token_type:
information["token_type_ids"] = self.create_mask_from_sequences(text, text_pair)
information["token_type_ids"] = self.create_token_type_ids_from_sequences(text, text_pair)
else:
logger.warning("No special tokens were added. The two sequences have been concatenated.")
sequence = first_sentence_tokens + second_sentence_tokens
@ -863,7 +863,7 @@ class PreTrainedTokenizer(object):
return information
def create_mask_from_sequences(self, sequence_0, sequence_1):
def create_token_type_ids_from_sequences(self, sequence_0, sequence_1):
logger.warning("This tokenizer does not make use of special tokens.")
return [0] * len(self.encode(sequence_0)) + [1] * len(self.encode(sequence_1))
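The base-class fallback above is self-contained enough to run with a stub encode(). A minimal sketch (the class name is a stand-in, and the whitespace-splitting encode() stands in for real subword encoding):

import logging

logger = logging.getLogger(__name__)

class TokenizerSketch:
    def encode(self, text):
        return text.split()  # stand-in for real subword encoding

    # mirrors the base-class fallback shown in the hunk above
    def create_token_type_ids_from_sequences(self, sequence_0, sequence_1):
        logger.warning("This tokenizer does not make use of special tokens.")
        return [0] * len(self.encode(sequence_0)) + [1] * len(self.encode(sequence_1))

print(TokenizerSketch().create_token_type_ids_from_sequences("hello world", "hi"))
# [0, 0, 1]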

@@ -770,7 +770,7 @@ class XLMTokenizer(PreTrainedTokenizer):
         cls = [self.cls_token_id]
         return cls + token_ids_0 + sep + token_ids_1 + sep
 
-    def create_mask_from_sequences(self, sequence_0, sequence_1):
+    def create_token_type_ids_from_sequences(self, sequence_0, sequence_1):
         """
         Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
         An XLM sequence pair mask has the following format:
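XLM's layout shown above (cls + ids_0 + sep + ids_1 + sep) matches the BERT-style sketch given earlier, so the same 0/1 split applies. A one-liner under that assumption (function name is hypothetical):

# Same assumed split as the BERT sketch: segment 0 through the first separator.
def xlm_style_token_type_ids(ids_0, ids_1):
    return [0] * (1 + len(ids_0) + 1) + [1] * (len(ids_1) + 1)

print(xlm_style_token_type_ids([3, 4], [5]))  # [0, 0, 0, 0, 1, 1]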

@@ -200,7 +200,7 @@ class XLNetTokenizer(PreTrainedTokenizer):
         cls = [self.cls_token_id]
         return token_ids_0 + sep + token_ids_1 + sep + cls
 
-    def create_mask_from_sequences(self, sequence_0, sequence_1):
+    def create_token_type_ids_from_sequences(self, sequence_0, sequence_1):
         """
         Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
         An XLNet sequence pair mask has the following format:
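XLNet differs from the others: its classifier token comes last (ids_0 + sep + ids_1 + sep + cls), and in XLNet the trailing cls conventionally gets its own segment id of 2. A hedged sketch under that assumption, since the docstring's format lines are cut off in this diff:

# Assumed XLNet-style mask: 0s for ids_0 + <sep>, 1s for ids_1 + <sep>,
# and segment id 2 for the trailing <cls>.
def xlnet_style_token_type_ids(ids_0, ids_1):
    return [0] * (len(ids_0) + 1) + [1] * (len(ids_1) + 1) + [2]

print(xlnet_style_token_type_ids([3, 4], [5]))  # [0, 0, 0, 1, 1, 2]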