Mirror of https://github.com/huggingface/transformers.git
Synced 2025-08-03 03:31:05 +06:00
create_mask_from_sequences -> create_token_type_ids_from_sequences
This commit is contained in:
parent c832f43a4d
commit d340e2329e
@@ -204,7 +204,7 @@ class BertTokenizer(PreTrainedTokenizer):
         cls = [self.cls_token_id]
         return cls + token_ids_0 + sep + token_ids_1 + sep
 
-    def create_mask_from_sequences(self, sequence_0, sequence_1):
+    def create_token_type_ids_from_sequences(self, sequence_0, sequence_1):
         """
         Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
         A BERT sequence pair mask has the following format:
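The renamed method keeps the behavior described in the docstring: type 0 covers [CLS] plus the first sequence and its [SEP], type 1 covers the second sequence and its [SEP]. A minimal standalone sketch of that logic; the function name and the dummy token ids are illustrative, not the library's internals:

# Standalone sketch of BERT-style token type ids; ids are dummies.
def bert_pair_token_type_ids(ids_0, ids_1, cls_id=101, sep_id=102):
    first = [cls_id] + ids_0 + [sep_id]   # segment A, all type 0
    second = ids_1 + [sep_id]             # segment B, all type 1
    return [0] * len(first) + [1] * len(second)

print(bert_pair_token_type_ids([7, 8, 9], [4, 5]))
# -> [0, 0, 0, 0, 0, 1, 1, 1]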
@@ -67,14 +67,3 @@ class DistilBertTokenizer(BertTokenizer):
     def add_special_tokens_sequence_pair(self, token_ids_0, token_ids_1):
         sep = [self.sep_token_id]
         return token_ids_0 + sep + token_ids_1
-
-    def create_mask_from_sequences(self, sequence_0, sequence_1):
-        """
-        Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
-        A BERT sequence pair mask has the following format:
-        0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1
-        | first sequence    | second sequence
-        """
-        sep = [self.sep_token_id]
-
-        return len(self.encode(sequence_0) + sep) * [0] + len(self.encode(sequence_1)) * [1]
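Here the override is deleted outright rather than renamed: DistilBertTokenizer subclasses BertTokenizer, so lookups of the renamed method now resolve to the parent implementation. A minimal sketch of that method resolution with stand-in classes (names are illustrative):

# Stand-in classes sketching the method resolution after the deletion.
class Parent:
    def create_token_type_ids_from_sequences(self, sequence_0, sequence_1):
        return "parent implementation"

class Child(Parent):
    pass  # the override was removed; nothing is redefined here

print(Child().create_token_type_ids_from_sequences("a", "b"))
# -> "parent implementation"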
@@ -97,7 +97,7 @@ class RobertaTokenizer(GPT2Tokenizer):
         cls = [self.cls_token_id]
         return cls + token_ids_0 + sep + sep + token_ids_1 + sep
 
-    def create_mask_from_sequences(self, sequence_0, sequence_1):
+    def create_token_type_ids_from_sequences(self, sequence_0, sequence_1):
         """
         Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
         A RoBERTa sequence pair mask has the following format:
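Note the RoBERTa context line above: unlike BERT, pairs are joined with a double separator (<s> A </s></s> B </s>). A sketch of that layout mirroring the return statement shown in the hunk; the concrete id values are dummies:

# RoBERTa-style pair layout with dummy ids (0 for <s>, 2 for </s>).
def roberta_pair(ids_0, ids_1, cls_id=0, sep_id=2):
    return [cls_id] + ids_0 + [sep_id, sep_id] + ids_1 + [sep_id]

print(roberta_pair([7, 8], [9]))  # -> [0, 7, 8, 2, 2, 9, 2]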
@@ -780,7 +780,7 @@ class PreTrainedTokenizer(object):
             )
 
         if output_token_type:
-            information["token_type_ids"] = self.create_mask_from_sequences(text, text_pair)
+            information["token_type_ids"] = self.create_token_type_ids_from_sequences(text, text_pair)
         else:
             logger.warning("No special tokens were added. The two sequences have been concatenated.")
             sequence = first_sentence_tokens + second_sentence_tokens
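Since this is a plain rename, downstream code that called create_mask_from_sequences directly will raise AttributeError after this commit. A purely illustrative compatibility wrapper for callers that need to span both versions; nothing like it is added by the commit itself:

# Illustrative compatibility wrapper; not part of this commit.
def token_type_ids(tokenizer, sequence_0, sequence_1):
    fn = getattr(tokenizer, "create_token_type_ids_from_sequences",
                 getattr(tokenizer, "create_mask_from_sequences", None))
    if fn is None:
        raise AttributeError("tokenizer exposes neither method name")
    return fn(sequence_0, sequence_1)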
@@ -863,7 +863,7 @@ class PreTrainedTokenizer(object):
 
         return information
 
-    def create_mask_from_sequences(self, sequence_0, sequence_1):
+    def create_token_type_ids_from_sequences(self, sequence_0, sequence_1):
         logger.warning("This tokenizer does not make use of special tokens.")
         return [0] * len(self.encode(sequence_0)) + [1] * len(self.encode(sequence_1))
 
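The base-class default shown here ignores special tokens entirely: 0s over the encoded first sequence, 1s over the second, which is why the subclasses above override it. A standalone reproduction with encode stubbed as a whitespace splitter (the real method returns token ids; only the lengths matter):

# Reproduction of the base-class default; `encode` is a stub.
def default_token_type_ids(sequence_0, sequence_1, encode=str.split):
    return [0] * len(encode(sequence_0)) + [1] * len(encode(sequence_1))

print(default_token_type_ids("a b c", "d e"))  # -> [0, 0, 0, 1, 1]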
@@ -770,7 +770,7 @@ class XLMTokenizer(PreTrainedTokenizer):
         cls = [self.cls_token_id]
         return cls + token_ids_0 + sep + token_ids_1 + sep
 
-    def create_mask_from_sequences(self, sequence_0, sequence_1):
+    def create_token_type_ids_from_sequences(self, sequence_0, sequence_1):
         """
         Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
         An XLM sequence pair mask has the following format:
@@ -200,7 +200,7 @@ class XLNetTokenizer(PreTrainedTokenizer):
         cls = [self.cls_token_id]
         return token_ids_0 + sep + token_ids_1 + sep + cls
 
-    def create_mask_from_sequences(self, sequence_0, sequence_1):
+    def create_token_type_ids_from_sequences(self, sequence_0, sequence_1):
         """
         Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
         A BERT sequence pair mask has the following format:
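Unlike BERT, the XLNet context line places the special tokens at the end of the pair (A [SEP] B [SEP] [CLS]). A sketch of that layout mirroring the return statement above; the sep/cls id values are illustrative:

# XLNet-style pair layout with dummy ids; sep/cls values are illustrative.
def xlnet_pair(ids_0, ids_1, sep_id=4, cls_id=3):
    return ids_0 + [sep_id] + ids_1 + [sep_id] + [cls_id]

print(xlnet_pair([7, 8], [9]))  # -> [7, 8, 4, 9, 4, 3]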