mirror of https://github.com/huggingface/transformers.git
add kwargs to base encode function
commit a175a9dc01
parent f1b018740c
@@ -563,7 +563,7 @@ class PreTrainedTokenizer(object):
     def _convert_token_to_id(self, token):
         raise NotImplementedError
 
-    def encode(self, text, text_pair=None, add_special_tokens=False):
+    def encode(self, text, text_pair=None, add_special_tokens=False, **kwargs):
         """
         Converts a string in a sequence of ids (integer), using the tokenizer and vocabulary.
 
@@ -574,15 +574,16 @@ class PreTrainedTokenizer(object):
             text_pair: Optional second sequence to be encoded.
             add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
                 to their model.
+            **kwargs: passed to the `self.tokenize()` method
         """
         if text_pair is None:
             if add_special_tokens:
-                return self.add_special_tokens_single_sentence(self.convert_tokens_to_ids(self.tokenize(text)))
+                return self.add_special_tokens_single_sentence(self.convert_tokens_to_ids(self.tokenize(text, **kwargs)))
             else:
-                return self.convert_tokens_to_ids(self.tokenize(text))
+                return self.convert_tokens_to_ids(self.tokenize(text, **kwargs))
 
-        first_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text)]
-        second_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text_pair)]
+        first_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text, **kwargs)]
+        second_sentence_tokens = [self._convert_token_to_id(token) for token in self.tokenize(text_pair, **kwargs)]
 
         if add_special_tokens:
             return self.add_special_tokens_sentences_pair(first_sentence_tokens, second_sentence_tokens)
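
For context, a minimal sketch of the pattern this change enables: keyword arguments passed to encode() now flow through to the subclass's tokenize() implementation, so tokenizer-specific options can be set at encode time without the base class knowing about them. The ToyTokenizer class and its lowercase flag below are hypothetical illustrations, not part of the library.

# A minimal, self-contained sketch of the kwargs-forwarding pattern.
# ToyTokenizer and the `lowercase` option are hypothetical, for illustration.
class ToyTokenizer:
    vocab = {"hello": 0, "world": 1, "[UNK]": 2}

    def tokenize(self, text, lowercase=False, **kwargs):
        # Hypothetical subclass-specific option, reachable via encode(**kwargs).
        if lowercase:
            text = text.lower()
        return text.split()

    def convert_tokens_to_ids(self, tokens):
        return [self.vocab.get(t, self.vocab["[UNK]"]) for t in tokens]

    def encode(self, text, text_pair=None, add_special_tokens=False, **kwargs):
        # Mirrors the patched base method: kwargs pass straight through to tokenize().
        return self.convert_tokens_to_ids(self.tokenize(text, **kwargs))

tok = ToyTokenizer()
print(tok.encode("Hello world"))                  # [2, 1] -- "Hello" falls to [UNK]
print(tok.encode("Hello world", lowercase=True))  # [0, 1]

Before this commit, an option like lowercase could only be set on the tokenize() call itself; forwarding **kwargs lets callers of encode() reach it in one step.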