Add PRETRAINED_INIT_CONFIGURATION to DistilBERT tokenizer

Author: Lysandre (committed by Lysandre Debut)
Date: 2020-01-09 12:14:30 +01:00
parent bfec203d4e
commit 801f2ac8c7

@@ -41,6 +41,14 @@ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
 }
 
 
+PRETRAINED_INIT_CONFIGURATION = {
+    "distilbert-base-uncased": {"do_lower_case": True},
+    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
+    "distilbert-base-german-cased": {"do_lower_case": False},
+    "distilbert-base-multilingual-cased": {"do_lower_case": False},
+}
+
+
 class DistilBertTokenizer(BertTokenizer):
     r"""
     Constructs a DistilBertTokenizer.
@@ -59,3 +67,4 @@ class DistilBertTokenizer(BertTokenizer):
     vocab_files_names = VOCAB_FILES_NAMES
     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
     max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
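
For context, pretrained_init_configuration is the mapping that PreTrainedTokenizer.from_pretrained consults for per-checkpoint default constructor arguments, so with this change the cased DistilBERT checkpoints load with do_lower_case=False without the caller passing it explicitly. A minimal sketch of the effect, assuming a transformers version that contains this commit (basic_tokenizer is the BasicTokenizer instance DistilBertTokenizer inherits from BertTokenizer):

# Cased checkpoints now default to do_lower_case=False via
# PRETRAINED_INIT_CONFIGURATION; no kwarg is passed by the caller.
from transformers import DistilBertTokenizer

cased = DistilBertTokenizer.from_pretrained("distilbert-base-german-cased")
uncased = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

# do_lower_case lives on the BasicTokenizer used for whitespace and
# punctuation splitting before WordPiece is applied.
print(cased.basic_tokenizer.do_lower_case)    # False
print(uncased.basic_tokenizer.do_lower_case)  # True

# Before this mapping existed, both loads fell back to the class default
# (do_lower_case=True), silently lowercasing input for cased vocabularies.
# Exact subword splits depend on each checkpoint's vocab, but the cased
# tokenizer now preserves capitalization:
print(cased.tokenize("Guten Tag"))
print(uncased.tokenize("Guten Tag"))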