module: add support for XLM-RoBERTa (__init__)
commit d3549b66af
parent 71b4750517
@@ -49,6 +49,7 @@ from .tokenization_distilbert import DistilBertTokenizer
 from .tokenization_albert import AlbertTokenizer
 from .tokenization_camembert import CamembertTokenizer
 from .tokenization_t5 import T5Tokenizer
+from .tokenization_xlm_roberta import XLMRobertaTokenizer
 
 # Configurations
 from .configuration_utils import PretrainedConfig
@@ -65,6 +66,7 @@ from .configuration_distilbert import DistilBertConfig, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
 from .configuration_albert import AlbertConfig, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
 from .configuration_camembert import CamembertConfig, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
 from .configuration_t5 import T5Config, T5_PRETRAINED_CONFIG_ARCHIVE_MAP
+from .configuration_xlm_roberta import XLMRobertaConfig, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP
 
 # Modeling
 if is_torch_available():
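For context only (not part of this diff): once `__init__.py` re-exports these names, the tokenizer and configuration classes can be imported straight from the package root. A minimal sketch, assuming the `xlm-roberta-base` checkpoint listed in the pretrained archive maps is reachable:

from transformers import XLMRobertaConfig, XLMRobertaTokenizer

# Default configuration object; hyperparameters follow the class defaults.
config = XLMRobertaConfig()

# SentencePiece-based tokenizer loaded from a pretrained checkpoint
# ("xlm-roberta-base" is an assumption here, not something this diff adds).
tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

tokens = tokenizer.tokenize("Hello world!")
input_ids = tokenizer.encode("Hello world!")
print(tokens)
print(input_ids)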
@@ -119,6 +121,9 @@ if is_torch_available():
                                   AlbertForQuestionAnswering,
                                   load_tf_weights_in_albert, ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
 
+    from .modeling_xlm_roberta import (XLMRobertaForMaskedLM, XLMRobertaModel, XLMRobertaForMultipleChoice,
+                                       XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification)
+
     # Optimization
     from .optimization import (AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup,
                                get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup)
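Again for illustration only (not part of the commit): the modeling classes above are exported only when `is_torch_available()` is true. A rough usage sketch, assuming PyTorch is installed and the `xlm-roberta-base` weights can be downloaded:

import torch
from transformers import XLMRobertaModel, XLMRobertaTokenizer

tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
model = XLMRobertaModel.from_pretrained("xlm-roberta-base")

# Encode a sentence and run the base model; the first element of the
# returned tuple is the sequence of hidden states.
input_ids = torch.tensor([tokenizer.encode("Hello world!")])
with torch.no_grad():
    outputs = model(input_ids)
last_hidden_state = outputs[0]  # shape: (batch_size, sequence_length, hidden_size)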