__version__ = "0.7.0"

# Tokenizers
from .tokenization_bert import BertTokenizer, BasicTokenizer, WordpieceTokenizer
from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_transfo_xl import (TransfoXLTokenizer, TransfoXLCorpus)
from .tokenization_gpt2 import GPT2Tokenizer
from .tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE
from .tokenization_xlm import XLMTokenizer
from .tokenization_utils import (PreTrainedTokenizer, clean_up_tokenization)

# Model configurations, architectures and TF-checkpoint loading helpers
from .modeling_bert import (BertConfig, BertModel, BertForPreTraining,
                            BertForMaskedLM, BertForNextSentencePrediction,
                            BertForSequenceClassification, BertForMultipleChoice,
                            BertForTokenClassification, BertForQuestionAnswering,
                            load_tf_weights_in_bert)
from .modeling_openai import (OpenAIGPTConfig, OpenAIGPTModel,
                              OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel,
                              load_tf_weights_in_openai_gpt)
from .modeling_transfo_xl import (TransfoXLConfig, TransfoXLModel, TransfoXLLMHeadModel,
                                  load_tf_weights_in_transfo_xl)
from .modeling_gpt2 import (GPT2Config, GPT2Model,
                            GPT2LMHeadModel, GPT2DoubleHeadsModel,
                            load_tf_weights_in_gpt2)
from .modeling_xlnet import (XLNetConfig,
                             XLNetPreTrainedModel, XLNetModel, XLNetLMHeadModel,
                             XLNetForSequenceClassification, XLNetForQuestionAnswering,
                             load_tf_weights_in_xlnet)
from .modeling_xlm import (XLMConfig, XLMModel,
                           XLMWithLMHeadModel, XLMForSequenceClassification,
                           XLMForQuestionAnswering)
from .modeling_utils import (WEIGHTS_NAME, CONFIG_NAME, TF_WEIGHTS_NAME,
                             PretrainedConfig, PreTrainedModel, prune_layer, Conv1D)

# Optimizers
from .optimization import BertAdam
from .optimization_openai import OpenAIAdam

# File download and caching utilities
from .file_utils import (PYTORCH_PRETRAINED_BERT_CACHE, cached_path)
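
# Minimal usage sketch of the API re-exported above, kept in comments so this
# module stays import-only. The top-level package name `pytorch_pretrained_bert`
# and the hyper-parameters are assumptions for illustration; the
# `from_pretrained()` calls cache weights via `cached_path` under
# PYTORCH_PRETRAINED_BERT_CACHE.
#
#     import torch
#     from pytorch_pretrained_bert import BertTokenizer, BertModel, BertAdam
#
#     tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#     model = BertModel.from_pretrained("bert-base-uncased")
#     model.eval()
#
#     # Tokenize, map to vocabulary ids and add a batch dimension
#     tokens = tokenizer.tokenize("[CLS] Hello world [SEP]")
#     input_ids = torch.tensor([tokenizer.convert_tokens_to_ids(tokens)])
#
#     with torch.no_grad():
#         encoded_layers, pooled_output = model(input_ids)
#
#     # Fine-tuning would typically use the warmup-aware optimizer exported above
#     # (illustrative hyper-parameters):
#     optimizer = BertAdam(model.parameters(), lr=5e-5, warmup=0.1, t_total=1000)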