Mirror of https://github.com/huggingface/transformers.git
Reorganize repo (#8580)

* Put models in subfolders
* Styling
* Fix imports in tests
* More fixes in test imports
* Sneaky hidden imports
* Fix imports in doc files
* More sneaky imports
* Finish fixing tests
* Fix examples
* Fix path for copies
* More fixes for examples
* Fix dummy files
* More fixes for example
* More model import fixes
* Is this why you're unhappy GitHub?
* Fix imports in convert command
This commit is contained in:
parent 901507335f
commit c89bdfbe72
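
In short: every model's code moves into its own subfolder under `transformers/models/`, and all internal, example, and documentation imports are updated to match. A minimal illustrative sketch of the effect on import paths (not part of the diff; the class names are real):

```python
# Public, top-level imports are unchanged by this commit:
from transformers import BertModel, BertTokenizer

# Deep imports into internal modules move to per-model subfolders:
from transformers.models.bert.modeling_bert import BertEmbeddings  # new path
# from transformers.modeling_bert import BertEmbeddings            # pre-reorg path
```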
.github/workflows/github-torch-hub.yml (vendored)
@@ -8,6 +8,9 @@ on:
 jobs:
   torch_hub_integration:
     runs-on: ubuntu-latest
+    env:
+      # TODO quickfix but may need more investigation
+      ACTIONS_ALLOW_UNSECURE_COMMANDS: True
     steps:
       # no checkout necessary here.
       - name: Extract branch name
.gitignore (vendored)
@@ -133,7 +133,6 @@ dmypy.json
 tensorflow_code
 
 # Models
-models
 proc_data
 
 # examples
@@ -51,10 +51,10 @@ AlbertTokenizer
 Albert specific outputs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. autoclass:: transformers.modeling_albert.AlbertForPreTrainingOutput
+.. autoclass:: transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_albert.TFAlbertForPreTrainingOutput
+.. autoclass:: transformers.models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput
     :members:
 
 
@@ -44,8 +44,8 @@ Implementation Notes
 - Bart doesn't use :obj:`token_type_ids` for sequence classification. Use :class:`~transformers.BartTokenizer` or
   :meth:`~transformers.BartTokenizer.encode` to get the proper splitting.
 - The forward pass of :class:`~transformers.BartModel` will create decoder inputs (using the helper function
-  :func:`transformers.modeling_bart._prepare_bart_decoder_inputs`) if they are not passed. This is different than some
-  other modeling APIs.
+  :func:`transformers.models.bart.modeling_bart._prepare_bart_decoder_inputs`) if they are not passed. This is
+  different than some other modeling APIs.
 - Model predictions are intended to be identical to the original implementation when
   :obj:`force_bos_token_to_be_generated=True`. This only works, however, if the string you pass to
   :func:`fairseq.encode` starts with a space.
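
An illustrative aside (not part of the diff): the note above means :class:`~transformers.BartModel` can be run without constructing decoder inputs by hand. A minimal sketch, assuming the public `facebook/bart-base` checkpoint:

```python
from transformers import BartModel, BartTokenizer

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
model = BartModel.from_pretrained("facebook/bart-base")

inputs = tokenizer("Hello world", return_tensors="pt")
# No decoder_input_ids are passed; the forward pass derives them internally
# via _prepare_bart_decoder_inputs, as described above.
outputs = model(**inputs)
```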
@@ -93,7 +93,7 @@ BartModel
 .. autoclass:: transformers.BartModel
     :members: forward
 
-.. autofunction:: transformers.modeling_bart._prepare_bart_decoder_inputs
+.. autofunction:: transformers.models.bart.modeling_bart._prepare_bart_decoder_inputs
 
 
 BartForConditionalGeneration
@@ -57,10 +57,10 @@ BertTokenizerFast
 Bert specific outputs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. autoclass:: transformers.modeling_bert.BertForPreTrainingOutput
+.. autoclass:: transformers.models.bert.modeling_bert.BertForPreTrainingOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_bert.TFBertForPreTrainingOutput
+.. autoclass:: transformers.models.bert.modeling_tf_bert.TFBertForPreTrainingOutput
     :members:
 
 
@@ -71,13 +71,13 @@ DPRReaderTokenizerFast
 DPR specific outputs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. autoclass:: transformers.modeling_dpr.DPRContextEncoderOutput
+.. autoclass:: transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput
     :members:
 
-.. autoclass:: transformers.modeling_dpr.DPRQuestionEncoderOutput
+.. autoclass:: transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput
     :members:
 
-.. autoclass:: transformers.modeling_dpr.DPRReaderOutput
+.. autoclass:: transformers.models.dpr.modeling_dpr.DPRReaderOutput
     :members:
 
 
@@ -69,10 +69,10 @@ ElectraTokenizerFast
 Electra specific outputs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. autoclass:: transformers.modeling_electra.ElectraForPreTrainingOutput
+.. autoclass:: transformers.models.electra.modeling_electra.ElectraForPreTrainingOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_electra.TFElectraForPreTrainingOutput
+.. autoclass:: transformers.models.electra.modeling_tf_electra.TFElectraForPreTrainingOutput
     :members:
 
 
@@ -65,10 +65,10 @@ FunnelTokenizerFast
 Funnel specific outputs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. autoclass:: transformers.modeling_funnel.FunnelForPreTrainingOutput
+.. autoclass:: transformers.models.funnel.modeling_funnel.FunnelForPreTrainingOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_funnel.TFFunnelForPreTrainingOutput
+.. autoclass:: transformers.models.funnel.modeling_tf_funnel.TFFunnelForPreTrainingOutput
     :members:
 
 
@@ -72,10 +72,10 @@ OpenAIGPTTokenizerFast
 OpenAI specific outputs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. autoclass:: transformers.modeling_openai.OpenAIGPTDoubleHeadsModelOutput
+.. autoclass:: transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput
+.. autoclass:: transformers.models.openai.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput
     :members:
 
 
@@ -60,10 +60,10 @@ GPT2TokenizerFast
 GPT2 specific outputs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. autoclass:: transformers.modeling_gpt2.GPT2DoubleHeadsModelOutput
+.. autoclass:: transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput
+.. autoclass:: transformers.models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput
     :members:
 
 
@@ -93,29 +93,27 @@ LongformerTokenizerFast
 Longformer specific outputs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. autoclass:: transformers.modeling_longformer.LongformerBaseModelOutput
+.. autoclass:: transformers.models.longformer.modeling_longformer.LongformerBaseModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_longformer.LongformerBaseModelOutputWithPooling
+.. autoclass:: transformers.models.longformer.modeling_longformer.LongformerBaseModelOutputWithPooling
     :members:
 
-.. autoclass:: transformers.modeling_longformer.LongformerMultipleChoiceModelOutput
+.. autoclass:: transformers.models.longformer.modeling_longformer.LongformerMultipleChoiceModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_longformer.LongformerQuestionAnsweringModelOutput
+.. autoclass:: transformers.models.longformer.modeling_longformer.LongformerQuestionAnsweringModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_longformer.TFLongformerBaseModelOutput
+.. autoclass:: transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_longformer.TFLongformerBaseModelOutputWithPooling
+.. autoclass:: transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutputWithPooling
     :members:
 
-.. autoclass:: transformers.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput
+.. autoclass:: transformers.models.longformer.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput
     :members:
 
-LongformerModel
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 LongformerModel
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -67,19 +67,19 @@ LxmertTokenizerFast
 Lxmert specific outputs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. autoclass:: transformers.modeling_lxmert.LxmertModelOutput
+.. autoclass:: transformers.models.lxmert.modeling_lxmert.LxmertModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_lxmert.LxmertForPreTrainingOutput
+.. autoclass:: transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput
     :members:
 
-.. autoclass:: transformers.modeling_lxmert.LxmertForQuestionAnsweringOutput
+.. autoclass:: transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_lxmert.TFLxmertModelOutput
+.. autoclass:: transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_lxmert.TFLxmertForPreTrainingOutput
+.. autoclass:: transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput
     :members:
 
 
@@ -58,10 +58,10 @@ MobileBertTokenizerFast
 MobileBert specific outputs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. autoclass:: transformers.modeling_mobilebert.MobileBertForPreTrainingOutput
+.. autoclass:: transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_mobilebert.TFMobileBertForPreTrainingOutput
+.. autoclass:: transformers.models.mobilebert.modeling_tf_mobilebert.TFMobileBertForPreTrainingOutput
     :members:
 
 
@@ -47,16 +47,16 @@ ProphetNetTokenizer
 ProphetNet specific outputs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. autoclass:: transformers.modeling_prophetnet.ProphetNetSeq2SeqLMOutput
+.. autoclass:: transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput
     :members:
 
-.. autoclass:: transformers.modeling_prophetnet.ProphetNetSeq2SeqModelOutput
+.. autoclass:: transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_prophetnet.ProphetNetDecoderModelOutput
+.. autoclass:: transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_prophetnet.ProphetNetDecoderLMOutput
+.. autoclass:: transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput
     :members:
 
 ProphetNetModel
@@ -50,10 +50,10 @@ RagTokenizer
 Rag specific outputs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. autoclass:: transformers.modeling_rag.RetrievAugLMMarginOutput
+.. autoclass:: transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput
     :members:
 
-.. autoclass:: transformers.modeling_rag.RetrievAugLMOutput
+.. autoclass:: transformers.models.rag.modeling_rag.RetrievAugLMOutput
     :members:
 
 RagRetriever
@@ -49,16 +49,16 @@ TransfoXLTokenizer
 TransfoXL specific outputs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. autoclass:: transformers.modeling_transfo_xl.TransfoXLModelOutput
+.. autoclass:: transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_transfo_xl.TransfoXLLMHeadModelOutput
+.. autoclass:: transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_transfo_xl.TFTransfoXLModelOutput
+.. autoclass:: transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput
+.. autoclass:: transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput
     :members:
 
 
@@ -50,7 +50,7 @@ XLMTokenizer
 XLM specific outputs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. autoclass:: transformers.modeling_xlm.XLMForQuestionAnsweringOutput
+.. autoclass:: transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput
     :members:
 
 
@@ -53,43 +53,43 @@ XLNetTokenizer
 XLNet specific outputs
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. autoclass:: transformers.modeling_xlnet.XLNetModelOutput
+.. autoclass:: transformers.models.xlnet.modeling_xlnet.XLNetModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_xlnet.XLNetLMHeadModelOutput
+.. autoclass:: transformers.models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_xlnet.XLNetForSequenceClassificationOutput
+.. autoclass:: transformers.models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput
     :members:
 
-.. autoclass:: transformers.modeling_xlnet.XLNetForMultipleChoiceOutput
+.. autoclass:: transformers.models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput
     :members:
 
-.. autoclass:: transformers.modeling_xlnet.XLNetForTokenClassificationOutput
+.. autoclass:: transformers.models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput
     :members:
 
-.. autoclass:: transformers.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput
+.. autoclass:: transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput
     :members:
 
-.. autoclass:: transformers.modeling_xlnet.XLNetForQuestionAnsweringOutput
+.. autoclass:: transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_xlnet.TFXLNetModelOutput
+.. autoclass:: transformers.models.xlnet.modeling_tf_xlnet.TFXLNetModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_xlnet.TFXLNetLMHeadModelOutput
+.. autoclass:: transformers.models.xlnet.modeling_tf_xlnet.TFXLNetLMHeadModelOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput
+.. autoclass:: transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput
+.. autoclass:: transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput
+.. autoclass:: transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput
     :members:
 
-.. autoclass:: transformers.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput
+.. autoclass:: transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput
     :members:
 
 
@@ -21,7 +21,7 @@ import torch.nn as nn
 from torch.nn import CrossEntropyLoss, MSELoss
 
 from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
-from transformers.modeling_albert import (
+from transformers.models.albert.modeling_albert import (
     ALBERT_INPUTS_DOCSTRING,
     ALBERT_START_DOCSTRING,
     AlbertModel,
@@ -23,7 +23,7 @@ from torch import nn
 from torch.nn import CrossEntropyLoss, MSELoss
 
 from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
-from transformers.modeling_bert import (
+from transformers.models.bert.modeling_bert import (
     BERT_INPUTS_DOCSTRING,
     BERT_START_DOCSTRING,
     BertEncoder,
@@ -1,7 +1,6 @@
 import torch
 
-from transformers.modeling_camembert import CamembertForMaskedLM
-from transformers.tokenization_camembert import CamembertTokenizer
+from transformers import CamembertForMaskedLM, CamembertTokenizer
 
 
 def fill_mask(masked_input, model, tokenizer, topk=5):
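
A hedged usage sketch for context (the body of `fill_mask` is not shown in this diff; `camembert-base` is the standard public checkpoint and `<mask>` is CamemBERT's mask token):

```python
from transformers import CamembertForMaskedLM, CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")

# `fill_mask` is the helper defined in this file.
predictions = fill_mask("Le camembert est <mask> :)", model, tokenizer, topk=3)
```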
@@ -32,8 +32,14 @@ from torch.utils.data.distributed import DistributedSampler
 from tqdm import tqdm, trange
 
 import transformers
-from transformers import WEIGHTS_NAME, AdamW, AutoConfig, AutoTokenizer, get_linear_schedule_with_warmup
-from transformers.modeling_auto import AutoModelForMultipleChoice
+from transformers import (
+    WEIGHTS_NAME,
+    AdamW,
+    AutoConfig,
+    AutoModelForMultipleChoice,
+    AutoTokenizer,
+    get_linear_schedule_with_warmup,
+)
 from transformers.trainer_utils import is_main_process
 
 
@@ -3,7 +3,7 @@ from torch import nn
 from torch.nn import CrossEntropyLoss, MSELoss
 
 from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
-from transformers.modeling_bert import (
+from transformers.models.bert.modeling_bert import (
     BERT_INPUTS_DOCSTRING,
     BERT_START_DOCSTRING,
     BertEmbeddings,
@@ -3,9 +3,13 @@ from __future__ import absolute_import, division, print_function, unicode_literals
 import torch.nn as nn
 from torch.nn import CrossEntropyLoss, MSELoss
 
-from transformers.configuration_roberta import RobertaConfig
+from transformers import RobertaConfig
 from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
-from transformers.modeling_roberta import ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings
+from transformers.models.roberta.modeling_roberta import (
+    ROBERTA_INPUTS_DOCSTRING,
+    ROBERTA_START_DOCSTRING,
+    RobertaEmbeddings,
+)
 
 from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@@ -16,7 +16,7 @@
 """Masked Version of BERT. It replaces the `torch.nn.Linear` layers with
 :class:`~emmental.MaskedLinear` and add an additional parameters in the forward pass to
 compute the adaptive mask.
-Built on top of `transformers.modeling_bert`"""
+Built on top of `transformers.models.bert.modeling_bert`"""
 
 
 import logging
@@ -29,8 +29,8 @@ from torch.nn import CrossEntropyLoss, MSELoss
 from emmental import MaskedBertConfig
 from emmental.modules import MaskedLinear
 from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
-from transformers.modeling_bert import ACT2FN, BertLayerNorm, load_tf_weights_in_bert
 from transformers.modeling_utils import PreTrainedModel, prune_linear_layer
+from transformers.models.bert.modeling_bert import ACT2FN, BertLayerNorm, load_tf_weights_in_bert
 
 
 logger = logging.getLogger(__name__)
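
An illustrative aside on the idea in the docstring above: a linear layer whose weight matrix is multiplied element-wise by an adaptively computed mask in the forward pass. This is a hypothetical sketch, not the actual `emmental.MaskedLinear` implementation:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class MaskedLinearSketch(nn.Linear):
    """Linear layer with a learned binary mask over its weights (illustrative)."""

    def __init__(self, in_features, out_features, threshold=0.0):
        super().__init__(in_features, out_features)
        # One learnable score per weight; thresholding the scores yields the mask.
        self.scores = nn.Parameter(0.01 * torch.randn_like(self.weight))
        self.threshold = threshold

    def forward(self, x):
        mask = (self.scores > self.threshold).float()
        # Real implementations use a straight-through estimator so that the
        # scores still receive gradients through the hard threshold.
        return F.linear(x, self.weight * mask, self.bias)
```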
@@ -27,7 +27,7 @@ class RagPyTorchDistributedRetriever(RagRetriever):
             It is used to decode the question and then use the generator_tokenizer.
         generator_tokenizer (:class:`~transformers.PretrainedTokenizer`):
             The tokenizer used for the generator part of the RagModel.
-        index (:class:`~transformers.retrieval_rag.Index`, optional, defaults to the one defined by the configuration):
+        index (:class:`~transformers.models.rag.retrieval_rag.Index`, optional, defaults to the one defined by the configuration):
             If specified, use this index instead of the one built using the configuration
     """
 
@@ -11,16 +11,12 @@ import numpy as np
 from datasets import Dataset
 
 import faiss
-from transformers.configuration_bart import BartConfig
-from transformers.configuration_dpr import DPRConfig
-from transformers.configuration_rag import RagConfig
+from transformers import BartConfig, BartTokenizer, DPRConfig, DPRQuestionEncoderTokenizer, RagConfig
 from transformers.file_utils import is_datasets_available, is_faiss_available, is_psutil_available, is_torch_available
-from transformers.retrieval_rag import CustomHFIndex
+from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
+from transformers.models.rag.retrieval_rag import CustomHFIndex
+from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
 from transformers.testing_utils import require_torch_non_multi_gpu_but_fix_me
-from transformers.tokenization_bart import BartTokenizer
-from transformers.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
-from transformers.tokenization_dpr import DPRQuestionEncoderTokenizer
-from transformers.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
 
 
 sys.path.append(os.path.join(os.getcwd()))  # noqa: E402 # isort:skip
@@ -137,7 +133,7 @@ class RagRetrieverTest(TestCase):
             question_encoder=DPRConfig().to_dict(),
             generator=BartConfig().to_dict(),
         )
-        with patch("transformers.retrieval_rag.load_dataset") as mock_load_dataset:
+        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
             mock_load_dataset.return_value = dataset
             retriever = RagPyTorchDistributedRetriever(
                 config,
@@ -16,7 +16,7 @@ from finetune import SummarizationModule, TranslationModule
 from finetune import main as ft_main
 from make_student import create_student_by_copying_alternating_layers, get_layers_to_supervise
 from transformers import AutoModelForSeq2SeqLM, MBartTokenizer, T5ForConditionalGeneration
-from transformers.modeling_bart import shift_tokens_right
+from transformers.models.bart.modeling_bart import shift_tokens_right
 from utils import calculate_bleu, check_output_dir, freeze_params, label_smoothed_nll_loss, use_task_specific_params
 
 
@@ -17,7 +17,7 @@ from torch.utils.data import DataLoader
 
 from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
 from transformers import MBartTokenizer, T5ForConditionalGeneration
-from transformers.modeling_bart import shift_tokens_right
+from transformers.models.bart.modeling_bart import shift_tokens_right
 from utils import (
     ROUGE_KEYS,
     LegacySeq2SeqDataset,
@@ -5,8 +5,8 @@ from torch import nn
 from torch.utils.data import DistributedSampler, RandomSampler
 
 from transformers import PreTrainedModel, Trainer, logging
-from transformers.configuration_fsmt import FSMTConfig
 from transformers.file_utils import is_torch_tpu_available
+from transformers.models.fsmt.configuration_fsmt import FSMTConfig
 from transformers.optimization import (
     Adafactor,
     AdamW,
@@ -10,7 +10,7 @@ from parameterized import parameterized
 from save_len_file import save_len_file
 from test_seq2seq_examples import ARTICLES, BART_TINY, MARIAN_TINY, MBART_TINY, SUMMARIES, T5_TINY, make_test_data_dir
 from transformers import AutoTokenizer
-from transformers.modeling_bart import shift_tokens_right
+from transformers.models.bart.modeling_bart import shift_tokens_right
 from transformers.testing_utils import TestCasePlus, require_torch_non_multi_gpu_but_fix_me, slow
 from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
 
@@ -2,8 +2,8 @@ import os
 import tempfile
 import unittest
 
-from transformers.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
 from transformers.file_utils import cached_property
+from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
 from transformers.testing_utils import require_torch_non_multi_gpu_but_fix_me, slow
 
 
|
@ -21,7 +21,7 @@ from torch.utils.data import Dataset, Sampler
|
||||
from sentence_splitter import add_newline_to_end_of_each_sentence
|
||||
from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer
|
||||
from transformers.file_utils import cached_property
|
||||
from transformers.modeling_bart import shift_tokens_right
|
||||
from transformers.models.bart.modeling_bart import shift_tokens_right
|
||||
|
||||
|
||||
try:
|
||||
|
@@ -34,9 +34,8 @@ import torch.nn.functional as F
 from tqdm import trange
 
 from pplm_classification_head import ClassificationHead
-from transformers import GPT2Tokenizer
+from transformers import GPT2LMHeadModel, GPT2Tokenizer
 from transformers.file_utils import cached_path
-from transformers.modeling_gpt2 import GPT2LMHeadModel
 
 
 PPLM_BOW = 1
@@ -35,8 +35,7 @@ All 3 models are available:
 #### How to use
 
 ```python
-from transformers.tokenization_fsmt import FSMTTokenizer
-from transformers.modeling_fsmt import FSMTForConditionalGeneration
+from transformers import FSMTForConditionalGeneration, FSMTTokenizer
 mname = "allenai/wmt16-en-de-12-1"
 tokenizer = FSMTTokenizer.from_pretrained(mname)
 model = FSMTForConditionalGeneration.from_pretrained(mname)
@@ -35,8 +35,7 @@ All 3 models are available:
 #### How to use
 
 ```python
-from transformers.tokenization_fsmt import FSMTTokenizer
-from transformers.modeling_fsmt import FSMTForConditionalGeneration
+from transformers import FSMTForConditionalGeneration, FSMTTokenizer
 mname = "allenai/wmt16-en-de-dist-12-1"
 tokenizer = FSMTTokenizer.from_pretrained(mname)
 model = FSMTForConditionalGeneration.from_pretrained(mname)
@@ -35,8 +35,7 @@ All 3 models are available:
 #### How to use
 
 ```python
-from transformers.tokenization_fsmt import FSMTTokenizer
-from transformers.modeling_fsmt import FSMTForConditionalGeneration
+from transformers import FSMTForConditionalGeneration, FSMTTokenizer
 mname = "allenai/wmt16-en-de-dist-6-1"
 tokenizer = FSMTTokenizer.from_pretrained(mname)
 model = FSMTForConditionalGeneration.from_pretrained(mname)
@@ -35,8 +35,7 @@ For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the S
 #### How to use
 
 ```python
-from transformers.tokenization_fsmt import FSMTTokenizer
-from transformers.modeling_fsmt import FSMTForConditionalGeneration
+from transformers import FSMTForConditionalGeneration, FSMTTokenizer
 mname = "allenai/wmt19-de-en-6-6-base"
 tokenizer = FSMTTokenizer.from_pretrained(mname)
 model = FSMTForConditionalGeneration.from_pretrained(mname)
@@ -35,8 +35,7 @@ For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the S
 #### How to use
 
 ```python
-from transformers.tokenization_fsmt import FSMTTokenizer
-from transformers.modeling_fsmt import FSMTForConditionalGeneration
+from transformers import FSMTForConditionalGeneration, FSMTTokenizer
 mname = "allenai/wmt19-de-en-6-6-big"
 tokenizer = FSMTTokenizer.from_pretrained(mname)
 model = FSMTForConditionalGeneration.from_pretrained(mname)
@@ -47,9 +47,7 @@ Evaluated on the SQuAD 2.0 dev set with the [official eval script](https://works
 
 ### In Transformers
 ```python
-from transformers.pipelines import pipeline
-from transformers.modeling_auto import AutoModelForQuestionAnswering
-from transformers.tokenization_auto import AutoTokenizer
+from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
 
 model_name = "deepset/electra-base-squad2"
 
@@ -48,9 +48,7 @@ Evaluated on the SQuAD 2.0 dev set with the [official eval script](https://works
 
 ### In Transformers
 ```python
-from transformers.pipelines import pipeline
-from transformers.modeling_auto import AutoModelForQuestionAnswering
-from transformers.tokenization_auto import AutoTokenizer
+from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
 
 model_name = "deepset/minilm-uncased-squad2"
 
@@ -39,9 +39,8 @@ This model is the model obtained from the **third** fold of the cross-validation
 
 ### In Transformers
 ```python
-from transformers.pipelines import pipeline
-from transformers.modeling_auto import AutoModelForQuestionAnswering
-from transformers.tokenization_auto import AutoTokenizer
+from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
 
 model_name = "deepset/roberta-base-squad2-covid"
 
@@ -48,9 +48,7 @@ Evaluated on the SQuAD 2.0 dev set with the [official eval script](https://works
 
 ### In Transformers
 ```python
-from transformers.pipelines import pipeline
-from transformers.modeling_auto import AutoModelForQuestionAnswering
-from transformers.tokenization_auto import AutoTokenizer
+from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
 
 model_name = "deepset/roberta-base-squad2-v2"
 
@@ -54,9 +54,7 @@ Evaluated on the SQuAD 2.0 dev set with the [official eval script](https://works
 
 ### In Transformers
 ```python
-from transformers.pipelines import pipeline
-from transformers.modeling_auto import AutoModelForQuestionAnswering
-from transformers.tokenization_auto import AutoTokenizer
+from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
 
 model_name = "deepset/roberta-base-squad2"
 
@@ -63,9 +63,7 @@ Evaluated on German [XQuAD: xquad.de.json](https://github.com/deepmind/xquad)
 
 ### In Transformers
 ```python
-from transformers.pipelines import pipeline
-from transformers.modeling_auto import AutoModelForQuestionAnswering
-from transformers.tokenization_auto import AutoTokenizer
+from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
 
 model_name = "deepset/xlm-roberta-large-squad2"
 
@@ -36,8 +36,7 @@ All four models are available:
 #### How to use
 
 ```python
-from transformers.tokenization_fsmt import FSMTTokenizer
-from transformers.modeling_fsmt import FSMTForConditionalGeneration
+from transformers import FSMTForConditionalGeneration, FSMTTokenizer
 mname = "facebook/wmt19-de-en"
 tokenizer = FSMTTokenizer.from_pretrained(mname)
 model = FSMTForConditionalGeneration.from_pretrained(mname)
@@ -36,8 +36,7 @@ All four models are available:
 #### How to use
 
 ```python
-from transformers.tokenization_fsmt import FSMTTokenizer
-from transformers.modeling_fsmt import FSMTForConditionalGeneration
+from transformers import FSMTForConditionalGeneration, FSMTTokenizer
 mname = "facebook/wmt19-en-de"
 tokenizer = FSMTTokenizer.from_pretrained(mname)
 model = FSMTForConditionalGeneration.from_pretrained(mname)
@@ -36,8 +36,7 @@ All four models are available:
 #### How to use
 
 ```python
-from transformers.tokenization_fsmt import FSMTTokenizer
-from transformers.modeling_fsmt import FSMTForConditionalGeneration
+from transformers import FSMTForConditionalGeneration, FSMTTokenizer
 mname = "facebook/wmt19-en-ru"
 tokenizer = FSMTTokenizer.from_pretrained(mname)
 model = FSMTForConditionalGeneration.from_pretrained(mname)
@@ -36,8 +36,7 @@ All four models are available:
 #### How to use
 
 ```python
-from transformers.tokenization_fsmt import FSMTTokenizer
-from transformers.modeling_fsmt import FSMTForConditionalGeneration
+from transformers import FSMTForConditionalGeneration, FSMTTokenizer
 mname = "facebook/wmt19-ru-en"
 tokenizer = FSMTTokenizer.from_pretrained(mname)
 model = FSMTForConditionalGeneration.from_pretrained(mname)
@@ -2,11 +2,7 @@
 
 
 ```python
-from transformers.configuration_bert import BertConfig
-from transformers.modeling_bert import BertForMaskedLM
-from transformers.modeling_tf_bert import TFBertForMaskedLM
-from transformers.tokenization_bert import BertTokenizer
+from transformers import BertConfig, BertForMaskedLM, BertTokenizer, TFBertForMaskedLM
 
 SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
 DIRNAME = "./bert-xsmall-dummy"
@@ -60,8 +60,7 @@ All 3 models are available:
 #### How to use
 
 ```python
-from transformers.tokenization_fsmt import FSMTTokenizer
-from transformers.modeling_fsmt import FSMTForConditionalGeneration
+from transformers import FSMTForConditionalGeneration, FSMTTokenizer
 mname = "allenai/{model_name}"
 tokenizer = FSMTTokenizer.from_pretrained(mname)
 model = FSMTForConditionalGeneration.from_pretrained(mname)
@@ -59,8 +59,7 @@ For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the S
 #### How to use
 
 ```python
-from transformers.tokenization_fsmt import FSMTTokenizer
-from transformers.modeling_fsmt import FSMTForConditionalGeneration
+from transformers import FSMTForConditionalGeneration, FSMTTokenizer
 mname = "allenai/{model_name}"
 tokenizer = FSMTTokenizer.from_pretrained(mname)
 model = FSMTForConditionalGeneration.from_pretrained(mname)
@@ -63,8 +63,7 @@ All four models are available:
 #### How to use
 
 ```python
-from transformers.tokenization_fsmt import FSMTTokenizer
-from transformers.modeling_fsmt import FSMTForConditionalGeneration
+from transformers import FSMTForConditionalGeneration, FSMTTokenizer
 mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
 tokenizer = FSMTTokenizer.from_pretrained(mname)
 model = FSMTForConditionalGeneration.from_pretrained(mname)
@@ -27,46 +27,10 @@ from .integrations import (  # isort:skip
     is_wandb_available,
 )
 
-# Configurations
-from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig
-from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, AutoConfig
-from .configuration_bart import BartConfig
-from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig
-from .configuration_bert_generation import BertGenerationConfig
-from .configuration_blenderbot import BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig
-from .configuration_camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig
-from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
-from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig
-from .configuration_distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig
-from .configuration_dpr import DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, DPRConfig
-from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig
-from .configuration_encoder_decoder import EncoderDecoderConfig
-from .configuration_flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig
-from .configuration_fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig
-from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
-from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config
-from .configuration_layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig
-from .configuration_longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig
-from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
-from .configuration_marian import MarianConfig
-from .configuration_mbart import MBartConfig
-from .configuration_mmbt import MMBTConfig
-from .configuration_mobilebert import MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig
-from .configuration_openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig
-from .configuration_pegasus import PegasusConfig
-from .configuration_prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig
-from .configuration_rag import RagConfig
-from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
-from .configuration_retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig
-from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig
-from .configuration_squeezebert import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig
-from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config
-from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
+# Configuration
 from .configuration_utils import PretrainedConfig
-from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig
-from .configuration_xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig
-from .configuration_xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig
-from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
 
 # Data
 from .data import (
     DataProcessor,
     InputExample,
@@ -130,6 +94,77 @@ from .modeling_tf_pytorch_utils import (
     load_tf2_model_in_pytorch_model,
     load_tf2_weights_in_pytorch_model,
 )
+from .models.albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig
+from .models.auto import (
+    ALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
+    CONFIG_MAPPING,
+    TOKENIZER_MAPPING,
+    AutoConfig,
+    AutoTokenizer,
+)
+from .models.bart import BartConfig, BartTokenizer
+from .models.bert import (
+    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
+    BasicTokenizer,
+    BertConfig,
+    BertTokenizer,
+    WordpieceTokenizer,
+)
+from .models.bert_generation import BertGenerationConfig
+from .models.bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer
+from .models.bertweet import BertweetTokenizer
+from .models.blenderbot import (
+    BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
+    BlenderbotConfig,
+    BlenderbotSmallTokenizer,
+    BlenderbotTokenizer,
+)
+from .models.camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig
+from .models.ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig, CTRLTokenizer
+from .models.deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaTokenizer
+from .models.distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertTokenizer
+from .models.dpr import (
+    DPR_PRETRAINED_CONFIG_ARCHIVE_MAP,
+    DPRConfig,
+    DPRContextEncoderTokenizer,
+    DPRQuestionEncoderTokenizer,
+    DPRReaderOutput,
+    DPRReaderTokenizer,
+)
+from .models.electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraTokenizer
+from .models.encoder_decoder import EncoderDecoderConfig
+from .models.flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig, FlaubertTokenizer
+from .models.fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig, FSMTTokenizer
+from .models.funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig, FunnelTokenizer
+from .models.gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2Tokenizer
+from .models.herbert import HerbertTokenizer
+from .models.layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig, LayoutLMTokenizer
+from .models.longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerTokenizer
+from .models.lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig, LxmertTokenizer
+from .models.marian import MarianConfig
+from .models.mbart import MBartConfig
+from .models.mmbt import MMBTConfig
+from .models.mobilebert import MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertTokenizer
+from .models.openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig, OpenAIGPTTokenizer
+from .models.pegasus import PegasusConfig
+from .models.phobert import PhobertTokenizer
+from .models.prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig, ProphetNetTokenizer
+from .models.rag import RagConfig, RagRetriever, RagTokenizer
+from .models.reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
+from .models.retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig, RetriBertTokenizer
+from .models.roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaTokenizer
+from .models.squeezebert import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertTokenizer
+from .models.t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config
+from .models.transfo_xl import (
+    TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
+    TransfoXLConfig,
+    TransfoXLCorpus,
+    TransfoXLTokenizer,
+)
+from .models.xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMTokenizer
+from .models.xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig
+from .models.xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig
+from .models.xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
 
 # Pipelines
 from .pipelines import (
@@ -154,43 +189,7 @@ from .pipelines import (
     pipeline,
 )
 
-# Retriever
-from .retrieval_rag import RagRetriever
-
-# Tokenizers
-from .tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer
-from .tokenization_bart import BartTokenizer
-from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
-from .tokenization_bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer
-from .tokenization_bertweet import BertweetTokenizer
-from .tokenization_blenderbot import BlenderbotSmallTokenizer, BlenderbotTokenizer
-from .tokenization_ctrl import CTRLTokenizer
-from .tokenization_deberta import DebertaTokenizer
-from .tokenization_distilbert import DistilBertTokenizer
-from .tokenization_dpr import (
-    DPRContextEncoderTokenizer,
-    DPRQuestionEncoderTokenizer,
-    DPRReaderOutput,
-    DPRReaderTokenizer,
-)
-from .tokenization_electra import ElectraTokenizer
-from .tokenization_flaubert import FlaubertTokenizer
-from .tokenization_fsmt import FSMTTokenizer
-from .tokenization_funnel import FunnelTokenizer
-from .tokenization_gpt2 import GPT2Tokenizer
-from .tokenization_herbert import HerbertTokenizer
-from .tokenization_layoutlm import LayoutLMTokenizer
-from .tokenization_longformer import LongformerTokenizer
-from .tokenization_lxmert import LxmertTokenizer
-from .tokenization_mobilebert import MobileBertTokenizer
-from .tokenization_openai import OpenAIGPTTokenizer
-from .tokenization_phobert import PhobertTokenizer
-from .tokenization_prophetnet import ProphetNetTokenizer
-from .tokenization_rag import RagTokenizer
-from .tokenization_retribert import RetriBertTokenizer
-from .tokenization_roberta import RobertaTokenizer
-from .tokenization_squeezebert import SqueezeBertTokenizer
-from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
+# Tokenization
 from .tokenization_utils import PreTrainedTokenizer
 from .tokenization_utils_base import (
     AddedToken,
@@ -201,54 +200,49 @@ from .tokenization_utils_base import (
     TensorType,
     TokenSpan,
 )
-from .tokenization_xlm import XLMTokenizer
 
 
 if is_sentencepiece_available():
-    from .tokenization_albert import AlbertTokenizer
-    from .tokenization_bert_generation import BertGenerationTokenizer
-    from .tokenization_camembert import CamembertTokenizer
-    from .tokenization_marian import MarianTokenizer
-    from .tokenization_mbart import MBartTokenizer
-    from .tokenization_pegasus import PegasusTokenizer
-    from .tokenization_reformer import ReformerTokenizer
-    from .tokenization_t5 import T5Tokenizer
-    from .tokenization_xlm_prophetnet import XLMProphetNetTokenizer
-    from .tokenization_xlm_roberta import XLMRobertaTokenizer
-    from .tokenization_xlnet import XLNetTokenizer
+    from .models.albert import AlbertTokenizer
+    from .models.bert_generation import BertGenerationTokenizer
+    from .models.camembert import CamembertTokenizer
+    from .models.marian import MarianTokenizer
+    from .models.mbart import MBartTokenizer
+    from .models.pegasus import PegasusTokenizer
+    from .models.reformer import ReformerTokenizer
+    from .models.t5 import T5Tokenizer
+    from .models.xlm_prophetnet import XLMProphetNetTokenizer
+    from .models.xlm_roberta import XLMRobertaTokenizer
+    from .models.xlnet import XLNetTokenizer
 else:
     from .utils.dummy_sentencepiece_objects import *
 
 if is_tokenizers_available():
-    from .tokenization_albert_fast import AlbertTokenizerFast
-    from .tokenization_bart_fast import BartTokenizerFast
-    from .tokenization_bert_fast import BertTokenizerFast
-    from .tokenization_camembert_fast import CamembertTokenizerFast
-    from .tokenization_distilbert_fast import DistilBertTokenizerFast
-    from .tokenization_dpr_fast import (
-        DPRContextEncoderTokenizerFast,
-        DPRQuestionEncoderTokenizerFast,
-        DPRReaderTokenizerFast,
-    )
-    from .tokenization_electra_fast import ElectraTokenizerFast
-    from .tokenization_funnel_fast import FunnelTokenizerFast
-    from .tokenization_gpt2_fast import GPT2TokenizerFast
-    from .tokenization_herbert_fast import HerbertTokenizerFast
-    from .tokenization_layoutlm_fast import LayoutLMTokenizerFast
-    from .tokenization_longformer_fast import LongformerTokenizerFast
-    from .tokenization_lxmert_fast import LxmertTokenizerFast
-    from .tokenization_mbart_fast import MBartTokenizerFast
-    from .tokenization_mobilebert_fast import MobileBertTokenizerFast
-    from .tokenization_openai_fast import OpenAIGPTTokenizerFast
-    from .tokenization_pegasus_fast import PegasusTokenizerFast
-    from .tokenization_reformer_fast import ReformerTokenizerFast
-    from .tokenization_retribert_fast import RetriBertTokenizerFast
-    from .tokenization_roberta_fast import RobertaTokenizerFast
-    from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
-    from .tokenization_t5_fast import T5TokenizerFast
+    from .models.albert import AlbertTokenizerFast
+    from .models.bart import BartTokenizerFast
+    from .models.bert import BertTokenizerFast
+    from .models.camembert import CamembertTokenizerFast
+    from .models.distilbert import DistilBertTokenizerFast
+    from .models.dpr import DPRContextEncoderTokenizerFast, DPRQuestionEncoderTokenizerFast, DPRReaderTokenizerFast
+    from .models.electra import ElectraTokenizerFast
+    from .models.funnel import FunnelTokenizerFast
+    from .models.gpt2 import GPT2TokenizerFast
+    from .models.herbert import HerbertTokenizerFast
+    from .models.layoutlm import LayoutLMTokenizerFast
+    from .models.longformer import LongformerTokenizerFast
+    from .models.lxmert import LxmertTokenizerFast
+    from .models.mbart import MBartTokenizerFast
+    from .models.mobilebert import MobileBertTokenizerFast
+    from .models.openai import OpenAIGPTTokenizerFast
+    from .models.pegasus import PegasusTokenizerFast
+    from .models.reformer import ReformerTokenizerFast
+    from .models.retribert import RetriBertTokenizerFast
+    from .models.roberta import RobertaTokenizerFast
+    from .models.squeezebert import SqueezeBertTokenizerFast
+    from .models.t5 import T5TokenizerFast
+    from .models.xlm_roberta import XLMRobertaTokenizerFast
+    from .models.xlnet import XLNetTokenizerFast
     from .tokenization_utils_fast import PreTrainedTokenizerFast
-    from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
-    from .tokenization_xlnet_fast import XLNetTokenizerFast
 
 if is_sentencepiece_available():
     from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, convert_slow_tokenizer
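
The `is_sentencepiece_available()` / `is_tokenizers_available()` branches above follow the library's optional-dependency pattern: when a backend is missing, placeholder "dummy objects" are imported instead (see `from .utils.dummy_sentencepiece_objects import *` above), so that using such a class fails with a helpful error rather than at import time. A hypothetical sketch of that pattern, not the actual dummy-object code:

```python
# Sketch only: mimics the dummy-object fallback used in __init__.py above.
try:
    import sentencepiece  # noqa: F401
    _sentencepiece_available = True
except ImportError:
    _sentencepiece_available = False

if not _sentencepiece_available:
    class AlbertTokenizer:  # placeholder standing in for the real class
        def __init__(self, *args, **kwargs):
            raise ImportError("AlbertTokenizer requires the SentencePiece library.")
```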
@@ -313,7 +307,8 @@ if is_torch_available():
         TopPLogitsWarper,
     )
     from .generation_utils import top_k_top_p_filtering
-    from .modeling_albert import (
+    from .modeling_utils import Conv1D, PreTrainedModel, apply_chunking_to_forward, prune_layer
+    from .models.albert import (
         ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
         AlbertForMaskedLM,
         AlbertForMultipleChoice,
@@ -325,7 +320,7 @@ if is_torch_available():
         AlbertPreTrainedModel,
         load_tf_weights_in_albert,
     )
-    from .modeling_auto import (
+    from .models.auto import (
         MODEL_FOR_CAUSAL_LM_MAPPING,
         MODEL_FOR_MASKED_LM_MAPPING,
         MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
@@ -349,7 +344,7 @@ if is_torch_available():
         AutoModelForTokenClassification,
         AutoModelWithLMHead,
     )
-    from .modeling_bart import (
+    from .models.bart import (
         BART_PRETRAINED_MODEL_ARCHIVE_LIST,
         BartForConditionalGeneration,
         BartForQuestionAnswering,
@@ -357,7 +352,7 @@ if is_torch_available():
         BartModel,
         PretrainedBartModel,
     )
-    from .modeling_bert import (
+    from .models.bert import (
         BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
         BertForMaskedLM,
         BertForMultipleChoice,
@@ -372,13 +367,13 @@ if is_torch_available():
         BertPreTrainedModel,
         load_tf_weights_in_bert,
     )
-    from .modeling_bert_generation import (
+    from .models.bert_generation import (
         BertGenerationDecoder,
         BertGenerationEncoder,
         load_tf_weights_in_bert_generation,
     )
-    from .modeling_blenderbot import BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForConditionalGeneration
-    from .modeling_camembert import (
+    from .models.blenderbot import BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForConditionalGeneration
+    from .models.camembert import (
         CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
         CamembertForCausalLM,
         CamembertForMaskedLM,
@@ -388,14 +383,14 @@ if is_torch_available():
         CamembertForTokenClassification,
         CamembertModel,
     )
-    from .modeling_ctrl import CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel
-    from .modeling_deberta import (
+    from .models.ctrl import CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel
+    from .models.deberta import (
         DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
         DebertaForSequenceClassification,
         DebertaModel,
         DebertaPreTrainedModel,
     )
-    from .modeling_distilbert import (
+    from .models.distilbert import (
         DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
         DistilBertForMaskedLM,
         DistilBertForMultipleChoice,
@@ -405,7 +400,7 @@ if is_torch_available():
         DistilBertModel,
         DistilBertPreTrainedModel,
     )
-    from .modeling_dpr import (
+    from .models.dpr import (
         DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
         DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
         DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
@@ -416,7 +411,7 @@ if is_torch_available():
         DPRQuestionEncoder,
         DPRReader,
     )
-    from .modeling_electra import (
+    from .models.electra import (
         ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
         ElectraForMaskedLM,
         ElectraForMultipleChoice,
@@ -428,8 +423,8 @@ if is_torch_available():
         ElectraPreTrainedModel,
         load_tf_weights_in_electra,
     )
-    from .modeling_encoder_decoder import EncoderDecoderModel
-    from .modeling_flaubert import (
+    from .models.encoder_decoder import EncoderDecoderModel
+    from .models.flaubert import (
         FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
         FlaubertForMultipleChoice,
         FlaubertForQuestionAnswering,
@@ -439,8 +434,8 @@ if is_torch_available():
         FlaubertModel,
         FlaubertWithLMHeadModel,
     )
-    from .modeling_fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel
-    from .modeling_funnel import (
+    from .models.fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel
+    from .models.funnel import (
         FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
         FunnelBaseModel,
         FunnelForMaskedLM,
@@ -452,7 +447,7 @@ if is_torch_available():
         FunnelModel,
         load_tf_weights_in_funnel,
     )
-    from .modeling_gpt2 import (
+    from .models.gpt2 import (
         GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
         GPT2DoubleHeadsModel,
         GPT2ForSequenceClassification,
@@ -461,13 +456,13 @@ if is_torch_available():
         GPT2PreTrainedModel,
         load_tf_weights_in_gpt2,
     )
-    from .modeling_layoutlm import (
+    from .models.layoutlm import (
         LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
         LayoutLMForMaskedLM,
         LayoutLMForTokenClassification,
         LayoutLMModel,
     )
-    from .modeling_longformer import (
+    from .models.longformer import (
         LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
         LongformerForMaskedLM,
         LongformerForMultipleChoice,
@@ -477,7 +472,7 @@ if is_torch_available():
         LongformerModel,
         LongformerSelfAttention,
     )
-    from .modeling_lxmert import (
+    from .models.lxmert import (
         LxmertEncoder,
         LxmertForPreTraining,
         LxmertForQuestionAnswering,
@@ -486,10 +481,10 @@ if is_torch_available():
         LxmertVisualFeatureEncoder,
         LxmertXLayer,
     )
-    from .modeling_marian import MarianMTModel
-    from .modeling_mbart import MBartForConditionalGeneration
-    from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
-    from .modeling_mobilebert import (
+    from .models.marian import MarianMTModel
+    from .models.mbart import MBartForConditionalGeneration
+    from .models.mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
+    from .models.mobilebert import (
         MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
         MobileBertForMaskedLM,
         MobileBertForMultipleChoice,
@@ -503,7 +498,7 @@ if is_torch_available():
         MobileBertPreTrainedModel,
         load_tf_weights_in_mobilebert,
     )
-    from .modeling_openai import (
+    from .models.openai import (
         OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
         OpenAIGPTDoubleHeadsModel,
         OpenAIGPTForSequenceClassification,
@@ -512,8 +507,8 @@ if is_torch_available():
         OpenAIGPTPreTrainedModel,
         load_tf_weights_in_openai_gpt,
     )
-    from .modeling_pegasus import PegasusForConditionalGeneration
-    from .modeling_prophetnet import (
+    from .models.pegasus import PegasusForConditionalGeneration
+    from .models.prophetnet import (
         PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST,
         ProphetNetDecoder,
         ProphetNetEncoder,
@@ -522,8 +517,8 @@ if is_torch_available():
         ProphetNetModel,
         ProphetNetPreTrainedModel,
     )
-    from .modeling_rag import RagModel, RagSequenceForGeneration, RagTokenForGeneration
-    from .modeling_reformer import (
+    from .models.rag import RagModel, RagSequenceForGeneration, RagTokenForGeneration
+    from .models.reformer import (
         REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
         ReformerAttention,
         ReformerForMaskedLM,
@@ -533,8 +528,8 @@ if is_torch_available():
         ReformerModel,
         ReformerModelWithLMHead,
     )
-    from .modeling_retribert import RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RetriBertModel, RetriBertPreTrainedModel
-    from .modeling_roberta import (
+    from .models.retribert import RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RetriBertModel, RetriBertPreTrainedModel
+    from .models.roberta import (
         ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
         RobertaForCausalLM,
         RobertaForMaskedLM,
@@ -544,7 +539,7 @@ if is_torch_available():
         RobertaForTokenClassification,
         RobertaModel,
     )
-    from .modeling_squeezebert import (
+    from .models.squeezebert import (
         SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
         SqueezeBertForMaskedLM,
         SqueezeBertForMultipleChoice,
@@ -555,14 +550,14 @@ if is_torch_available():
         SqueezeBertModule,
         SqueezeBertPreTrainedModel,
     )
-    from .modeling_t5 import (
+    from .models.t5 import (
         T5_PRETRAINED_MODEL_ARCHIVE_LIST,
         T5ForConditionalGeneration,
         T5Model,
         T5PreTrainedModel,
         load_tf_weights_in_t5,
     )
-    from .modeling_transfo_xl import (
+    from .models.transfo_xl import (
         TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
         AdaptiveEmbedding,
         TransfoXLLMHeadModel,
@@ -570,8 +565,7 @@ if is_torch_available():
         TransfoXLPreTrainedModel,
         load_tf_weights_in_transfo_xl,
     )
-    from .modeling_utils import Conv1D, PreTrainedModel, apply_chunking_to_forward, prune_layer
|
||||
from .modeling_xlm import (
|
||||
from .models.xlm import (
|
||||
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
XLMForMultipleChoice,
|
||||
XLMForQuestionAnswering,
|
||||
@ -582,7 +576,7 @@ if is_torch_available():
|
||||
XLMPreTrainedModel,
|
||||
XLMWithLMHeadModel,
|
||||
)
|
||||
from .modeling_xlm_prophetnet import (
|
||||
from .models.xlm_prophetnet import (
|
||||
XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
XLMProphetNetDecoder,
|
||||
XLMProphetNetEncoder,
|
||||
@ -590,7 +584,7 @@ if is_torch_available():
|
||||
XLMProphetNetForConditionalGeneration,
|
||||
XLMProphetNetModel,
|
||||
)
|
||||
from .modeling_xlm_roberta import (
|
||||
from .models.xlm_roberta import (
|
||||
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
XLMRobertaForCausalLM,
|
||||
XLMRobertaForMaskedLM,
|
||||
@ -600,7 +594,7 @@ if is_torch_available():
|
||||
XLMRobertaForTokenClassification,
|
||||
XLMRobertaModel,
|
||||
)
|
||||
from .modeling_xlnet import (
|
||||
from .models.xlnet import (
|
||||
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
XLNetForMultipleChoice,
|
||||
XLNetForQuestionAnswering,
|
||||
@ -638,7 +632,8 @@ if is_tf_available():
# Benchmarks
from .benchmark.benchmark_tf import TensorFlowBenchmark
from .generation_tf_utils import tf_top_k_top_p_filtering
from .modeling_tf_albert import (
from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, shape_list
from .models.albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
@ -650,7 +645,7 @@ if is_tf_available():
TFAlbertModel,
TFAlbertPreTrainedModel,
)
from .modeling_tf_auto import (
from .models.auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
@ -673,8 +668,8 @@ if is_tf_available():
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
)
from .modeling_tf_bart import TFBartForConditionalGeneration, TFBartModel
from .modeling_tf_bert import (
from .models.bart import TFBartForConditionalGeneration, TFBartModel
from .models.bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
@ -689,8 +684,8 @@ if is_tf_available():
TFBertModel,
TFBertPreTrainedModel,
)
from .modeling_tf_blenderbot import TFBlenderbotForConditionalGeneration
from .modeling_tf_camembert import (
from .models.blenderbot import TFBlenderbotForConditionalGeneration
from .models.camembert import (
TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCamembertForMaskedLM,
TFCamembertForMultipleChoice,
@ -699,13 +694,13 @@ if is_tf_available():
TFCamembertForTokenClassification,
TFCamembertModel,
)
from .modeling_tf_ctrl import (
from .models.ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
from .modeling_tf_distilbert import (
from .models.distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
@ -716,7 +711,7 @@ if is_tf_available():
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
from .modeling_tf_dpr import (
from .models.dpr import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
@ -727,7 +722,7 @@ if is_tf_available():
TFDPRQuestionEncoder,
TFDPRReader,
)
from .modeling_tf_electra import (
from .models.electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
@ -738,7 +733,7 @@ if is_tf_available():
TFElectraModel,
TFElectraPreTrainedModel,
)
from .modeling_tf_flaubert import (
from .models.flaubert import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
@ -747,7 +742,7 @@ if is_tf_available():
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
from .modeling_tf_funnel import (
from .models.funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
@ -758,7 +753,7 @@ if is_tf_available():
TFFunnelForTokenClassification,
TFFunnelModel,
)
from .modeling_tf_gpt2 import (
from .models.gpt2 import (
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGPT2DoubleHeadsModel,
TFGPT2LMHeadModel,
@ -766,14 +761,14 @@ if is_tf_available():
TFGPT2Model,
TFGPT2PreTrainedModel,
)
from .modeling_tf_longformer import (
from .models.longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForQuestionAnswering,
TFLongformerModel,
TFLongformerSelfAttention,
)
from .modeling_tf_lxmert import (
from .models.lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
@ -781,9 +776,9 @@ if is_tf_available():
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
from .modeling_tf_marian import TFMarianMTModel
from .modeling_tf_mbart import TFMBartForConditionalGeneration
from .modeling_tf_mobilebert import (
from .models.marian import TFMarianMTModel
from .models.mbart import TFMBartForConditionalGeneration
from .models.mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
@ -796,7 +791,7 @@ if is_tf_available():
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
from .modeling_tf_openai import (
from .models.openai import (
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFOpenAIGPTDoubleHeadsModel,
TFOpenAIGPTLMHeadModel,
@ -804,8 +799,8 @@ if is_tf_available():
TFOpenAIGPTModel,
TFOpenAIGPTPreTrainedModel,
)
from .modeling_tf_pegasus import TFPegasusForConditionalGeneration
from .modeling_tf_roberta import (
from .models.pegasus import TFPegasusForConditionalGeneration
from .models.roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
@ -816,13 +811,13 @@ if is_tf_available():
TFRobertaModel,
TFRobertaPreTrainedModel,
)
from .modeling_tf_t5 import (
from .models.t5 import (
TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST,
TFT5ForConditionalGeneration,
TFT5Model,
TFT5PreTrainedModel,
)
from .modeling_tf_transfo_xl import (
from .models.transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLLMHeadModel,
@ -830,8 +825,7 @@ if is_tf_available():
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, shape_list
from .modeling_tf_xlm import (
from .models.xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
@ -842,7 +836,7 @@ if is_tf_available():
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
from .modeling_tf_xlm_roberta import (
from .models.xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
@ -851,7 +845,7 @@ if is_tf_available():
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
)
from .modeling_tf_xlnet import (
from .models.xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
@ -876,8 +870,8 @@ else:


if is_flax_available():
from .modeling_flax_bert import FlaxBertModel
from .modeling_flax_roberta import FlaxRobertaModel
from .models.bert import FlaxBertModel
from .models.roberta import FlaxRobertaModel
else:
# Import the same objects as dummies to get them in the namespace.
# They will raise an import error if the user tries to instantiate / use them.
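Aside: the hunks above only relocate internal modules; the public names stay re-exported from the top-level package. A minimal sanity check, assuming a build of this commit:

    # Top-level imports are unchanged by the reorganization
    from transformers import BertModel, AlbertTokenizer

    # Internal modules now live in per-model subpackages
    from transformers.models.bert import modeling_bert

    # The old flat module path is gone; this would now raise ImportError:
    # import transformers.modeling_bert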
@ -23,7 +23,7 @@ from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..file_utils import is_py3nvml_available, is_torch_available
from ..modeling_auto import MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING
from ..models.auto.modeling_auto import MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING
from ..utils import logging
from .benchmark_utils import (
Benchmark,
@ -25,7 +25,7 @@ from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..file_utils import is_py3nvml_available, is_tf_available
from ..modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import logging
from .benchmark_utils import (
Benchmark,
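Aside: the benchmark entry points themselves are untouched; only the mapping imports above move. A hedged usage sketch, with argument names assumed from the public benchmark API of this era:

    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

    # Measure speed/memory for one small setup; run() downloads weights,
    # so it is left commented out here.
    args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[8])
    benchmark = PyTorchBenchmark(args)
    # results = benchmark.run()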
@ -327,7 +327,7 @@ def start_memory_tracing(

- `modules_to_trace`: (None, string, list/tuple of string) if None, all events are recorded if string or list
of strings: only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or
'transformers.modeling_gpt2')
'transformers.models.gpt2.modeling_gpt2')
- `modules_not_to_trace`: (None, string, list/tuple of string) if None, no module is avoided if string or list
of strings: events from the listed module/sub-module will not be recorded (e.g. 'torch')
- `events_to_trace`: string or list of string of events to be recorded (see official python doc for
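Aside: only the example module path in this docstring changes. A hedged sketch of the API it documents, assuming start_memory_tracing/stop_memory_tracing are exported at the top level as elsewhere in this era of the library:

    from transformers import start_memory_tracing, stop_memory_tracing

    # Record line-by-line allocations made inside the relocated GPT-2 module only
    trace = start_memory_tracing(modules_to_trace="transformers.models.gpt2.modeling_gpt2")
    # ... run a forward pass here ...
    summary = stop_memory_tracing(trace)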
@ -73,7 +73,7 @@ class ConvertCommand(BaseTransformersCLICommand):
def run(self):
if self._model_type == "albert":
try:
from transformers.convert_albert_original_tf_checkpoint_to_pytorch import (
from transformers.models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
@ -82,7 +82,7 @@ class ConvertCommand(BaseTransformersCLICommand):
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from transformers.convert_bert_original_tf_checkpoint_to_pytorch import (
from transformers.models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
@ -91,7 +91,7 @@ class ConvertCommand(BaseTransformersCLICommand):
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from transformers.convert_funnel_original_tf_checkpoint_to_pytorch import (
from transformers.models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
@ -99,14 +99,14 @@ class ConvertCommand(BaseTransformersCLICommand):

convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "gpt":
from transformers.convert_openai_original_tf_checkpoint_to_pytorch import (
from transformers.models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)

convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from transformers.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
from transformers.models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
@ -123,7 +123,7 @@ class ConvertCommand(BaseTransformersCLICommand):
)
elif self._model_type == "gpt2":
try:
from transformers.convert_gpt2_original_tf_checkpoint_to_pytorch import (
from transformers.models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
convert_gpt2_checkpoint_to_pytorch,
)
except ImportError:
@ -132,7 +132,7 @@ class ConvertCommand(BaseTransformersCLICommand):
convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from transformers.convert_xlnet_original_tf_checkpoint_to_pytorch import (
from transformers.models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
@ -142,13 +142,13 @@ class ConvertCommand(BaseTransformersCLICommand):
self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
)
elif self._model_type == "xlm":
from transformers.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
from transformers.models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)

convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
elif self._model_type == "lxmert":
from transformers.convert_lxmert_original_pytorch_checkpoint_to_pytorch import (
from transformers.models.lxmert.convert_lxmert_original_pytorch_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
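Aside: the try/import blocks above are deliberate lazy imports, so a missing optional backend only fails for the branch that needs it. A condensed sketch of the same pattern (the wrapper function is hypothetical):

    def run_conversion(model_type, tf_checkpoint, config, pytorch_dump_output):
        if model_type == "bert":
            # Imported lazily: a missing TensorFlow breaks only this branch.
            from transformers.models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                convert_tf_checkpoint_to_pytorch,
            )
            convert_tf_checkpoint_to_pytorch(tf_checkpoint, config, pytorch_dump_output)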
@ -9,7 +9,7 @@ from torch.utils.data.dataset import Dataset

from filelock import FileLock

from ...modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features

@ -14,7 +14,7 @@ import math
import re
import string

from transformers.tokenization_bert import BasicTokenizer
from transformers import BasicTokenizer

from ...utils import logging
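Aside: note that the second hunk switches to the top-level re-export instead of chasing the moved module. A hedged illustration of the behavior relied on:

    from transformers import BasicTokenizer

    # Lowercases and splits off punctuation
    BasicTokenizer(do_lower_case=True).tokenize("Hello, World!")  # expected: ["hello", ",", "world", "!"]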
@ -7,7 +7,7 @@ import numpy as np
from tqdm import tqdm

from ...file_utils import is_tf_available, is_torch_available
from ...tokenization_bert import whitespace_tokenize
from ...models.bert.tokenization_bert import whitespace_tokenize
from ...tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase, TruncationStrategy
from ...utils import logging
from .utils import DataProcessor
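Aside: whitespace_tokenize, by contrast, is not re-exported at the top level, so the processor follows it to its new home. A hedged illustration:

    from transformers.models.bert.tokenization_bert import whitespace_tokenize

    # strip() then split() on whitespace
    whitespace_tokenize("  hello   world ")  # expected: ["hello", "world"]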
@ -19,7 +19,6 @@ import copy
import json
import os

from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .file_utils import (
CONFIG_NAME,
MODEL_CARD_NAME,
@ -29,6 +28,7 @@ from .file_utils import (
hf_bucket_url,
is_remote_url,
)
from .models.auto.configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .utils import logging
41
src/transformers/models/albert/__init__.py
Normal file
@ -0,0 +1,41 @@
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

from ...file_utils import is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer

if is_tokenizers_available():
    from .tokenization_albert_fast import AlbertTokenizerFast

if is_torch_available():
    from .modeling_albert import (
        ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
        AlbertPreTrainedModel,
        load_tf_weights_in_albert,
    )

if is_tf_available():
    from .modeling_tf_albert import (
        TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFAlbertForMaskedLM,
        TFAlbertForMultipleChoice,
        TFAlbertForPreTraining,
        TFAlbertForQuestionAnswering,
        TFAlbertForSequenceClassification,
        TFAlbertForTokenClassification,
        TFAlbertMainLayer,
        TFAlbertModel,
        TFAlbertPreTrainedModel,
    )
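Aside: every new per-model subpackage follows this shape: the configuration is always importable, while tokenizers and models are guarded by backend checks. Hedged examples of what each guard unlocks:

    from transformers.models.albert import AlbertConfig       # always importable
    from transformers.models.albert import AlbertTokenizer    # needs sentencepiece
    from transformers.models.albert import AlbertModel        # needs torch
    from transformers.models.albert import TFAlbertModel      # needs TensorFlow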
@ -15,7 +15,7 @@
# limitations under the License.
""" ALBERT model configuration """

from .configuration_utils import PretrainedConfig
from ...configuration_utils import PretrainedConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
@ -24,16 +24,15 @@ import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss

from .activations import ACT2FN
from .configuration_albert import AlbertConfig
from .file_utils import (
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from .modeling_outputs import (
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
MaskedLMOutput,
@ -42,13 +41,14 @@ from .modeling_outputs import (
SequenceClassifierOutput,
TokenClassifierOutput,
)
from .modeling_utils import (
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from .utils import logging
from ...utils import logging
from .configuration_albert import AlbertConfig


logger = logging.get_logger(__name__)
@ -216,7 +216,7 @@ class AlbertEmbeddings(nn.Module):
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))

# Copied from transformers.modeling_bert.BertEmbeddings.forward
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
@ -266,7 +266,7 @@ class AlbertAttention(nn.Module):
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pruned_heads = set()

# Copied from transformers.modeling_bert.BertSelfAttention.transpose_for_scores
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention.transpose_for_scores
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
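Aside: the `# Copied from ...` markers are consumed by the repository's copy-consistency check, so they must carry the full new dotted path. A hedged illustration of the convention:

    # Copied from transformers.models.bert.modeling_bert.BertSelfAttention.transpose_for_scores
    def transpose_for_scores(self, x):
        ...  # body must stay identical to the Bert source for the check to pass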
@ -21,9 +21,8 @@ from typing import Optional, Tuple

import tensorflow as tf

from .activations_tf import get_tf_activation
from .configuration_albert import AlbertConfig
from .file_utils import (
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
ModelOutput,
add_code_sample_docstrings,
@ -31,7 +30,7 @@ from .file_utils import (
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from .modeling_tf_outputs import (
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPooling,
TFMaskedLMOutput,
@ -40,7 +39,7 @@ from .modeling_tf_outputs import (
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from .modeling_tf_utils import (
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFMultipleChoiceLoss,
TFPreTrainedModel,
@ -51,8 +50,9 @@ from .modeling_tf_utils import (
keras_serializable,
shape_list,
)
from .tokenization_utils import BatchEncoding
from .utils import logging
from ...tokenization_utils import BatchEncoding
from ...utils import logging
from .configuration_albert import AlbertConfig


logger = logging.get_logger(__name__)
@ -22,8 +22,8 @@ from typing import List, Optional, Tuple

import sentencepiece as spm

from .tokenization_utils import PreTrainedTokenizer
from .utils import logging
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)
@ -19,9 +19,9 @@ import os
from shutil import copyfile
from typing import List, Optional, Tuple

from .file_utils import is_sentencepiece_available
from .tokenization_utils_fast import PreTrainedTokenizerFast
from .utils import logging
from ...file_utils import is_sentencepiece_available
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if is_sentencepiece_available():
59
src/transformers/models/auto/__init__.py
Normal file
@ -0,0 +1,59 @@
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

from ...file_utils import is_tf_available, is_torch_available
from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, AutoConfig
from .tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer


if is_torch_available():
    from .modeling_auto import (
        MODEL_FOR_CAUSAL_LM_MAPPING,
        MODEL_FOR_MASKED_LM_MAPPING,
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
        MODEL_FOR_PRETRAINING_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        MODEL_WITH_LM_HEAD_MAPPING,
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForMultipleChoice,
        AutoModelForNextSentencePrediction,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelForTokenClassification,
        AutoModelWithLMHead,
    )

if is_tf_available():
    from .modeling_tf_auto import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TF_MODEL_MAPPING,
        TF_MODEL_WITH_LM_HEAD_MAPPING,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForMultipleChoice,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
    )
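Aside: the auto subpackage keeps the same guarded layout, and its public classes work as before. A hedged sketch (checkpoint name illustrative):

    from transformers.models.auto import AutoConfig, AutoTokenizer

    config = AutoConfig.from_pretrained("bert-base-uncased")
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")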
@ -17,43 +17,46 @@
import re
from collections import OrderedDict

from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig
from .configuration_bart import BART_PRETRAINED_CONFIG_ARCHIVE_MAP, BartConfig
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig
from .configuration_bert_generation import BertGenerationConfig
from .configuration_blenderbot import BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig
from .configuration_camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig
from .configuration_distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig
from .configuration_dpr import DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, DPRConfig
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig
from .configuration_encoder_decoder import EncoderDecoderConfig
from .configuration_flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig
from .configuration_fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config
from .configuration_layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig
from .configuration_longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .configuration_marian import MarianConfig
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig
from .configuration_mobilebert import MobileBertConfig
from .configuration_openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig
from .configuration_pegasus import PegasusConfig
from .configuration_prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig
from .configuration_rag import RagConfig
from .configuration_reformer import ReformerConfig
from .configuration_retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig
from .configuration_squeezebert import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig
from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .configuration_utils import PretrainedConfig
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig
from .configuration_xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig
from .configuration_xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
from ...configuration_utils import PretrainedConfig
from ..albert.configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig
from ..bart.configuration_bart import BART_PRETRAINED_CONFIG_ARCHIVE_MAP, BartConfig
from ..bert.configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig
from ..bert_generation.configuration_bert_generation import BertGenerationConfig
from ..blenderbot.configuration_blenderbot import BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig
from ..camembert.configuration_camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig
from ..ctrl.configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from ..deberta.configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig
from ..distilbert.configuration_distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig
from ..dpr.configuration_dpr import DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, DPRConfig
from ..electra.configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig
from ..encoder_decoder.configuration_encoder_decoder import EncoderDecoderConfig
from ..flaubert.configuration_flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig
from ..fsmt.configuration_fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig
from ..funnel.configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from ..gpt2.configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config
from ..layoutlm.configuration_layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig
from ..longformer.configuration_longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig
from ..lxmert.configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from ..marian.configuration_marian import MarianConfig
from ..mbart.configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig
from ..mobilebert.configuration_mobilebert import MobileBertConfig
from ..openai.configuration_openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig
from ..pegasus.configuration_pegasus import PegasusConfig
from ..prophetnet.configuration_prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig
from ..rag.configuration_rag import RagConfig
from ..reformer.configuration_reformer import ReformerConfig
from ..retribert.configuration_retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig
from ..roberta.configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig
from ..squeezebert.configuration_squeezebert import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig
from ..t5.configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config
from ..transfo_xl.configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from ..xlm.configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig
from ..xlm_prophetnet.configuration_xlm_prophetnet import (
XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMProphetNetConfig,
)
from ..xlm_roberta.configuration_xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig
from ..xlnet.configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig


ALL_PRETRAINED_CONFIG_ARCHIVE_MAP = dict(
@ -18,6 +18,173 @@
import warnings
from collections import OrderedDict

from ...configuration_utils import PretrainedConfig
from ...file_utils import add_start_docstrings
from ...utils import logging
from ..albert.modeling_albert import (
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from ..bart.modeling_bart import (
BartForConditionalGeneration,
BartForQuestionAnswering,
BartForSequenceClassification,
BartModel,
)
from ..bert.modeling_bert import (
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLMHeadModel,
BertModel,
)
from ..bert_generation.modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder
from ..blenderbot.modeling_blenderbot import BlenderbotForConditionalGeneration
from ..camembert.modeling_camembert import (
CamembertForCausalLM,
CamembertForMaskedLM,
CamembertForMultipleChoice,
CamembertForQuestionAnswering,
CamembertForSequenceClassification,
CamembertForTokenClassification,
CamembertModel,
)
from ..ctrl.modeling_ctrl import CTRLLMHeadModel, CTRLModel
from ..deberta.modeling_deberta import DebertaForSequenceClassification, DebertaModel
from ..distilbert.modeling_distilbert import (
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
from ..dpr.modeling_dpr import DPRQuestionEncoder
from ..electra.modeling_electra import (
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
)
from ..encoder_decoder.modeling_encoder_decoder import EncoderDecoderModel
from ..flaubert.modeling_flaubert import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from ..fsmt.modeling_fsmt import FSMTForConditionalGeneration, FSMTModel
from ..funnel.modeling_funnel import (
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
)
from ..gpt2.modeling_gpt2 import GPT2ForSequenceClassification, GPT2LMHeadModel, GPT2Model
from ..layoutlm.modeling_layoutlm import LayoutLMForMaskedLM, LayoutLMForTokenClassification, LayoutLMModel
from ..longformer.modeling_longformer import (
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
)
from ..lxmert.modeling_lxmert import LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel
from ..marian.modeling_marian import MarianMTModel
from ..mbart.modeling_mbart import MBartForConditionalGeneration
from ..mobilebert.modeling_mobilebert import (
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
from ..openai.modeling_openai import OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel
from ..pegasus.modeling_pegasus import PegasusForConditionalGeneration
from ..prophetnet.modeling_prophetnet import ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel
from ..rag.modeling_rag import ( # noqa: F401 - need to import all RagModels to be in globals() function
RagModel,
RagSequenceForGeneration,
RagTokenForGeneration,
)
from ..reformer.modeling_reformer import (
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerModel,
ReformerModelWithLMHead,
)
from ..retribert.modeling_retribert import RetriBertModel
from ..roberta.modeling_roberta import (
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
)
from ..squeezebert.modeling_squeezebert import (
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
from ..t5.modeling_t5 import T5ForConditionalGeneration, T5Model
from ..transfo_xl.modeling_transfo_xl import TransfoXLLMHeadModel, TransfoXLModel
from ..xlm.modeling_xlm import (
XLMForMultipleChoice,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from ..xlm_prophetnet.modeling_xlm_prophetnet import (
XLMProphetNetForCausalLM,
XLMProphetNetForConditionalGeneration,
XLMProphetNetModel,
)
from ..xlm_roberta.modeling_xlm_roberta import (
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
)
from ..xlnet.modeling_xlnet import (
XLNetForMultipleChoice,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
)
from .configuration_auto import (
AlbertConfig,
AutoConfig,
@ -39,6 +206,7 @@ from .configuration_auto import (
LayoutLMConfig,
LongformerConfig,
LxmertConfig,
MarianConfig,
MBartConfig,
MobileBertConfig,
OpenAIGPTConfig,
@ -56,174 +224,6 @@ from .configuration_auto import (
XLNetConfig,
replace_list_option_in_docstrings,
)
from .configuration_marian import MarianConfig
from .configuration_utils import PretrainedConfig
from .file_utils import add_start_docstrings
from .modeling_albert import (
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from .modeling_bart import (
BartForConditionalGeneration,
BartForQuestionAnswering,
BartForSequenceClassification,
BartModel,
)
from .modeling_bert import (
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLMHeadModel,
BertModel,
)
from .modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder
from .modeling_blenderbot import BlenderbotForConditionalGeneration
from .modeling_camembert import (
CamembertForCausalLM,
CamembertForMaskedLM,
CamembertForMultipleChoice,
CamembertForQuestionAnswering,
CamembertForSequenceClassification,
CamembertForTokenClassification,
CamembertModel,
)
from .modeling_ctrl import CTRLLMHeadModel, CTRLModel
from .modeling_deberta import DebertaForSequenceClassification, DebertaModel
from .modeling_distilbert import (
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
from .modeling_dpr import DPRQuestionEncoder
from .modeling_electra import (
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
)
from .modeling_encoder_decoder import EncoderDecoderModel
from .modeling_flaubert import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from .modeling_fsmt import FSMTForConditionalGeneration, FSMTModel
from .modeling_funnel import (
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
)
from .modeling_gpt2 import GPT2ForSequenceClassification, GPT2LMHeadModel, GPT2Model
from .modeling_layoutlm import LayoutLMForMaskedLM, LayoutLMForTokenClassification, LayoutLMModel
from .modeling_longformer import (
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
)
from .modeling_lxmert import LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel
from .modeling_marian import MarianMTModel
from .modeling_mbart import MBartForConditionalGeneration
from .modeling_mobilebert import (
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
from .modeling_openai import OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel
from .modeling_pegasus import PegasusForConditionalGeneration
from .modeling_prophetnet import ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel
from .modeling_rag import ( # noqa: F401 - need to import all RagModels to be in globals() function
RagModel,
RagSequenceForGeneration,
RagTokenForGeneration,
)
from .modeling_reformer import (
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerModel,
ReformerModelWithLMHead,
)
from .modeling_retribert import RetriBertModel
from .modeling_roberta import (
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
)
from .modeling_squeezebert import (
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
from .modeling_t5 import T5ForConditionalGeneration, T5Model
from .modeling_transfo_xl import TransfoXLLMHeadModel, TransfoXLModel
from .modeling_xlm import (
XLMForMultipleChoice,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from .modeling_xlm_prophetnet import (
XLMProphetNetForCausalLM,
XLMProphetNetForConditionalGeneration,
XLMProphetNetModel,
)
from .modeling_xlm_roberta import (
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
)
from .modeling_xlnet import (
XLNetForMultipleChoice,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
)
from .utils import logging


# Add modeling imports here
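Aside: functionally nothing changes here; the mappings still key config classes to model classes and merely import them from the subpackages. A hedged sketch of the lookup they drive:

    from transformers.models.auto.modeling_auto import MODEL_MAPPING
    from transformers.models.bert.configuration_bert import BertConfig

    model_class = MODEL_MAPPING[BertConfig]
    print(model_class.__name__)    # expected: BertModel
    print(model_class.__module__)  # expected: transformers.models.bert.modeling_bert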
@ -17,11 +17,11 @@

from collections import OrderedDict

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bert.modeling_flax_bert import FlaxBertModel
from ..roberta.modeling_flax_roberta import FlaxRobertaModel
from .configuration_auto import AutoConfig, BertConfig, RobertaConfig
from .configuration_utils import PretrainedConfig
from .modeling_flax_bert import FlaxBertModel
from .modeling_flax_roberta import FlaxRobertaModel
from .utils import logging


logger = logging.get_logger(__name__)
@ -18,38 +18,10 @@
import warnings
from collections import OrderedDict

from .configuration_auto import (
AlbertConfig,
AutoConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
ElectraConfig,
FlaubertConfig,
FunnelConfig,
GPT2Config,
LongformerConfig,
LxmertConfig,
MobileBertConfig,
OpenAIGPTConfig,
RobertaConfig,
T5Config,
TransfoXLConfig,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
replace_list_option_in_docstrings,
)
from .configuration_blenderbot import BlenderbotConfig
from .configuration_dpr import DPRConfig
from .configuration_marian import MarianConfig
from .configuration_mbart import MBartConfig
from .configuration_pegasus import PegasusConfig
from .configuration_utils import PretrainedConfig
from .file_utils import add_start_docstrings
from .modeling_tf_albert import (
from ...configuration_utils import PretrainedConfig
from ...file_utils import add_start_docstrings
from ...utils import logging
from ..albert.modeling_tf_albert import (
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
@ -58,8 +30,8 @@ from .modeling_tf_albert import (
TFAlbertForTokenClassification,
TFAlbertModel,
)
from .modeling_tf_bart import TFBartForConditionalGeneration, TFBartModel
from .modeling_tf_bert import (
from ..bart.modeling_tf_bart import TFBartForConditionalGeneration, TFBartModel
from ..bert.modeling_tf_bert import (
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
@ -70,8 +42,8 @@ from .modeling_tf_bert import (
TFBertLMHeadModel,
TFBertModel,
)
from .modeling_tf_blenderbot import TFBlenderbotForConditionalGeneration
from .modeling_tf_camembert import (
from ..blenderbot.modeling_tf_blenderbot import TFBlenderbotForConditionalGeneration
from ..camembert.modeling_tf_camembert import (
TFCamembertForMaskedLM,
TFCamembertForMultipleChoice,
TFCamembertForQuestionAnswering,
@ -79,8 +51,8 @@ from .modeling_tf_camembert import (
TFCamembertForTokenClassification,
TFCamembertModel,
)
from .modeling_tf_ctrl import TFCTRLLMHeadModel, TFCTRLModel
from .modeling_tf_distilbert import (
from ..ctrl.modeling_tf_ctrl import TFCTRLLMHeadModel, TFCTRLModel
from ..distilbert.modeling_tf_distilbert import (
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
@ -88,8 +60,8 @@ from .modeling_tf_distilbert import (
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
from .modeling_tf_dpr import TFDPRQuestionEncoder
from .modeling_tf_electra import (
from ..dpr.modeling_tf_dpr import TFDPRQuestionEncoder
from ..electra.modeling_tf_electra import (
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
@ -98,7 +70,7 @@ from .modeling_tf_electra import (
TFElectraForTokenClassification,
TFElectraModel,
)
from .modeling_tf_flaubert import (
from ..flaubert.modeling_tf_flaubert import (
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
@ -106,7 +78,7 @@ from .modeling_tf_flaubert import (
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
from .modeling_tf_funnel import (
from ..funnel.modeling_tf_funnel import (
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
@ -115,12 +87,16 @@ from .modeling_tf_funnel import (
TFFunnelForTokenClassification,
TFFunnelModel,
)
from .modeling_tf_gpt2 import TFGPT2LMHeadModel, TFGPT2Model
|
||||
from .modeling_tf_longformer import TFLongformerForMaskedLM, TFLongformerForQuestionAnswering, TFLongformerModel
|
||||
from .modeling_tf_lxmert import TFLxmertForPreTraining, TFLxmertModel
|
||||
from .modeling_tf_marian import TFMarianMTModel
|
||||
from .modeling_tf_mbart import TFMBartForConditionalGeneration
|
||||
from .modeling_tf_mobilebert import (
|
||||
from ..gpt2.modeling_tf_gpt2 import TFGPT2LMHeadModel, TFGPT2Model
|
||||
from ..longformer.modeling_tf_longformer import (
|
||||
TFLongformerForMaskedLM,
|
||||
TFLongformerForQuestionAnswering,
|
||||
TFLongformerModel,
|
||||
)
|
||||
from ..lxmert.modeling_tf_lxmert import TFLxmertForPreTraining, TFLxmertModel
|
||||
from ..marian.modeling_tf_marian import TFMarianMTModel
|
||||
from ..mbart.modeling_tf_mbart import TFMBartForConditionalGeneration
|
||||
from ..mobilebert.modeling_tf_mobilebert import (
|
||||
TFMobileBertForMaskedLM,
|
||||
TFMobileBertForMultipleChoice,
|
||||
TFMobileBertForNextSentencePrediction,
|
||||
@ -130,9 +106,9 @@ from .modeling_tf_mobilebert import (
|
||||
TFMobileBertForTokenClassification,
|
||||
TFMobileBertModel,
|
||||
)
|
||||
from .modeling_tf_openai import TFOpenAIGPTLMHeadModel, TFOpenAIGPTModel
|
||||
from .modeling_tf_pegasus import TFPegasusForConditionalGeneration
|
||||
from .modeling_tf_roberta import (
|
||||
from ..openai.modeling_tf_openai import TFOpenAIGPTLMHeadModel, TFOpenAIGPTModel
|
||||
from ..pegasus.modeling_tf_pegasus import TFPegasusForConditionalGeneration
|
||||
from ..roberta.modeling_tf_roberta import (
|
||||
TFRobertaForMaskedLM,
|
||||
TFRobertaForMultipleChoice,
|
||||
TFRobertaForQuestionAnswering,
|
||||
@ -140,9 +116,9 @@ from .modeling_tf_roberta import (
|
||||
TFRobertaForTokenClassification,
|
||||
TFRobertaModel,
|
||||
)
|
||||
from .modeling_tf_t5 import TFT5ForConditionalGeneration, TFT5Model
|
||||
from .modeling_tf_transfo_xl import TFTransfoXLLMHeadModel, TFTransfoXLModel
|
||||
from .modeling_tf_xlm import (
|
||||
from ..t5.modeling_tf_t5 import TFT5ForConditionalGeneration, TFT5Model
|
||||
from ..transfo_xl.modeling_tf_transfo_xl import TFTransfoXLLMHeadModel, TFTransfoXLModel
|
||||
from ..xlm.modeling_tf_xlm import (
|
||||
TFXLMForMultipleChoice,
|
||||
TFXLMForQuestionAnsweringSimple,
|
||||
TFXLMForSequenceClassification,
|
||||
@ -150,7 +126,7 @@ from .modeling_tf_xlm import (
|
||||
TFXLMModel,
|
||||
TFXLMWithLMHeadModel,
|
||||
)
|
||||
from .modeling_tf_xlm_roberta import (
|
||||
from ..xlm_roberta.modeling_tf_xlm_roberta import (
|
||||
TFXLMRobertaForMaskedLM,
|
||||
TFXLMRobertaForMultipleChoice,
|
||||
TFXLMRobertaForQuestionAnswering,
|
||||
@ -158,7 +134,7 @@ from .modeling_tf_xlm_roberta import (
|
||||
TFXLMRobertaForTokenClassification,
|
||||
TFXLMRobertaModel,
|
||||
)
|
||||
from .modeling_tf_xlnet import (
|
||||
from ..xlnet.modeling_tf_xlnet import (
|
||||
TFXLNetForMultipleChoice,
|
||||
TFXLNetForQuestionAnsweringSimple,
|
||||
TFXLNetForSequenceClassification,
|
||||
@ -166,7 +142,35 @@ from .modeling_tf_xlnet import (
|
||||
TFXLNetLMHeadModel,
|
||||
TFXLNetModel,
|
||||
)
|
||||
from .utils import logging
|
||||
from .configuration_auto import (
|
||||
AlbertConfig,
|
||||
AutoConfig,
|
||||
BartConfig,
|
||||
BertConfig,
|
||||
BlenderbotConfig,
|
||||
CamembertConfig,
|
||||
CTRLConfig,
|
||||
DistilBertConfig,
|
||||
DPRConfig,
|
||||
ElectraConfig,
|
||||
FlaubertConfig,
|
||||
FunnelConfig,
|
||||
GPT2Config,
|
||||
LongformerConfig,
|
||||
LxmertConfig,
|
||||
MarianConfig,
|
||||
MBartConfig,
|
||||
MobileBertConfig,
|
||||
OpenAIGPTConfig,
|
||||
PegasusConfig,
|
||||
RobertaConfig,
|
||||
T5Config,
|
||||
TransfoXLConfig,
|
||||
XLMConfig,
|
||||
XLMRobertaConfig,
|
||||
XLNetConfig,
|
||||
replace_list_option_in_docstrings,
|
||||
)
|
||||
|
||||
|
||||
# Add modeling imports here
|
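Note: the `OrderedDict` import that opens the next hunk is what the auto classes are built on. Each auto class keeps an ordered mapping from config class to concrete class and picks the first match. A minimal sketch of that pattern follows; the mapping contents and the `tokenizer_class_for` helper are an illustrative subset and a hypothetical name, not the file's actual table or API.

    from collections import OrderedDict

    from transformers import (
        BertConfig,
        BertTokenizer,
        BertTokenizerFast,
        GPT2Config,
        GPT2Tokenizer,
        GPT2TokenizerFast,
    )

    # Illustrative subset: the real table in tokenization_auto.py covers
    # every model package, pairing each config with (slow, fast) classes.
    TOKENIZER_MAPPING = OrderedDict(
        [
            (BertConfig, (BertTokenizer, BertTokenizerFast)),
            (GPT2Config, (GPT2Tokenizer, GPT2TokenizerFast)),
        ]
    )


    def tokenizer_class_for(config, use_fast=True):
        # First matching config class wins, which is why the dict is
        # ordered: some config classes subclass others.
        for config_class, (slow, fast) in TOKENIZER_MAPPING.items():
            if isinstance(config, config_class):
                return fast if use_fast and fast is not None else slow
        raise ValueError(f"Unrecognized configuration class {config.__class__}")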
@ -17,6 +17,37 @@

from collections import OrderedDict

from ...configuration_utils import PretrainedConfig
from ...file_utils import is_sentencepiece_available, is_tokenizers_available
from ...utils import logging
from ..bart.tokenization_bart import BartTokenizer
from ..bert.tokenization_bert import BertTokenizer
from ..bert_japanese.tokenization_bert_japanese import BertJapaneseTokenizer
from ..bertweet.tokenization_bertweet import BertweetTokenizer
from ..blenderbot.tokenization_blenderbot import BlenderbotSmallTokenizer
from ..ctrl.tokenization_ctrl import CTRLTokenizer
from ..deberta.tokenization_deberta import DebertaTokenizer
from ..distilbert.tokenization_distilbert import DistilBertTokenizer
from ..dpr.tokenization_dpr import DPRQuestionEncoderTokenizer
from ..electra.tokenization_electra import ElectraTokenizer
from ..flaubert.tokenization_flaubert import FlaubertTokenizer
from ..fsmt.tokenization_fsmt import FSMTTokenizer
from ..funnel.tokenization_funnel import FunnelTokenizer
from ..gpt2.tokenization_gpt2 import GPT2Tokenizer
from ..herbert.tokenization_herbert import HerbertTokenizer
from ..layoutlm.tokenization_layoutlm import LayoutLMTokenizer
from ..longformer.tokenization_longformer import LongformerTokenizer
from ..lxmert.tokenization_lxmert import LxmertTokenizer
from ..mobilebert.tokenization_mobilebert import MobileBertTokenizer
from ..openai.tokenization_openai import OpenAIGPTTokenizer
from ..phobert.tokenization_phobert import PhobertTokenizer
from ..prophetnet.tokenization_prophetnet import ProphetNetTokenizer
from ..rag.tokenization_rag import RagTokenizer
from ..retribert.tokenization_retribert import RetriBertTokenizer
from ..roberta.tokenization_roberta import RobertaTokenizer
from ..squeezebert.tokenization_squeezebert import SqueezeBertTokenizer
from ..transfo_xl.tokenization_transfo_xl import TransfoXLTokenizer
from ..xlm.tokenization_xlm import XLMTokenizer
from .configuration_auto import (
    AlbertConfig,
    AutoConfig,
@ -57,51 +88,20 @@ from .configuration_auto import (
    XLNetConfig,
    replace_list_option_in_docstrings,
)
from .configuration_utils import PretrainedConfig
from .file_utils import is_sentencepiece_available, is_tokenizers_available
from .tokenization_bart import BartTokenizer
from .tokenization_bert import BertTokenizer
from .tokenization_bert_japanese import BertJapaneseTokenizer
from .tokenization_bertweet import BertweetTokenizer
from .tokenization_blenderbot import BlenderbotSmallTokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_deberta import DebertaTokenizer
from .tokenization_distilbert import DistilBertTokenizer
from .tokenization_dpr import DPRQuestionEncoderTokenizer
from .tokenization_electra import ElectraTokenizer
from .tokenization_flaubert import FlaubertTokenizer
from .tokenization_fsmt import FSMTTokenizer
from .tokenization_funnel import FunnelTokenizer
from .tokenization_gpt2 import GPT2Tokenizer
from .tokenization_herbert import HerbertTokenizer
from .tokenization_layoutlm import LayoutLMTokenizer
from .tokenization_longformer import LongformerTokenizer
from .tokenization_lxmert import LxmertTokenizer
from .tokenization_mobilebert import MobileBertTokenizer
from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_phobert import PhobertTokenizer
from .tokenization_prophetnet import ProphetNetTokenizer
from .tokenization_rag import RagTokenizer
from .tokenization_retribert import RetriBertTokenizer
from .tokenization_roberta import RobertaTokenizer
from .tokenization_squeezebert import SqueezeBertTokenizer
from .tokenization_transfo_xl import TransfoXLTokenizer
from .tokenization_xlm import XLMTokenizer
from .utils import logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
    from .tokenization_bert_generation import BertGenerationTokenizer
    from .tokenization_camembert import CamembertTokenizer
    from .tokenization_marian import MarianTokenizer
    from .tokenization_mbart import MBartTokenizer
    from .tokenization_pegasus import PegasusTokenizer
    from .tokenization_reformer import ReformerTokenizer
    from .tokenization_t5 import T5Tokenizer
    from .tokenization_xlm_prophetnet import XLMProphetNetTokenizer
    from .tokenization_xlm_roberta import XLMRobertaTokenizer
    from .tokenization_xlnet import XLNetTokenizer
    from ..albert.tokenization_albert import AlbertTokenizer
    from ..bert_generation.tokenization_bert_generation import BertGenerationTokenizer
    from ..camembert.tokenization_camembert import CamembertTokenizer
    from ..marian.tokenization_marian import MarianTokenizer
    from ..mbart.tokenization_mbart import MBartTokenizer
    from ..pegasus.tokenization_pegasus import PegasusTokenizer
    from ..reformer.tokenization_reformer import ReformerTokenizer
    from ..t5.tokenization_t5 import T5Tokenizer
    from ..xlm_prophetnet.tokenization_xlm_prophetnet import XLMProphetNetTokenizer
    from ..xlm_roberta.tokenization_xlm_roberta import XLMRobertaTokenizer
    from ..xlnet.tokenization_xlnet import XLNetTokenizer
else:
    AlbertTokenizer = None
    BertGenerationTokenizer = None
@ -116,30 +116,30 @@ else:
    XLMProphetNetTokenizer = None

if is_tokenizers_available():
    from .tokenization_albert_fast import AlbertTokenizerFast
    from .tokenization_bart_fast import BartTokenizerFast
    from .tokenization_bert_fast import BertTokenizerFast
    from .tokenization_camembert_fast import CamembertTokenizerFast
    from .tokenization_distilbert_fast import DistilBertTokenizerFast
    from .tokenization_dpr_fast import DPRQuestionEncoderTokenizerFast
    from .tokenization_electra_fast import ElectraTokenizerFast
    from .tokenization_funnel_fast import FunnelTokenizerFast
    from .tokenization_gpt2_fast import GPT2TokenizerFast
    from .tokenization_herbert_fast import HerbertTokenizerFast
    from .tokenization_layoutlm_fast import LayoutLMTokenizerFast
    from .tokenization_longformer_fast import LongformerTokenizerFast
    from .tokenization_lxmert_fast import LxmertTokenizerFast
    from .tokenization_mbart_fast import MBartTokenizerFast
    from .tokenization_mobilebert_fast import MobileBertTokenizerFast
    from .tokenization_openai_fast import OpenAIGPTTokenizerFast
    from .tokenization_pegasus_fast import PegasusTokenizerFast
    from .tokenization_reformer_fast import ReformerTokenizerFast
    from .tokenization_retribert_fast import RetriBertTokenizerFast
    from .tokenization_roberta_fast import RobertaTokenizerFast
    from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
    from .tokenization_t5_fast import T5TokenizerFast
    from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
    from .tokenization_xlnet_fast import XLNetTokenizerFast
    from ..albert.tokenization_albert_fast import AlbertTokenizerFast
    from ..bart.tokenization_bart_fast import BartTokenizerFast
    from ..bert.tokenization_bert_fast import BertTokenizerFast
    from ..camembert.tokenization_camembert_fast import CamembertTokenizerFast
    from ..distilbert.tokenization_distilbert_fast import DistilBertTokenizerFast
    from ..dpr.tokenization_dpr_fast import DPRQuestionEncoderTokenizerFast
    from ..electra.tokenization_electra_fast import ElectraTokenizerFast
    from ..funnel.tokenization_funnel_fast import FunnelTokenizerFast
    from ..gpt2.tokenization_gpt2_fast import GPT2TokenizerFast
    from ..herbert.tokenization_herbert_fast import HerbertTokenizerFast
    from ..layoutlm.tokenization_layoutlm_fast import LayoutLMTokenizerFast
    from ..longformer.tokenization_longformer_fast import LongformerTokenizerFast
    from ..lxmert.tokenization_lxmert_fast import LxmertTokenizerFast
    from ..mbart.tokenization_mbart_fast import MBartTokenizerFast
    from ..mobilebert.tokenization_mobilebert_fast import MobileBertTokenizerFast
    from ..openai.tokenization_openai_fast import OpenAIGPTTokenizerFast
    from ..pegasus.tokenization_pegasus_fast import PegasusTokenizerFast
    from ..reformer.tokenization_reformer_fast import ReformerTokenizerFast
    from ..retribert.tokenization_retribert_fast import RetriBertTokenizerFast
    from ..roberta.tokenization_roberta_fast import RobertaTokenizerFast
    from ..squeezebert.tokenization_squeezebert_fast import SqueezeBertTokenizerFast
    from ..t5.tokenization_t5_fast import T5TokenizerFast
    from ..xlm_roberta.tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
    from ..xlnet.tokenization_xlnet_fast import XLNetTokenizerFast
else:
    AlbertTokenizerFast = None
    BartTokenizerFast = None
24
src/transformers/models/bart/__init__.py
Normal file
@ -0,0 +1,24 @@
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

from ...file_utils import is_tf_available, is_tokenizers_available, is_torch_available
from .configuration_bart import BartConfig
from .tokenization_bart import BartTokenizer


if is_tokenizers_available():
    from .tokenization_bart_fast import BartTokenizerFast

if is_torch_available():
    from .modeling_bart import (
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
        BartForConditionalGeneration,
        BartForQuestionAnswering,
        BartForSequenceClassification,
        BartModel,
        PretrainedBartModel,
    )

if is_tf_available():
    from .modeling_tf_bart import TFBartForConditionalGeneration, TFBartModel
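Note: with `models/bart/__init__.py` in place, BART internals live two package levels deeper, while the names re-exported from the top-level `transformers` package are expected to keep working (a later hunk in this diff still imports `BartModel` and `BartTokenizer` from `transformers` directly). Assuming those re-exports, the contrast looks like this:

    # Top-level imports are unchanged by the reorganization:
    from transformers import BartForConditionalGeneration, BartTokenizer

    # Deep imports gain the "models.bart" segments:
    from transformers.models.bart.configuration_bart import BartConfig
    from transformers.models.bart.modeling_bart import BartModel

    # The pre-reorganization deep path is gone:
    # from transformers.modeling_bart import BartModel  # ModuleNotFoundError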
@ -14,8 +14,8 @@
# limitations under the License.
""" BART configuration """

from .configuration_utils import PretrainedConfig
from .utils import logging
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)
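Note: the `.` to `...` rewrite in `configuration_bart.py` above is mechanical. The file moved from `src/transformers/` down to `src/transformers/models/bart/`, two packages deeper, so one dot now means the model's own package, two dots the `models` level (hence sibling packages like `..roberta`), and three dots the library root. A runnable illustration of how those forms resolve, using paths drawn from this diff and assuming a post-reorganization transformers install:

    import importlib

    # Three dots climb from transformers.models.bart back to transformers:
    assert importlib.import_module(
        "...utils", package="transformers.models.bart"
    ) is importlib.import_module("transformers.utils")

    # Two dots reach a sibling model package:
    assert importlib.import_module(
        "..roberta.tokenization_roberta", package="transformers.models.bart"
    ) is importlib.import_module("transformers.models.roberta.tokenization_roberta")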
@ -30,7 +30,7 @@ from transformers import (
    BartModel,
    BartTokenizer,
)
from transformers.modeling_bart import _make_linear_from_emb
from transformers.models.bart.modeling_bart import _make_linear_from_emb
from transformers.utils import logging


@ -24,16 +24,15 @@ import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import CrossEntropyLoss

from .activations import ACT2FN
from .configuration_bart import BartConfig
from .file_utils import (
from ...activations import ACT2FN
from ...file_utils import (
    add_code_sample_docstrings,
    add_end_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)
from .modeling_outputs import (
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    Seq2SeqLMOutput,
@ -41,8 +40,9 @@ from .modeling_outputs import (
    Seq2SeqQuestionAnsweringModelOutput,
    Seq2SeqSequenceClassifierOutput,
)
from .modeling_utils import PreTrainedModel
from .utils import logging
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_bart import BartConfig


logger = logging.get_logger(__name__)
@ -24,13 +24,17 @@ import tensorflow as tf
from tensorflow import Tensor
from tensorflow.keras.layers import Dense, Layer, LayerNormalization

from .activations_tf import ACT2FN
from .configuration_bart import BartConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from .modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPast, TFSeq2SeqLMOutput, TFSeq2SeqModelOutput
from ...activations_tf import ACT2FN
from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...modeling_tf_outputs import (
    TFBaseModelOutput,
    TFBaseModelOutputWithPast,
    TFSeq2SeqLMOutput,
    TFSeq2SeqModelOutput,
)

# Public API
from .modeling_tf_utils import (
from ...modeling_tf_utils import (
    DUMMY_INPUTS,
    TFPreTrainedModel,
    TFSharedEmbeddings,
@ -39,8 +43,9 @@ from .modeling_tf_utils import (
    keras_serializable,
    shape_list,
)
from .tokenization_utils_base import BatchEncoding
from .utils import logging
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_bart import BartConfig


_CONFIG_FOR_DOC = "BartConfig"
@ -15,9 +15,9 @@

from typing import List, Optional

from .tokenization_roberta import RobertaTokenizer
from .tokenization_utils_base import BatchEncoding
from .utils import logging
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from ..roberta.tokenization_roberta import RobertaTokenizer


logger = logging.get_logger(__name__)
@ -15,10 +15,10 @@

from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from ..roberta.tokenization_roberta_fast import RobertaTokenizerFast
from .tokenization_bart import BartTokenizer
from .tokenization_roberta_fast import RobertaTokenizerFast
from .tokenization_utils_base import BatchEncoding
from .utils import logging


logger = logging.get_logger(__name__)
48
src/transformers/models/bert/__init__.py
Normal file
@ -0,0 +1,48 @@
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

from ...file_utils import is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer


if is_tokenizers_available():
    from .tokenization_bert_fast import BertTokenizerFast

if is_torch_available():
    from .modeling_bert import (
        BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        BertForMaskedLM,
        BertForMultipleChoice,
        BertForNextSentencePrediction,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertForTokenClassification,
        BertLayer,
        BertLMHeadModel,
        BertModel,
        BertPreTrainedModel,
        load_tf_weights_in_bert,
    )

if is_tf_available():
    from .modeling_tf_bert import (
        TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFBertEmbeddings,
        TFBertForMaskedLM,
        TFBertForMultipleChoice,
        TFBertForNextSentencePrediction,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertForTokenClassification,
        TFBertLMHeadModel,
        TFBertMainLayer,
        TFBertModel,
        TFBertPreTrainedModel,
    )

if is_flax_available():
    from .modeling_flax_bert import FlaxBertModel
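Note: because `models/bert/__init__.py` gates each backend behind its own `is_*_available()` check, downstream code can select whichever implementation the environment provides. A sketch; the `Model` alias is ours and simply mirrors the library's bind-`None`-as-fallback convention:

    from transformers.file_utils import is_tf_available, is_torch_available

    if is_torch_available():
        from transformers.models.bert import BertModel as Model
    elif is_tf_available():
        from transformers.models.bert import TFBertModel as Model
    else:
        Model = None  # neither backend installed

    if Model is not None:
        model = Model.from_pretrained("bert-base-uncased")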
@ -15,8 +15,8 @@
# limitations under the License.
""" BERT model configuration """

from .configuration_utils import PretrainedConfig
from .utils import logging
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)
@ -27,16 +27,15 @@ import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from .activations import ACT2FN
from .configuration_bert import BertConfig
from .file_utils import (
from ...activations import ACT2FN
from ...file_utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)
from .modeling_outputs import (
from ...modeling_outputs import (
    BaseModelOutputWithCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
@ -47,13 +46,14 @@ from .modeling_outputs import (
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from .modeling_utils import (
from ...modeling_utils import (
    PreTrainedModel,
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    prune_linear_layer,
)
from .utils import logging
from ...utils import logging
from .configuration_bert import BertConfig


logger = logging.get_logger(__name__)
@ -21,10 +21,10 @@ import flax.linen as nn
import jax
import jax.numpy as jnp

from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_flax_utils import FlaxPreTrainedModel, gelu
from ...utils import logging
from .configuration_bert import BertConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from .modeling_flax_utils import FlaxPreTrainedModel, gelu
from .utils import logging


logger = logging.get_logger(__name__)
@ -21,9 +21,8 @@ from typing import Optional, Tuple

import tensorflow as tf

from .activations_tf import get_tf_activation
from .configuration_bert import BertConfig
from .file_utils import (
from ...activations_tf import get_tf_activation
from ...file_utils import (
    MULTIPLE_CHOICE_DUMMY_INPUTS,
    ModelOutput,
    add_code_sample_docstrings,
@ -31,7 +30,7 @@ from .file_utils import (
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)
from .modeling_tf_outputs import (
from ...modeling_tf_outputs import (
    TFBaseModelOutput,
    TFBaseModelOutputWithPooling,
    TFCausalLMOutput,
@ -42,7 +41,7 @@ from .modeling_tf_outputs import (
    TFSequenceClassifierOutput,
    TFTokenClassifierOutput,
)
from .modeling_tf_utils import (
from ...modeling_tf_utils import (
    TFCausalLanguageModelingLoss,
    TFMaskedLanguageModelingLoss,
    TFMultipleChoiceLoss,
@ -55,8 +54,9 @@ from .modeling_tf_utils import (
    keras_serializable,
    shape_list,
)
from .tokenization_utils import BatchEncoding
from .utils import logging
from ...tokenization_utils import BatchEncoding
from ...utils import logging
from .configuration_bert import BertConfig


logger = logging.get_logger(__name__)
@ -20,8 +20,8 @@ import os
import unicodedata
from typing import List, Optional, Tuple

from .tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from .utils import logging
from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from ...utils import logging


logger = logging.get_logger(__name__)
@ -19,9 +19,9 @@ from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
from .tokenization_utils_fast import PreTrainedTokenizerFast
from .utils import logging


logger = logging.get_logger(__name__)
17
src/transformers/models/bert_generation/__init__.py
Normal file
@ -0,0 +1,17 @@
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

from ...file_utils import is_sentencepiece_available, is_torch_available
from .configuration_bert_generation import BertGenerationConfig


if is_sentencepiece_available():
    from .tokenization_bert_generation import BertGenerationTokenizer

if is_torch_available():
    from .modeling_bert_generation import (
        BertGenerationDecoder,
        BertGenerationEncoder,
        load_tf_weights_in_bert_generation,
    )
@ -14,7 +14,7 @@
# limitations under the License.
""" BertGeneration model configuration """

from .configuration_utils import PretrainedConfig
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
@ -20,17 +20,17 @@ import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from .configuration_bert_generation import BertGenerationConfig
from .file_utils import (
from ...file_utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)
from .modeling_bert import BertEncoder
from .modeling_outputs import BaseModelOutputWithCrossAttentions, CausalLMOutputWithCrossAttentions
from .modeling_utils import PreTrainedModel
from .utils import logging
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, CausalLMOutputWithCrossAttentions
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from ..bert.modeling_bert import BertEncoder
from .configuration_bert_generation import BertGenerationConfig


logger = logging.get_logger(__name__)
@ -21,8 +21,8 @@ from typing import List, Optional, Tuple

import sentencepiece as spm

from .tokenization_utils import PreTrainedTokenizer
from .utils import logging
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)
5
src/transformers/models/bert_japanese/__init__.py
Normal file
@ -0,0 +1,5 @@
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

from .tokenization_bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer
@ -21,8 +21,8 @@ import os
import unicodedata
from typing import Optional

from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer, load_vocab
from .utils import logging
from ...utils import logging
from ..bert.tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer, load_vocab


logger = logging.get_logger(__name__)
5
src/transformers/models/bertweet/__init__.py
Normal file
@ -0,0 +1,5 @@
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.

from .tokenization_bertweet import BertweetTokenizer
@ -24,8 +24,8 @@ from typing import List, Optional, Tuple

import regex

from .tokenization_utils import PreTrainedTokenizer
from .utils import logging
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)
Some files were not shown because too many files have changed in this diff.