diff --git a/.github/workflows/github-torch-hub.yml b/.github/workflows/github-torch-hub.yml index 9fa1a58b909..93b9c777bfe 100644 --- a/.github/workflows/github-torch-hub.yml +++ b/.github/workflows/github-torch-hub.yml @@ -8,6 +8,9 @@ on: jobs: torch_hub_integration: runs-on: ubuntu-latest + env: + # TODO quickfix but may need more investigation + ACTIONS_ALLOW_UNSECURE_COMMANDS: True steps: # no checkout necessary here. - name: Extract branch name diff --git a/.gitignore b/.gitignore index b5d8fc1ecdd..4137137f285 100644 --- a/.gitignore +++ b/.gitignore @@ -133,7 +133,6 @@ dmypy.json tensorflow_code # Models -models proc_data # examples diff --git a/docs/source/model_doc/albert.rst b/docs/source/model_doc/albert.rst index 15339e92f8a..1a2165ae25b 100644 --- a/docs/source/model_doc/albert.rst +++ b/docs/source/model_doc/albert.rst @@ -51,10 +51,10 @@ AlbertTokenizer Albert specific outputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: transformers.modeling_albert.AlbertForPreTrainingOutput +.. autoclass:: transformers.models.albert.modeling_albert.AlbertForPreTrainingOutput :members: -.. autoclass:: transformers.modeling_tf_albert.TFAlbertForPreTrainingOutput +.. autoclass:: transformers.models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput :members: diff --git a/docs/source/model_doc/bart.rst b/docs/source/model_doc/bart.rst index f75b7810c91..f2a111086fb 100644 --- a/docs/source/model_doc/bart.rst +++ b/docs/source/model_doc/bart.rst @@ -44,8 +44,8 @@ Implementation Notes - Bart doesn't use :obj:`token_type_ids` for sequence classification. Use :class:`~transformers.BartTokenizer` or :meth:`~transformers.BartTokenizer.encode` to get the proper splitting. - The forward pass of :class:`~transformers.BartModel` will create decoder inputs (using the helper function - :func:`transformers.modeling_bart._prepare_bart_decoder_inputs`) if they are not passed. This is different than some - other modeling APIs. + :func:`transformers.models.bart.modeling_bart._prepare_bart_decoder_inputs`) if they are not passed. This is + different than some other modeling APIs. - Model predictions are intended to be identical to the original implementation when :obj:`force_bos_token_to_be_generated=True`. This only works, however, if the string you pass to :func:`fairseq.encode` starts with a space. @@ -93,7 +93,7 @@ BartModel .. autoclass:: transformers.BartModel :members: forward -.. autofunction:: transformers.modeling_bart._prepare_bart_decoder_inputs +.. autofunction:: transformers.models.bart.modeling_bart._prepare_bart_decoder_inputs BartForConditionalGeneration diff --git a/docs/source/model_doc/bert.rst b/docs/source/model_doc/bert.rst index 8d1322bb095..589f6277f8e 100644 --- a/docs/source/model_doc/bert.rst +++ b/docs/source/model_doc/bert.rst @@ -57,10 +57,10 @@ BertTokenizerFast Bert specific outputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: transformers.modeling_bert.BertForPreTrainingOutput +.. autoclass:: transformers.models.bert.modeling_bert.BertForPreTrainingOutput :members: -.. autoclass:: transformers.modeling_tf_bert.TFBertForPreTrainingOutput +.. 
autoclass:: transformers.models.bert.modeling_tf_bert.TFBertForPreTrainingOutput :members: diff --git a/docs/source/model_doc/dpr.rst b/docs/source/model_doc/dpr.rst index a9e6fdf7a8a..86a60ff15da 100644 --- a/docs/source/model_doc/dpr.rst +++ b/docs/source/model_doc/dpr.rst @@ -71,13 +71,13 @@ DPRReaderTokenizerFast DPR specific outputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: transformers.modeling_dpr.DPRContextEncoderOutput +.. autoclass:: transformers.models.dpr.modeling_dpr.DPRContextEncoderOutput :members: -.. autoclass:: transformers.modeling_dpr.DPRQuestionEncoderOutput +.. autoclass:: transformers.models.dpr.modeling_dpr.DPRQuestionEncoderOutput :members: -.. autoclass:: transformers.modeling_dpr.DPRReaderOutput +.. autoclass:: transformers.models.dpr.modeling_dpr.DPRReaderOutput :members: diff --git a/docs/source/model_doc/electra.rst b/docs/source/model_doc/electra.rst index 51156a86ecc..35ed4412216 100644 --- a/docs/source/model_doc/electra.rst +++ b/docs/source/model_doc/electra.rst @@ -69,10 +69,10 @@ ElectraTokenizerFast Electra specific outputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: transformers.modeling_electra.ElectraForPreTrainingOutput +.. autoclass:: transformers.models.electra.modeling_electra.ElectraForPreTrainingOutput :members: -.. autoclass:: transformers.modeling_tf_electra.TFElectraForPreTrainingOutput +.. autoclass:: transformers.models.electra.modeling_tf_electra.TFElectraForPreTrainingOutput :members: diff --git a/docs/source/model_doc/funnel.rst b/docs/source/model_doc/funnel.rst index acea6e4e771..5d120449e9c 100644 --- a/docs/source/model_doc/funnel.rst +++ b/docs/source/model_doc/funnel.rst @@ -65,10 +65,10 @@ FunnelTokenizerFast Funnel specific outputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: transformers.modeling_funnel.FunnelForPreTrainingOutput +.. autoclass:: transformers.models.funnel.modeling_funnel.FunnelForPreTrainingOutput :members: -.. autoclass:: transformers.modeling_tf_funnel.TFFunnelForPreTrainingOutput +.. autoclass:: transformers.models.funnel.modeling_tf_funnel.TFFunnelForPreTrainingOutput :members: diff --git a/docs/source/model_doc/gpt.rst b/docs/source/model_doc/gpt.rst index 902073f56ce..9e7e1151094 100644 --- a/docs/source/model_doc/gpt.rst +++ b/docs/source/model_doc/gpt.rst @@ -72,10 +72,10 @@ OpenAIGPTTokenizerFast OpenAI specific outputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: transformers.modeling_openai.OpenAIGPTDoubleHeadsModelOutput +.. autoclass:: transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModelOutput :members: -.. autoclass:: transformers.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput +.. autoclass:: transformers.models.openai.modeling_tf_openai.TFOpenAIGPTDoubleHeadsModelOutput :members: diff --git a/docs/source/model_doc/gpt2.rst b/docs/source/model_doc/gpt2.rst index cf0fe6efdb6..5572e087844 100644 --- a/docs/source/model_doc/gpt2.rst +++ b/docs/source/model_doc/gpt2.rst @@ -60,10 +60,10 @@ GPT2TokenizerFast GPT2 specific outputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. 
autoclass:: transformers.modeling_gpt2.GPT2DoubleHeadsModelOutput +.. autoclass:: transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput :members: -.. autoclass:: transformers.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput +.. autoclass:: transformers.models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput :members: diff --git a/docs/source/model_doc/longformer.rst b/docs/source/model_doc/longformer.rst index 696a13c1808..0707255f2a0 100644 --- a/docs/source/model_doc/longformer.rst +++ b/docs/source/model_doc/longformer.rst @@ -93,29 +93,27 @@ LongformerTokenizerFast Longformer specific outputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: transformers.modeling_longformer.LongformerBaseModelOutput +.. autoclass:: transformers.models.longformer.modeling_longformer.LongformerBaseModelOutput :members: -.. autoclass:: transformers.modeling_longformer.LongformerBaseModelOutputWithPooling +.. autoclass:: transformers.models.longformer.modeling_longformer.LongformerBaseModelOutputWithPooling :members: -.. autoclass:: transformers.modeling_longformer.LongformerMultipleChoiceModelOutput +.. autoclass:: transformers.models.longformer.modeling_longformer.LongformerMultipleChoiceModelOutput :members: -.. autoclass:: transformers.modeling_longformer.LongformerQuestionAnsweringModelOutput +.. autoclass:: transformers.models.longformer.modeling_longformer.LongformerQuestionAnsweringModelOutput :members: -.. autoclass:: transformers.modeling_tf_longformer.TFLongformerBaseModelOutput +.. autoclass:: transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutput :members: -.. autoclass:: transformers.modeling_tf_longformer.TFLongformerBaseModelOutputWithPooling +.. autoclass:: transformers.models.longformer.modeling_tf_longformer.TFLongformerBaseModelOutputWithPooling :members: -.. autoclass:: transformers.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput +.. autoclass:: transformers.models.longformer.modeling_tf_longformer.TFLongformerQuestionAnsweringModelOutput :members: -LongformerModel -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LongformerModel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/source/model_doc/lxmert.rst b/docs/source/model_doc/lxmert.rst index adb97f0294f..d7d18767c9a 100644 --- a/docs/source/model_doc/lxmert.rst +++ b/docs/source/model_doc/lxmert.rst @@ -67,19 +67,19 @@ LxmertTokenizerFast Lxmert specific outputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: transformers.modeling_lxmert.LxmertModelOutput +.. autoclass:: transformers.models.lxmert.modeling_lxmert.LxmertModelOutput :members: -.. autoclass:: transformers.modeling_lxmert.LxmertForPreTrainingOutput +.. autoclass:: transformers.models.lxmert.modeling_lxmert.LxmertForPreTrainingOutput :members: -.. autoclass:: transformers.modeling_lxmert.LxmertForQuestionAnsweringOutput +.. autoclass:: transformers.models.lxmert.modeling_lxmert.LxmertForQuestionAnsweringOutput :members: -.. autoclass:: transformers.modeling_tf_lxmert.TFLxmertModelOutput +.. autoclass:: transformers.models.lxmert.modeling_tf_lxmert.TFLxmertModelOutput :members: -.. autoclass:: transformers.modeling_tf_lxmert.TFLxmertForPreTrainingOutput +.. 
autoclass:: transformers.models.lxmert.modeling_tf_lxmert.TFLxmertForPreTrainingOutput :members: diff --git a/docs/source/model_doc/mobilebert.rst b/docs/source/model_doc/mobilebert.rst index 62afb3ca2c1..66f7f6dff25 100644 --- a/docs/source/model_doc/mobilebert.rst +++ b/docs/source/model_doc/mobilebert.rst @@ -58,10 +58,10 @@ MobileBertTokenizerFast MobileBert specific outputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: transformers.modeling_mobilebert.MobileBertForPreTrainingOutput +.. autoclass:: transformers.models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput :members: -.. autoclass:: transformers.modeling_tf_mobilebert.TFMobileBertForPreTrainingOutput +.. autoclass:: transformers.models.mobilebert.modeling_tf_mobilebert.TFMobileBertForPreTrainingOutput :members: diff --git a/docs/source/model_doc/prophetnet.rst b/docs/source/model_doc/prophetnet.rst index 113387e273d..22814978697 100644 --- a/docs/source/model_doc/prophetnet.rst +++ b/docs/source/model_doc/prophetnet.rst @@ -47,16 +47,16 @@ ProphetNetTokenizer ProphetNet specific outputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: transformers.modeling_prophetnet.ProphetNetSeq2SeqLMOutput +.. autoclass:: transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput :members: -.. autoclass:: transformers.modeling_prophetnet.ProphetNetSeq2SeqModelOutput +.. autoclass:: transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput :members: -.. autoclass:: transformers.modeling_prophetnet.ProphetNetDecoderModelOutput +.. autoclass:: transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput :members: -.. autoclass:: transformers.modeling_prophetnet.ProphetNetDecoderLMOutput +.. autoclass:: transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput :members: ProphetNetModel diff --git a/docs/source/model_doc/rag.rst b/docs/source/model_doc/rag.rst index 87340e5ffd1..eb83c83e68c 100644 --- a/docs/source/model_doc/rag.rst +++ b/docs/source/model_doc/rag.rst @@ -50,10 +50,10 @@ RagTokenizer Rag specific outputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: transformers.modeling_rag.RetrievAugLMMarginOutput +.. autoclass:: transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput :members: -.. autoclass:: transformers.modeling_rag.RetrievAugLMOutput +.. autoclass:: transformers.models.rag.modeling_rag.RetrievAugLMOutput :members: RagRetriever diff --git a/docs/source/model_doc/transformerxl.rst b/docs/source/model_doc/transformerxl.rst index 3b5eb24c6f7..e12847da611 100644 --- a/docs/source/model_doc/transformerxl.rst +++ b/docs/source/model_doc/transformerxl.rst @@ -49,16 +49,16 @@ TransfoXLTokenizer TransfoXL specific outputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: transformers.modeling_transfo_xl.TransfoXLModelOutput +.. autoclass:: transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput :members: -.. autoclass:: transformers.modeling_transfo_xl.TransfoXLLMHeadModelOutput +.. autoclass:: transformers.models.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput :members: -.. autoclass:: transformers.modeling_tf_transfo_xl.TFTransfoXLModelOutput +.. 
autoclass:: transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput :members: -.. autoclass:: transformers.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput +.. autoclass:: transformers.models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput :members: diff --git a/docs/source/model_doc/xlm.rst b/docs/source/model_doc/xlm.rst index 6b76df1760c..df9eec59a24 100644 --- a/docs/source/model_doc/xlm.rst +++ b/docs/source/model_doc/xlm.rst @@ -50,7 +50,7 @@ XLMTokenizer XLM specific outputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: transformers.modeling_xlm.XLMForQuestionAnsweringOutput +.. autoclass:: transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput :members: diff --git a/docs/source/model_doc/xlnet.rst b/docs/source/model_doc/xlnet.rst index ef0e6097a14..2d20ac2e73f 100644 --- a/docs/source/model_doc/xlnet.rst +++ b/docs/source/model_doc/xlnet.rst @@ -53,43 +53,43 @@ XLNetTokenizer XLNet specific outputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autoclass:: transformers.modeling_xlnet.XLNetModelOutput +.. autoclass:: transformers.models.xlnet.modeling_xlnet.XLNetModelOutput :members: -.. autoclass:: transformers.modeling_xlnet.XLNetLMHeadModelOutput +.. autoclass:: transformers.models.xlnet.modeling_xlnet.XLNetLMHeadModelOutput :members: -.. autoclass:: transformers.modeling_xlnet.XLNetForSequenceClassificationOutput +.. autoclass:: transformers.models.xlnet.modeling_xlnet.XLNetForSequenceClassificationOutput :members: -.. autoclass:: transformers.modeling_xlnet.XLNetForMultipleChoiceOutput +.. autoclass:: transformers.models.xlnet.modeling_xlnet.XLNetForMultipleChoiceOutput :members: -.. autoclass:: transformers.modeling_xlnet.XLNetForTokenClassificationOutput +.. autoclass:: transformers.models.xlnet.modeling_xlnet.XLNetForTokenClassificationOutput :members: -.. autoclass:: transformers.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput +.. autoclass:: transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringSimpleOutput :members: -.. autoclass:: transformers.modeling_xlnet.XLNetForQuestionAnsweringOutput +.. autoclass:: transformers.models.xlnet.modeling_xlnet.XLNetForQuestionAnsweringOutput :members: -.. autoclass:: transformers.modeling_tf_xlnet.TFXLNetModelOutput +.. autoclass:: transformers.models.xlnet.modeling_tf_xlnet.TFXLNetModelOutput :members: -.. autoclass:: transformers.modeling_tf_xlnet.TFXLNetLMHeadModelOutput +.. autoclass:: transformers.models.xlnet.modeling_tf_xlnet.TFXLNetLMHeadModelOutput :members: -.. autoclass:: transformers.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput +.. autoclass:: transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForSequenceClassificationOutput :members: -.. autoclass:: transformers.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput +.. autoclass:: transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForMultipleChoiceOutput :members: -.. autoclass:: transformers.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput +.. autoclass:: transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForTokenClassificationOutput :members: -.. autoclass:: transformers.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput +.. 
autoclass:: transformers.models.xlnet.modeling_tf_xlnet.TFXLNetForQuestionAnsweringSimpleOutput :members: diff --git a/examples/bert-loses-patience/pabee/modeling_pabee_albert.py b/examples/bert-loses-patience/pabee/modeling_pabee_albert.py index 48abd8a73b6..960dd4d830b 100644 --- a/examples/bert-loses-patience/pabee/modeling_pabee_albert.py +++ b/examples/bert-loses-patience/pabee/modeling_pabee_albert.py @@ -21,7 +21,7 @@ import torch.nn as nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward -from transformers.modeling_albert import ( +from transformers.models.albert.modeling_albert import ( ALBERT_INPUTS_DOCSTRING, ALBERT_START_DOCSTRING, AlbertModel, diff --git a/examples/bert-loses-patience/pabee/modeling_pabee_bert.py b/examples/bert-loses-patience/pabee/modeling_pabee_bert.py index 6852ab0bd95..89de6168ec1 100644 --- a/examples/bert-loses-patience/pabee/modeling_pabee_bert.py +++ b/examples/bert-loses-patience/pabee/modeling_pabee_bert.py @@ -23,7 +23,7 @@ from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward -from transformers.modeling_bert import ( +from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, diff --git a/examples/contrib/run_camembert.py b/examples/contrib/run_camembert.py index 32ccce0e7e5..05e36c2517a 100644 --- a/examples/contrib/run_camembert.py +++ b/examples/contrib/run_camembert.py @@ -1,7 +1,6 @@ import torch -from transformers.modeling_camembert import CamembertForMaskedLM -from transformers.tokenization_camembert import CamembertTokenizer +from transformers import CamembertForMaskedLM, CamembertTokenizer def fill_mask(masked_input, model, tokenizer, topk=5): diff --git a/examples/contrib/run_swag.py b/examples/contrib/run_swag.py index e2b49a4128e..c699ffa6069 100644 --- a/examples/contrib/run_swag.py +++ b/examples/contrib/run_swag.py @@ -32,8 +32,14 @@ from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange import transformers -from transformers import WEIGHTS_NAME, AdamW, AutoConfig, AutoTokenizer, get_linear_schedule_with_warmup -from transformers.modeling_auto import AutoModelForMultipleChoice +from transformers import ( + WEIGHTS_NAME, + AdamW, + AutoConfig, + AutoModelForMultipleChoice, + AutoTokenizer, + get_linear_schedule_with_warmup, +) from transformers.trainer_utils import is_main_process diff --git a/examples/deebert/src/modeling_highway_bert.py b/examples/deebert/src/modeling_highway_bert.py index 5635fbee5f3..37d81248ed4 100644 --- a/examples/deebert/src/modeling_highway_bert.py +++ b/examples/deebert/src/modeling_highway_bert.py @@ -3,7 +3,7 @@ from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward -from transformers.modeling_bert import ( +from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, diff --git a/examples/deebert/src/modeling_highway_roberta.py b/examples/deebert/src/modeling_highway_roberta.py index 643da941e2e..7534026595c 100644 --- a/examples/deebert/src/modeling_highway_roberta.py +++ b/examples/deebert/src/modeling_highway_roberta.py @@ -3,9 +3,13 @@ from __future__ import absolute_import, division, print_function, unicode_litera import torch.nn as nn from torch.nn import 
CrossEntropyLoss, MSELoss -from transformers.configuration_roberta import RobertaConfig +from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward -from transformers.modeling_roberta import ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings +from transformers.models.roberta.modeling_roberta import ( + ROBERTA_INPUTS_DOCSTRING, + ROBERTA_START_DOCSTRING, + RobertaEmbeddings, +) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy diff --git a/examples/movement-pruning/emmental/modeling_bert_masked.py b/examples/movement-pruning/emmental/modeling_bert_masked.py index c4f5a422a24..c686d39e344 100644 --- a/examples/movement-pruning/emmental/modeling_bert_masked.py +++ b/examples/movement-pruning/emmental/modeling_bert_masked.py @@ -16,7 +16,7 @@ """Masked Version of BERT. It replaces the `torch.nn.Linear` layers with :class:`~emmental.MaskedLinear` and add an additional parameters in the forward pass to compute the adaptive mask. -Built on top of `transformers.modeling_bert`""" +Built on top of `transformers.models.bert.modeling_bert`""" import logging @@ -29,8 +29,8 @@ from torch.nn import CrossEntropyLoss, MSELoss from emmental import MaskedBertConfig from emmental.modules import MaskedLinear from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward -from transformers.modeling_bert import ACT2FN, BertLayerNorm, load_tf_weights_in_bert from transformers.modeling_utils import PreTrainedModel, prune_linear_layer +from transformers.models.bert.modeling_bert import ACT2FN, BertLayerNorm, load_tf_weights_in_bert logger = logging.getLogger(__name__) diff --git a/examples/rag/distributed_retriever.py b/examples/rag/distributed_retriever.py index 738ebda99eb..a931f183aa2 100644 --- a/examples/rag/distributed_retriever.py +++ b/examples/rag/distributed_retriever.py @@ -27,7 +27,7 @@ class RagPyTorchDistributedRetriever(RagRetriever): It is used to decode the question and then use the generator_tokenizer. generator_tokenizer (:class:`~transformers.PretrainedTokenizer`): The tokenizer used for the generator part of the RagModel. 
- index (:class:`~transformers.retrieval_rag.Index`, optional, defaults to the one defined by the configuration): + index (:class:`~transformers.models.rag.retrieval_rag.Index`, optional, defaults to the one defined by the configuration): If specified, use this index instead of the one built using the configuration """ diff --git a/examples/rag/test_distributed_retriever.py b/examples/rag/test_distributed_retriever.py index f7d1417a64a..e7a5d9ba91a 100644 --- a/examples/rag/test_distributed_retriever.py +++ b/examples/rag/test_distributed_retriever.py @@ -11,16 +11,12 @@ import numpy as np from datasets import Dataset import faiss -from transformers.configuration_bart import BartConfig -from transformers.configuration_dpr import DPRConfig -from transformers.configuration_rag import RagConfig +from transformers import BartConfig, BartTokenizer, DPRConfig, DPRQuestionEncoderTokenizer, RagConfig from transformers.file_utils import is_datasets_available, is_faiss_available, is_psutil_available, is_torch_available -from transformers.retrieval_rag import CustomHFIndex +from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES +from transformers.models.rag.retrieval_rag import CustomHFIndex +from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_torch_non_multi_gpu_but_fix_me -from transformers.tokenization_bart import BartTokenizer -from transformers.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES -from transformers.tokenization_dpr import DPRQuestionEncoderTokenizer -from transformers.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # noqa: E402 # isort:skip @@ -137,7 +133,7 @@ class RagRetrieverTest(TestCase): question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), ) - with patch("transformers.retrieval_rag.load_dataset") as mock_load_dataset: + with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: mock_load_dataset.return_value = dataset retriever = RagPyTorchDistributedRetriever( config, diff --git a/examples/seq2seq/distillation.py b/examples/seq2seq/distillation.py index 58f23345ede..9f2b9b713c4 100755 --- a/examples/seq2seq/distillation.py +++ b/examples/seq2seq/distillation.py @@ -16,7 +16,7 @@ from finetune import SummarizationModule, TranslationModule from finetune import main as ft_main from make_student import create_student_by_copying_alternating_layers, get_layers_to_supervise from transformers import AutoModelForSeq2SeqLM, MBartTokenizer, T5ForConditionalGeneration -from transformers.modeling_bart import shift_tokens_right +from transformers.models.bart.modeling_bart import shift_tokens_right from utils import calculate_bleu, check_output_dir, freeze_params, label_smoothed_nll_loss, use_task_specific_params diff --git a/examples/seq2seq/finetune.py b/examples/seq2seq/finetune.py index 7e57f7ba40f..a095c318b12 100755 --- a/examples/seq2seq/finetune.py +++ b/examples/seq2seq/finetune.py @@ -17,7 +17,7 @@ from torch.utils.data import DataLoader from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from transformers import MBartTokenizer, T5ForConditionalGeneration -from transformers.modeling_bart import shift_tokens_right +from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeq2SeqDataset, diff --git 
a/examples/seq2seq/seq2seq_trainer.py b/examples/seq2seq/seq2seq_trainer.py index 805a73871fb..520df0e87b1 100644 --- a/examples/seq2seq/seq2seq_trainer.py +++ b/examples/seq2seq/seq2seq_trainer.py @@ -5,8 +5,8 @@ from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging -from transformers.configuration_fsmt import FSMTConfig from transformers.file_utils import is_torch_tpu_available +from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, diff --git a/examples/seq2seq/test_datasets.py b/examples/seq2seq/test_datasets.py index 407be8dfdbb..61e5d7aa55d 100644 --- a/examples/seq2seq/test_datasets.py +++ b/examples/seq2seq/test_datasets.py @@ -10,7 +10,7 @@ from parameterized import parameterized from save_len_file import save_len_file from test_seq2seq_examples import ARTICLES, BART_TINY, MARIAN_TINY, MBART_TINY, SUMMARIES, T5_TINY, make_test_data_dir from transformers import AutoTokenizer -from transformers.modeling_bart import shift_tokens_right +from transformers.models.bart.modeling_bart import shift_tokens_right from transformers.testing_utils import TestCasePlus, require_torch_non_multi_gpu_but_fix_me, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset diff --git a/examples/seq2seq/test_tatoeba_conversion.py b/examples/seq2seq/test_tatoeba_conversion.py index 22adc6b2e62..065aed287a0 100644 --- a/examples/seq2seq/test_tatoeba_conversion.py +++ b/examples/seq2seq/test_tatoeba_conversion.py @@ -2,8 +2,8 @@ import os import tempfile import unittest -from transformers.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.file_utils import cached_property +from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import require_torch_non_multi_gpu_but_fix_me, slow diff --git a/examples/seq2seq/utils.py b/examples/seq2seq/utils.py index a96535e4641..b6994a1831d 100644 --- a/examples/seq2seq/utils.py +++ b/examples/seq2seq/utils.py @@ -21,7 +21,7 @@ from torch.utils.data import Dataset, Sampler from sentence_splitter import add_newline_to_end_of_each_sentence from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer from transformers.file_utils import cached_property -from transformers.modeling_bart import shift_tokens_right +from transformers.models.bart.modeling_bart import shift_tokens_right try: diff --git a/examples/text-generation/pplm/run_pplm.py b/examples/text-generation/pplm/run_pplm.py index 55a2a946274..96aee8be068 100644 --- a/examples/text-generation/pplm/run_pplm.py +++ b/examples/text-generation/pplm/run_pplm.py @@ -34,9 +34,8 @@ import torch.nn.functional as F from tqdm import trange from pplm_classification_head import ClassificationHead -from transformers import GPT2Tokenizer +from transformers import GPT2LMHeadModel, GPT2Tokenizer from transformers.file_utils import cached_path -from transformers.modeling_gpt2 import GPT2LMHeadModel PPLM_BOW = 1 diff --git a/model_cards/allenai/wmt16-en-de-12-1/README.md b/model_cards/allenai/wmt16-en-de-12-1/README.md index 848c2627e4b..4f896ea9d2e 100644 --- a/model_cards/allenai/wmt16-en-de-12-1/README.md +++ b/model_cards/allenai/wmt16-en-de-12-1/README.md @@ -35,8 +35,7 @@ All 3 models are available: #### How to use ```python -from transformers.tokenization_fsmt import FSMTTokenizer 
-from transformers.modeling_fsmt import FSMTForConditionalGeneration +from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/wmt16-en-de-12-1" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) diff --git a/model_cards/allenai/wmt16-en-de-dist-12-1/README.md b/model_cards/allenai/wmt16-en-de-dist-12-1/README.md index ea235789ca4..16c7900387c 100644 --- a/model_cards/allenai/wmt16-en-de-dist-12-1/README.md +++ b/model_cards/allenai/wmt16-en-de-dist-12-1/README.md @@ -35,8 +35,7 @@ All 3 models are available: #### How to use ```python -from transformers.tokenization_fsmt import FSMTTokenizer -from transformers.modeling_fsmt import FSMTForConditionalGeneration +from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/wmt16-en-de-dist-12-1" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) diff --git a/model_cards/allenai/wmt16-en-de-dist-6-1/README.md b/model_cards/allenai/wmt16-en-de-dist-6-1/README.md index a9c1459d188..426231dd81f 100644 --- a/model_cards/allenai/wmt16-en-de-dist-6-1/README.md +++ b/model_cards/allenai/wmt16-en-de-dist-6-1/README.md @@ -35,8 +35,7 @@ All 3 models are available: #### How to use ```python -from transformers.tokenization_fsmt import FSMTTokenizer -from transformers.modeling_fsmt import FSMTForConditionalGeneration +from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/wmt16-en-de-dist-6-1" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) diff --git a/model_cards/allenai/wmt19-de-en-6-6-base/README.md b/model_cards/allenai/wmt19-de-en-6-6-base/README.md index c946ad9f2f1..e5339bbc8a3 100644 --- a/model_cards/allenai/wmt19-de-en-6-6-base/README.md +++ b/model_cards/allenai/wmt19-de-en-6-6-base/README.md @@ -35,8 +35,7 @@ For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the S #### How to use ```python -from transformers.tokenization_fsmt import FSMTTokenizer -from transformers.modeling_fsmt import FSMTForConditionalGeneration +from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/wmt19-de-en-6-6-base" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) diff --git a/model_cards/allenai/wmt19-de-en-6-6-big/README.md b/model_cards/allenai/wmt19-de-en-6-6-big/README.md index f675f899a16..f348a772d03 100644 --- a/model_cards/allenai/wmt19-de-en-6-6-big/README.md +++ b/model_cards/allenai/wmt19-de-en-6-6-big/README.md @@ -35,8 +35,7 @@ For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the S #### How to use ```python -from transformers.tokenization_fsmt import FSMTTokenizer -from transformers.modeling_fsmt import FSMTForConditionalGeneration +from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/wmt19-de-en-6-6-big" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) diff --git a/model_cards/deepset/electra-base-squad2/README.md b/model_cards/deepset/electra-base-squad2/README.md index 99e654e85a3..06cfb52b111 100644 --- a/model_cards/deepset/electra-base-squad2/README.md +++ b/model_cards/deepset/electra-base-squad2/README.md @@ -47,9 +47,7 @@ Evaluated on the SQuAD 2.0 dev set with the [official eval script](https://works ### In Transformers ```python -from 
transformers.pipelines import pipeline -from transformers.modeling_auto import AutoModelForQuestionAnswering -from transformers.tokenization_auto import AutoTokenizer +from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline model_name = "deepset/electra-base-squad2" diff --git a/model_cards/deepset/minilm-uncased-squad2/README.md b/model_cards/deepset/minilm-uncased-squad2/README.md index ad5b46a8bfe..4c6604f7f68 100644 --- a/model_cards/deepset/minilm-uncased-squad2/README.md +++ b/model_cards/deepset/minilm-uncased-squad2/README.md @@ -48,9 +48,7 @@ Evaluated on the SQuAD 2.0 dev set with the [official eval script](https://works ### In Transformers ```python -from transformers.pipelines import pipeline -from transformers.modeling_auto import AutoModelForQuestionAnswering -from transformers.tokenization_auto import AutoTokenizer +from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline model_name = "deepset/minilm-uncased-squad2" diff --git a/model_cards/deepset/roberta-base-squad2-covid/README.md b/model_cards/deepset/roberta-base-squad2-covid/README.md index b34e4a0584d..970d15835bf 100644 --- a/model_cards/deepset/roberta-base-squad2-covid/README.md +++ b/model_cards/deepset/roberta-base-squad2-covid/README.md @@ -39,9 +39,8 @@ This model is the model obtained from the **third** fold of the cross-validation ### In Transformers ```python -from transformers.pipelines import pipeline -from transformers.modeling_auto import AutoModelForQuestionAnswering -from transformers.tokenization_auto import AutoTokenizer +from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline + model_name = "deepset/roberta-base-squad2-covid" diff --git a/model_cards/deepset/roberta-base-squad2-v2/README.md b/model_cards/deepset/roberta-base-squad2-v2/README.md index ea2955ac19a..7cbf6b88787 100644 --- a/model_cards/deepset/roberta-base-squad2-v2/README.md +++ b/model_cards/deepset/roberta-base-squad2-v2/README.md @@ -48,9 +48,7 @@ Evaluated on the SQuAD 2.0 dev set with the [official eval script](https://works ### In Transformers ```python -from transformers.pipelines import pipeline -from transformers.modeling_auto import AutoModelForQuestionAnswering -from transformers.tokenization_auto import AutoTokenizer +from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline model_name = "deepset/roberta-base-squad2-v2" diff --git a/model_cards/deepset/roberta-base-squad2/README.md b/model_cards/deepset/roberta-base-squad2/README.md index 48bc69ff1d4..9e443b06639 100644 --- a/model_cards/deepset/roberta-base-squad2/README.md +++ b/model_cards/deepset/roberta-base-squad2/README.md @@ -54,9 +54,7 @@ Evaluated on the SQuAD 2.0 dev set with the [official eval script](https://works ### In Transformers ```python -from transformers.pipelines import pipeline -from transformers.modeling_auto import AutoModelForQuestionAnswering -from transformers.tokenization_auto import AutoTokenizer +from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline model_name = "deepset/roberta-base-squad2" diff --git a/model_cards/deepset/xlm-roberta-large-squad2/README.md b/model_cards/deepset/xlm-roberta-large-squad2/README.md index db75ef4b587..8fcf54b7e63 100644 --- a/model_cards/deepset/xlm-roberta-large-squad2/README.md +++ b/model_cards/deepset/xlm-roberta-large-squad2/README.md @@ -63,9 +63,7 @@ Evaluated on German [XQuAD: xquad.de.json](https://github.com/deepmind/xquad) ### In Transformers ```python -from transformers.pipelines 
import pipeline -from transformers.modeling_auto import AutoModelForQuestionAnswering -from transformers.tokenization_auto import AutoTokenizer +from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline model_name = "deepset/xlm-roberta-large-squad2" diff --git a/model_cards/facebook/wmt19-de-en/README.md b/model_cards/facebook/wmt19-de-en/README.md index 2e0649aa219..924097c7524 100644 --- a/model_cards/facebook/wmt19-de-en/README.md +++ b/model_cards/facebook/wmt19-de-en/README.md @@ -36,8 +36,7 @@ All four models are available: #### How to use ```python -from transformers.tokenization_fsmt import FSMTTokenizer -from transformers.modeling_fsmt import FSMTForConditionalGeneration +from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-de-en" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) diff --git a/model_cards/facebook/wmt19-en-de/README.md b/model_cards/facebook/wmt19-en-de/README.md index 208835535b3..b4bacbbf8cc 100644 --- a/model_cards/facebook/wmt19-en-de/README.md +++ b/model_cards/facebook/wmt19-en-de/README.md @@ -36,8 +36,7 @@ All four models are available: #### How to use ```python -from transformers.tokenization_fsmt import FSMTTokenizer -from transformers.modeling_fsmt import FSMTForConditionalGeneration +from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-en-de" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) diff --git a/model_cards/facebook/wmt19-en-ru/README.md b/model_cards/facebook/wmt19-en-ru/README.md index b69af459031..aa55e7a3085 100644 --- a/model_cards/facebook/wmt19-en-ru/README.md +++ b/model_cards/facebook/wmt19-en-ru/README.md @@ -36,8 +36,7 @@ All four models are available: #### How to use ```python -from transformers.tokenization_fsmt import FSMTTokenizer -from transformers.modeling_fsmt import FSMTForConditionalGeneration +from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-en-ru" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) diff --git a/model_cards/facebook/wmt19-ru-en/README.md b/model_cards/facebook/wmt19-ru-en/README.md index 1e1cc5f1473..627aced50f0 100644 --- a/model_cards/facebook/wmt19-ru-en/README.md +++ b/model_cards/facebook/wmt19-ru-en/README.md @@ -36,8 +36,7 @@ All four models are available: #### How to use ```python -from transformers.tokenization_fsmt import FSMTTokenizer -from transformers.modeling_fsmt import FSMTForConditionalGeneration +from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-ru-en" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) diff --git a/model_cards/julien-c/bert-xsmall-dummy/README.md b/model_cards/julien-c/bert-xsmall-dummy/README.md index 36eef623272..7179f9e6588 100644 --- a/model_cards/julien-c/bert-xsmall-dummy/README.md +++ b/model_cards/julien-c/bert-xsmall-dummy/README.md @@ -2,11 +2,7 @@ ```python -from transformers.configuration_bert import BertConfig -from transformers.modeling_bert import BertForMaskedLM -from transformers.modeling_tf_bert import TFBertForMaskedLM -from transformers.tokenization_bert import BertTokenizer - +from transformers import BertConfig, BertForMaskedLM, BertTokenizer, TFBertForMaskedLM SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy" DIRNAME = 
"./bert-xsmall-dummy" diff --git a/scripts/fsmt/gen-card-allenai-wmt16.py b/scripts/fsmt/gen-card-allenai-wmt16.py index c74c08f2dc0..aa106fe2c3f 100755 --- a/scripts/fsmt/gen-card-allenai-wmt16.py +++ b/scripts/fsmt/gen-card-allenai-wmt16.py @@ -60,8 +60,7 @@ All 3 models are available: #### How to use ```python -from transformers.tokenization_fsmt import FSMTTokenizer -from transformers.modeling_fsmt import FSMTForConditionalGeneration +from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) diff --git a/scripts/fsmt/gen-card-allenai-wmt19.py b/scripts/fsmt/gen-card-allenai-wmt19.py index 4df5ca0542e..fd24f303bb0 100755 --- a/scripts/fsmt/gen-card-allenai-wmt19.py +++ b/scripts/fsmt/gen-card-allenai-wmt19.py @@ -59,8 +59,7 @@ For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the S #### How to use ```python -from transformers.tokenization_fsmt import FSMTTokenizer -from transformers.modeling_fsmt import FSMTForConditionalGeneration +from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) diff --git a/scripts/fsmt/gen-card-facebook-wmt19.py b/scripts/fsmt/gen-card-facebook-wmt19.py index 8a7e937af85..eb8507f56d6 100755 --- a/scripts/fsmt/gen-card-facebook-wmt19.py +++ b/scripts/fsmt/gen-card-facebook-wmt19.py @@ -63,8 +63,7 @@ All four models are available: #### How to use ```python -from transformers.tokenization_fsmt import FSMTTokenizer -from transformers.modeling_fsmt import FSMTForConditionalGeneration +from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index d964ed6b5d4..ee5e6806113 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -27,46 +27,10 @@ from .integrations import ( # isort:skip is_wandb_available, ) -# Configurations -from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig -from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, AutoConfig -from .configuration_bart import BartConfig -from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig -from .configuration_bert_generation import BertGenerationConfig -from .configuration_blenderbot import BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig -from .configuration_camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig -from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig -from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig -from .configuration_distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig -from .configuration_dpr import DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, DPRConfig -from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig -from .configuration_encoder_decoder import EncoderDecoderConfig -from .configuration_flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig -from .configuration_fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig -from .configuration_funnel import 
FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig -from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config -from .configuration_layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig -from .configuration_longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig -from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig -from .configuration_marian import MarianConfig -from .configuration_mbart import MBartConfig -from .configuration_mmbt import MMBTConfig -from .configuration_mobilebert import MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig -from .configuration_openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig -from .configuration_pegasus import PegasusConfig -from .configuration_prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig -from .configuration_rag import RagConfig -from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig -from .configuration_retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig -from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig -from .configuration_squeezebert import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig -from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config -from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig +# Configuration from .configuration_utils import PretrainedConfig -from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig -from .configuration_xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig -from .configuration_xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig -from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig + +# Data from .data import ( DataProcessor, InputExample, @@ -130,6 +94,77 @@ from .modeling_tf_pytorch_utils import ( load_tf2_model_in_pytorch_model, load_tf2_weights_in_pytorch_model, ) +from .models.albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig +from .models.auto import ( + ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, + CONFIG_MAPPING, + TOKENIZER_MAPPING, + AutoConfig, + AutoTokenizer, +) +from .models.bart import BartConfig, BartTokenizer +from .models.bert import ( + BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, + BasicTokenizer, + BertConfig, + BertTokenizer, + WordpieceTokenizer, +) +from .models.bert_generation import BertGenerationConfig +from .models.bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer +from .models.bertweet import BertweetTokenizer +from .models.blenderbot import ( + BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, + BlenderbotConfig, + BlenderbotSmallTokenizer, + BlenderbotTokenizer, +) +from .models.camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig +from .models.ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig, CTRLTokenizer +from .models.deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaTokenizer +from .models.distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertTokenizer +from .models.dpr import ( + DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, + DPRConfig, + DPRContextEncoderTokenizer, + DPRQuestionEncoderTokenizer, + DPRReaderOutput, + DPRReaderTokenizer, +) +from .models.electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraTokenizer +from 
.models.encoder_decoder import EncoderDecoderConfig +from .models.flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig, FlaubertTokenizer +from .models.fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig, FSMTTokenizer +from .models.funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig, FunnelTokenizer +from .models.gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2Tokenizer +from .models.herbert import HerbertTokenizer +from .models.layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig, LayoutLMTokenizer +from .models.longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerTokenizer +from .models.lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig, LxmertTokenizer +from .models.marian import MarianConfig +from .models.mbart import MBartConfig +from .models.mmbt import MMBTConfig +from .models.mobilebert import MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertTokenizer +from .models.openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig, OpenAIGPTTokenizer +from .models.pegasus import PegasusConfig +from .models.phobert import PhobertTokenizer +from .models.prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig, ProphetNetTokenizer +from .models.rag import RagConfig, RagRetriever, RagTokenizer +from .models.reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig +from .models.retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig, RetriBertTokenizer +from .models.roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaTokenizer +from .models.squeezebert import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertTokenizer +from .models.t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config +from .models.transfo_xl import ( + TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, + TransfoXLConfig, + TransfoXLCorpus, + TransfoXLTokenizer, +) +from .models.xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMTokenizer +from .models.xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig +from .models.xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig +from .models.xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig # Pipelines from .pipelines import ( @@ -154,43 +189,7 @@ from .pipelines import ( pipeline, ) -# Retriever -from .retrieval_rag import RagRetriever - -# Tokenizers -from .tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer -from .tokenization_bart import BartTokenizer -from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer -from .tokenization_bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer -from .tokenization_bertweet import BertweetTokenizer -from .tokenization_blenderbot import BlenderbotSmallTokenizer, BlenderbotTokenizer -from .tokenization_ctrl import CTRLTokenizer -from .tokenization_deberta import DebertaTokenizer -from .tokenization_distilbert import DistilBertTokenizer -from .tokenization_dpr import ( - DPRContextEncoderTokenizer, - DPRQuestionEncoderTokenizer, - DPRReaderOutput, - DPRReaderTokenizer, -) -from .tokenization_electra import ElectraTokenizer -from .tokenization_flaubert import FlaubertTokenizer -from .tokenization_fsmt import FSMTTokenizer -from .tokenization_funnel import FunnelTokenizer -from .tokenization_gpt2 import GPT2Tokenizer -from .tokenization_herbert import HerbertTokenizer -from 
.tokenization_layoutlm import LayoutLMTokenizer -from .tokenization_longformer import LongformerTokenizer -from .tokenization_lxmert import LxmertTokenizer -from .tokenization_mobilebert import MobileBertTokenizer -from .tokenization_openai import OpenAIGPTTokenizer -from .tokenization_phobert import PhobertTokenizer -from .tokenization_prophetnet import ProphetNetTokenizer -from .tokenization_rag import RagTokenizer -from .tokenization_retribert import RetriBertTokenizer -from .tokenization_roberta import RobertaTokenizer -from .tokenization_squeezebert import SqueezeBertTokenizer -from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer +# Tokenization from .tokenization_utils import PreTrainedTokenizer from .tokenization_utils_base import ( AddedToken, @@ -201,54 +200,49 @@ from .tokenization_utils_base import ( TensorType, TokenSpan, ) -from .tokenization_xlm import XLMTokenizer if is_sentencepiece_available(): - from .tokenization_albert import AlbertTokenizer - from .tokenization_bert_generation import BertGenerationTokenizer - from .tokenization_camembert import CamembertTokenizer - from .tokenization_marian import MarianTokenizer - from .tokenization_mbart import MBartTokenizer - from .tokenization_pegasus import PegasusTokenizer - from .tokenization_reformer import ReformerTokenizer - from .tokenization_t5 import T5Tokenizer - from .tokenization_xlm_prophetnet import XLMProphetNetTokenizer - from .tokenization_xlm_roberta import XLMRobertaTokenizer - from .tokenization_xlnet import XLNetTokenizer + from .models.albert import AlbertTokenizer + from .models.bert_generation import BertGenerationTokenizer + from .models.camembert import CamembertTokenizer + from .models.marian import MarianTokenizer + from .models.mbart import MBartTokenizer + from .models.pegasus import PegasusTokenizer + from .models.reformer import ReformerTokenizer + from .models.t5 import T5Tokenizer + from .models.xlm_prophetnet import XLMProphetNetTokenizer + from .models.xlm_roberta import XLMRobertaTokenizer + from .models.xlnet import XLNetTokenizer else: from .utils.dummy_sentencepiece_objects import * if is_tokenizers_available(): - from .tokenization_albert_fast import AlbertTokenizerFast - from .tokenization_bart_fast import BartTokenizerFast - from .tokenization_bert_fast import BertTokenizerFast - from .tokenization_camembert_fast import CamembertTokenizerFast - from .tokenization_distilbert_fast import DistilBertTokenizerFast - from .tokenization_dpr_fast import ( - DPRContextEncoderTokenizerFast, - DPRQuestionEncoderTokenizerFast, - DPRReaderTokenizerFast, - ) - from .tokenization_electra_fast import ElectraTokenizerFast - from .tokenization_funnel_fast import FunnelTokenizerFast - from .tokenization_gpt2_fast import GPT2TokenizerFast - from .tokenization_herbert_fast import HerbertTokenizerFast - from .tokenization_layoutlm_fast import LayoutLMTokenizerFast - from .tokenization_longformer_fast import LongformerTokenizerFast - from .tokenization_lxmert_fast import LxmertTokenizerFast - from .tokenization_mbart_fast import MBartTokenizerFast - from .tokenization_mobilebert_fast import MobileBertTokenizerFast - from .tokenization_openai_fast import OpenAIGPTTokenizerFast - from .tokenization_pegasus_fast import PegasusTokenizerFast - from .tokenization_reformer_fast import ReformerTokenizerFast - from .tokenization_retribert_fast import RetriBertTokenizerFast - from .tokenization_roberta_fast import RobertaTokenizerFast - from .tokenization_squeezebert_fast import 
SqueezeBertTokenizerFast - from .tokenization_t5_fast import T5TokenizerFast + from .models.albert import AlbertTokenizerFast + from .models.bart import BartTokenizerFast + from .models.bert import BertTokenizerFast + from .models.camembert import CamembertTokenizerFast + from .models.distilbert import DistilBertTokenizerFast + from .models.dpr import DPRContextEncoderTokenizerFast, DPRQuestionEncoderTokenizerFast, DPRReaderTokenizerFast + from .models.electra import ElectraTokenizerFast + from .models.funnel import FunnelTokenizerFast + from .models.gpt2 import GPT2TokenizerFast + from .models.herbert import HerbertTokenizerFast + from .models.layoutlm import LayoutLMTokenizerFast + from .models.longformer import LongformerTokenizerFast + from .models.lxmert import LxmertTokenizerFast + from .models.mbart import MBartTokenizerFast + from .models.mobilebert import MobileBertTokenizerFast + from .models.openai import OpenAIGPTTokenizerFast + from .models.pegasus import PegasusTokenizerFast + from .models.reformer import ReformerTokenizerFast + from .models.retribert import RetriBertTokenizerFast + from .models.roberta import RobertaTokenizerFast + from .models.squeezebert import SqueezeBertTokenizerFast + from .models.t5 import T5TokenizerFast + from .models.xlm_roberta import XLMRobertaTokenizerFast + from .models.xlnet import XLNetTokenizerFast from .tokenization_utils_fast import PreTrainedTokenizerFast - from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast - from .tokenization_xlnet_fast import XLNetTokenizerFast if is_sentencepiece_available(): from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, convert_slow_tokenizer @@ -313,7 +307,8 @@ if is_torch_available(): TopPLogitsWarper, ) from .generation_utils import top_k_top_p_filtering - from .modeling_albert import ( + from .modeling_utils import Conv1D, PreTrainedModel, apply_chunking_to_forward, prune_layer + from .models.albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, @@ -325,7 +320,7 @@ if is_torch_available(): AlbertPreTrainedModel, load_tf_weights_in_albert, ) - from .modeling_auto import ( + from .models.auto import ( MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, MODEL_FOR_MULTIPLE_CHOICE_MAPPING, @@ -349,7 +344,7 @@ if is_torch_available(): AutoModelForTokenClassification, AutoModelWithLMHead, ) - from .modeling_bart import ( + from .models.bart import ( BART_PRETRAINED_MODEL_ARCHIVE_LIST, BartForConditionalGeneration, BartForQuestionAnswering, @@ -357,7 +352,7 @@ if is_torch_available(): BartModel, PretrainedBartModel, ) - from .modeling_bert import ( + from .models.bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, @@ -372,13 +367,13 @@ if is_torch_available(): BertPreTrainedModel, load_tf_weights_in_bert, ) - from .modeling_bert_generation import ( + from .models.bert_generation import ( BertGenerationDecoder, BertGenerationEncoder, load_tf_weights_in_bert_generation, ) - from .modeling_blenderbot import BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForConditionalGeneration - from .modeling_camembert import ( + from .models.blenderbot import BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForConditionalGeneration + from .models.camembert import ( CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, CamembertForCausalLM, CamembertForMaskedLM, @@ -388,14 +383,14 @@ if is_torch_available(): CamembertForTokenClassification, CamembertModel, ) - from .modeling_ctrl import CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, 
CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel - from .modeling_deberta import ( + from .models.ctrl import CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel + from .models.deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForSequenceClassification, DebertaModel, DebertaPreTrainedModel, ) - from .modeling_distilbert import ( + from .models.distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, @@ -405,7 +400,7 @@ if is_torch_available(): DistilBertModel, DistilBertPreTrainedModel, ) - from .modeling_dpr import ( + from .models.dpr import ( DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, @@ -416,7 +411,7 @@ if is_torch_available(): DPRQuestionEncoder, DPRReader, ) - from .modeling_electra import ( + from .models.electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForMaskedLM, ElectraForMultipleChoice, @@ -428,8 +423,8 @@ if is_torch_available(): ElectraPreTrainedModel, load_tf_weights_in_electra, ) - from .modeling_encoder_decoder import EncoderDecoderModel - from .modeling_flaubert import ( + from .models.encoder_decoder import EncoderDecoderModel + from .models.flaubert import ( FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertForMultipleChoice, FlaubertForQuestionAnswering, @@ -439,8 +434,8 @@ if is_torch_available(): FlaubertModel, FlaubertWithLMHeadModel, ) - from .modeling_fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel - from .modeling_funnel import ( + from .models.fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel + from .models.funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, @@ -452,7 +447,7 @@ if is_torch_available(): FunnelModel, load_tf_weights_in_funnel, ) - from .modeling_gpt2 import ( + from .models.gpt2 import ( GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, GPT2DoubleHeadsModel, GPT2ForSequenceClassification, @@ -461,13 +456,13 @@ if is_torch_available(): GPT2PreTrainedModel, load_tf_weights_in_gpt2, ) - from .modeling_layoutlm import ( + from .models.layoutlm import ( LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMForMaskedLM, LayoutLMForTokenClassification, LayoutLMModel, ) - from .modeling_longformer import ( + from .models.longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, @@ -477,7 +472,7 @@ if is_torch_available(): LongformerModel, LongformerSelfAttention, ) - from .modeling_lxmert import ( + from .models.lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, @@ -486,10 +481,10 @@ if is_torch_available(): LxmertVisualFeatureEncoder, LxmertXLayer, ) - from .modeling_marian import MarianMTModel - from .modeling_mbart import MBartForConditionalGeneration - from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings - from .modeling_mobilebert import ( + from .models.marian import MarianMTModel + from .models.mbart import MBartForConditionalGeneration + from .models.mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings + from .models.mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, @@ -503,7 +498,7 @@ if is_torch_available(): MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) - from .modeling_openai import ( + from .models.openai import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, 
OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, @@ -512,8 +507,8 @@ if is_torch_available(): OpenAIGPTPreTrainedModel, load_tf_weights_in_openai_gpt, ) - from .modeling_pegasus import PegasusForConditionalGeneration - from .modeling_prophetnet import ( + from .models.pegasus import PegasusForConditionalGeneration + from .models.prophetnet import ( PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, ProphetNetDecoder, ProphetNetEncoder, @@ -522,8 +517,8 @@ if is_torch_available(): ProphetNetModel, ProphetNetPreTrainedModel, ) - from .modeling_rag import RagModel, RagSequenceForGeneration, RagTokenForGeneration - from .modeling_reformer import ( + from .models.rag import RagModel, RagSequenceForGeneration, RagTokenForGeneration + from .models.reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, @@ -533,8 +528,8 @@ if is_torch_available(): ReformerModel, ReformerModelWithLMHead, ) - from .modeling_retribert import RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RetriBertModel, RetriBertPreTrainedModel - from .modeling_roberta import ( + from .models.retribert import RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RetriBertModel, RetriBertPreTrainedModel + from .models.roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, @@ -544,7 +539,7 @@ if is_torch_available(): RobertaForTokenClassification, RobertaModel, ) - from .modeling_squeezebert import ( + from .models.squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, @@ -555,14 +550,14 @@ if is_torch_available(): SqueezeBertModule, SqueezeBertPreTrainedModel, ) - from .modeling_t5 import ( + from .models.t5 import ( T5_PRETRAINED_MODEL_ARCHIVE_LIST, T5ForConditionalGeneration, T5Model, T5PreTrainedModel, load_tf_weights_in_t5, ) - from .modeling_transfo_xl import ( + from .models.transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLLMHeadModel, @@ -570,8 +565,7 @@ if is_torch_available(): TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) - from .modeling_utils import Conv1D, PreTrainedModel, apply_chunking_to_forward, prune_layer - from .modeling_xlm import ( + from .models.xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, @@ -582,7 +576,7 @@ if is_torch_available(): XLMPreTrainedModel, XLMWithLMHeadModel, ) - from .modeling_xlm_prophetnet import ( + from .models.xlm_prophetnet import ( XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLMProphetNetDecoder, XLMProphetNetEncoder, @@ -590,7 +584,7 @@ if is_torch_available(): XLMProphetNetForConditionalGeneration, XLMProphetNetModel, ) - from .modeling_xlm_roberta import ( + from .models.xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, @@ -600,7 +594,7 @@ if is_torch_available(): XLMRobertaForTokenClassification, XLMRobertaModel, ) - from .modeling_xlnet import ( + from .models.xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, @@ -638,7 +632,8 @@ if is_tf_available(): # Benchmarks from .benchmark.benchmark_tf import TensorFlowBenchmark from .generation_tf_utils import tf_top_k_top_p_filtering - from .modeling_tf_albert import ( + from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, shape_list + from .models.albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, @@ -650,7 
+645,7 @@ if is_tf_available(): TFAlbertModel, TFAlbertPreTrainedModel, ) - from .modeling_tf_auto import ( + from .models.auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, @@ -673,8 +668,8 @@ if is_tf_available(): TFAutoModelForTokenClassification, TFAutoModelWithLMHead, ) - from .modeling_tf_bart import TFBartForConditionalGeneration, TFBartModel - from .modeling_tf_bert import ( + from .models.bart import TFBartForConditionalGeneration, TFBartModel + from .models.bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, @@ -689,8 +684,8 @@ if is_tf_available(): TFBertModel, TFBertPreTrainedModel, ) - from .modeling_tf_blenderbot import TFBlenderbotForConditionalGeneration - from .modeling_tf_camembert import ( + from .models.blenderbot import TFBlenderbotForConditionalGeneration + from .models.camembert import ( TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFCamembertForMaskedLM, TFCamembertForMultipleChoice, @@ -699,13 +694,13 @@ if is_tf_available(): TFCamembertForTokenClassification, TFCamembertModel, ) - from .modeling_tf_ctrl import ( + from .models.ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) - from .modeling_tf_distilbert import ( + from .models.distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, @@ -716,7 +711,7 @@ if is_tf_available(): TFDistilBertModel, TFDistilBertPreTrainedModel, ) - from .modeling_tf_dpr import ( + from .models.dpr import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, @@ -727,7 +722,7 @@ if is_tf_available(): TFDPRQuestionEncoder, TFDPRReader, ) - from .modeling_tf_electra import ( + from .models.electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, @@ -738,7 +733,7 @@ if is_tf_available(): TFElectraModel, TFElectraPreTrainedModel, ) - from .modeling_tf_flaubert import ( + from .models.flaubert import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, @@ -747,7 +742,7 @@ if is_tf_available(): TFFlaubertModel, TFFlaubertWithLMHeadModel, ) - from .modeling_tf_funnel import ( + from .models.funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, @@ -758,7 +753,7 @@ if is_tf_available(): TFFunnelForTokenClassification, TFFunnelModel, ) - from .modeling_tf_gpt2 import ( + from .models.gpt2 import ( TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, TFGPT2DoubleHeadsModel, TFGPT2LMHeadModel, @@ -766,14 +761,14 @@ if is_tf_available(): TFGPT2Model, TFGPT2PreTrainedModel, ) - from .modeling_tf_longformer import ( + from .models.longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForQuestionAnswering, TFLongformerModel, TFLongformerSelfAttention, ) - from .modeling_tf_lxmert import ( + from .models.lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, @@ -781,9 +776,9 @@ if is_tf_available(): TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) - from .modeling_tf_marian import TFMarianMTModel - from .modeling_tf_mbart import TFMBartForConditionalGeneration - from .modeling_tf_mobilebert import ( + from .models.marian import TFMarianMTModel + from .models.mbart import 
TFMBartForConditionalGeneration + from .models.mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, @@ -796,7 +791,7 @@ if is_tf_available(): TFMobileBertModel, TFMobileBertPreTrainedModel, ) - from .modeling_tf_openai import ( + from .models.openai import ( TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, TFOpenAIGPTDoubleHeadsModel, TFOpenAIGPTLMHeadModel, @@ -804,8 +799,8 @@ if is_tf_available(): TFOpenAIGPTModel, TFOpenAIGPTPreTrainedModel, ) - from .modeling_tf_pegasus import TFPegasusForConditionalGeneration - from .modeling_tf_roberta import ( + from .models.pegasus import TFPegasusForConditionalGeneration + from .models.roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, @@ -816,13 +811,13 @@ if is_tf_available(): TFRobertaModel, TFRobertaPreTrainedModel, ) - from .modeling_tf_t5 import ( + from .models.t5 import ( TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST, TFT5ForConditionalGeneration, TFT5Model, TFT5PreTrainedModel, ) - from .modeling_tf_transfo_xl import ( + from .models.transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLLMHeadModel, @@ -830,8 +825,7 @@ if is_tf_available(): TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) - from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, shape_list - from .modeling_tf_xlm import ( + from .models.xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, @@ -842,7 +836,7 @@ if is_tf_available(): TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) - from .modeling_tf_xlm_roberta import ( + from .models.xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, @@ -851,7 +845,7 @@ if is_tf_available(): TFXLMRobertaForTokenClassification, TFXLMRobertaModel, ) - from .modeling_tf_xlnet import ( + from .models.xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, @@ -876,8 +870,8 @@ else: if is_flax_available(): - from .modeling_flax_bert import FlaxBertModel - from .modeling_flax_roberta import FlaxRobertaModel + from .models.bert import FlaxBertModel + from .models.roberta import FlaxRobertaModel else: # Import the same objects as dummies to get them in the namespace. # They will raise an import error if the user tries to instantiate / use them. 
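[Editorial note, not part of the patch] The `src/transformers/__init__.py` hunks above re-export every tokenizer and model class from its new per-model package under `transformers.models.<model_name>`, keeping the optional-dependency guards (`is_sentencepiece_available()`, `is_tokenizers_available()`, `is_torch_available()`, `is_tf_available()`, `is_flax_available()`) around the corresponding imports. A minimal sketch of what this means for downstream code, assuming a transformers build that includes this reorganization (the specific classes below are only examples):

    # Top-level (public) imports are unaffected by the move:
    from transformers import AlbertTokenizer, AutoModel

    # Code that reached into the old flat modules has to follow the renames,
    # e.g. transformers.modeling_bert -> transformers.models.bert.modeling_bert:
    from transformers.models.bert.modeling_bert import BertModel
    from transformers.models.auto.modeling_auto import MODEL_MAPPING  # was transformers.modeling_auto

The per-model `__init__.py` files added later in this diff (for example `models/albert/__init__.py`) mirror the same availability checks, so importing a subpackage does not pull in torch, TensorFlow, or sentencepiece unless they are installed.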
diff --git a/src/transformers/benchmark/benchmark.py b/src/transformers/benchmark/benchmark.py index 8ad36e1d8b0..d9b17870f96 100644 --- a/src/transformers/benchmark/benchmark.py +++ b/src/transformers/benchmark/benchmark.py @@ -23,7 +23,7 @@ from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..file_utils import is_py3nvml_available, is_torch_available -from ..modeling_auto import MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING +from ..models.auto.modeling_auto import MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING from ..utils import logging from .benchmark_utils import ( Benchmark, diff --git a/src/transformers/benchmark/benchmark_tf.py b/src/transformers/benchmark/benchmark_tf.py index b82850b3aa5..030c0d22157 100644 --- a/src/transformers/benchmark/benchmark_tf.py +++ b/src/transformers/benchmark/benchmark_tf.py @@ -25,7 +25,7 @@ from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..file_utils import is_py3nvml_available, is_tf_available -from ..modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING +from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import logging from .benchmark_utils import ( Benchmark, diff --git a/src/transformers/benchmark/benchmark_utils.py b/src/transformers/benchmark/benchmark_utils.py index 7a9f538eeb5..d5c6d04483e 100644 --- a/src/transformers/benchmark/benchmark_utils.py +++ b/src/transformers/benchmark/benchmark_utils.py @@ -327,7 +327,7 @@ def start_memory_tracing( - `modules_to_trace`: (None, string, list/tuple of string) if None, all events are recorded if string or list of strings: only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or - 'transformers.modeling_gpt2') + 'transformers.models.gpt2.modeling_gpt2') - `modules_not_to_trace`: (None, string, list/tuple of string) if None, no module is avoided if string or list of strings: events from the listed module/sub-module will not be recorded (e.g. 
'torch') - `events_to_trace`: string or list of string of events to be recorded (see official python doc for diff --git a/src/transformers/commands/convert.py b/src/transformers/commands/convert.py index 1e054b6a30e..ccae2899dab 100644 --- a/src/transformers/commands/convert.py +++ b/src/transformers/commands/convert.py @@ -73,7 +73,7 @@ class ConvertCommand(BaseTransformersCLICommand): def run(self): if self._model_type == "albert": try: - from transformers.convert_albert_original_tf_checkpoint_to_pytorch import ( + from transformers.models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: @@ -82,7 +82,7 @@ class ConvertCommand(BaseTransformersCLICommand): convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) elif self._model_type == "bert": try: - from transformers.convert_bert_original_tf_checkpoint_to_pytorch import ( + from transformers.models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: @@ -91,7 +91,7 @@ class ConvertCommand(BaseTransformersCLICommand): convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) elif self._model_type == "funnel": try: - from transformers.convert_funnel_original_tf_checkpoint_to_pytorch import ( + from transformers.models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: @@ -99,14 +99,14 @@ class ConvertCommand(BaseTransformersCLICommand): convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) elif self._model_type == "gpt": - from transformers.convert_openai_original_tf_checkpoint_to_pytorch import ( + from transformers.models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) elif self._model_type == "transfo_xl": try: - from transformers.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( + from transformers.models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: @@ -123,7 +123,7 @@ class ConvertCommand(BaseTransformersCLICommand): ) elif self._model_type == "gpt2": try: - from transformers.convert_gpt2_original_tf_checkpoint_to_pytorch import ( + from transformers.models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import ( convert_gpt2_checkpoint_to_pytorch, ) except ImportError: @@ -132,7 +132,7 @@ class ConvertCommand(BaseTransformersCLICommand): convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) elif self._model_type == "xlnet": try: - from transformers.convert_xlnet_original_tf_checkpoint_to_pytorch import ( + from transformers.models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: @@ -142,13 +142,13 @@ class ConvertCommand(BaseTransformersCLICommand): self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name ) elif self._model_type == "xlm": - from transformers.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( + from transformers.models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, 
self._pytorch_dump_output) elif self._model_type == "lxmert": - from transformers.convert_lxmert_original_pytorch_checkpoint_to_pytorch import ( + from transformers.models.lxmert.convert_lxmert_original_pytorch_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) diff --git a/src/transformers/data/datasets/squad.py b/src/transformers/data/datasets/squad.py index 703cd1bc4ed..5c903907a16 100644 --- a/src/transformers/data/datasets/squad.py +++ b/src/transformers/data/datasets/squad.py @@ -9,7 +9,7 @@ from torch.utils.data.dataset import Dataset from filelock import FileLock -from ...modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING +from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features diff --git a/src/transformers/data/metrics/squad_metrics.py b/src/transformers/data/metrics/squad_metrics.py index 137b084eebf..9b775bfa182 100644 --- a/src/transformers/data/metrics/squad_metrics.py +++ b/src/transformers/data/metrics/squad_metrics.py @@ -14,7 +14,7 @@ import math import re import string -from transformers.tokenization_bert import BasicTokenizer +from transformers import BasicTokenizer from ...utils import logging diff --git a/src/transformers/data/processors/squad.py b/src/transformers/data/processors/squad.py index 167cf3ee48d..991759a331e 100644 --- a/src/transformers/data/processors/squad.py +++ b/src/transformers/data/processors/squad.py @@ -7,7 +7,7 @@ import numpy as np from tqdm import tqdm from ...file_utils import is_tf_available, is_torch_available -from ...tokenization_bert import whitespace_tokenize +from ...models.bert.tokenization_bert import whitespace_tokenize from ...tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase, TruncationStrategy from ...utils import logging from .utils import DataProcessor diff --git a/src/transformers/modelcard.py b/src/transformers/modelcard.py index c0a3d105782..2cd3673055b 100644 --- a/src/transformers/modelcard.py +++ b/src/transformers/modelcard.py @@ -19,7 +19,6 @@ import copy import json import os -from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP from .file_utils import ( CONFIG_NAME, MODEL_CARD_NAME, @@ -29,6 +28,7 @@ from .file_utils import ( hf_bucket_url, is_remote_url, ) +from .models.auto.configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP from .utils import logging diff --git a/src/transformers/models/albert/__init__.py b/src/transformers/models/albert/__init__.py new file mode 100644 index 00000000000..481f6bbeb8f --- /dev/null +++ b/src/transformers/models/albert/__init__.py @@ -0,0 +1,41 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
+ +from ...file_utils import is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig + + +if is_sentencepiece_available(): + from .tokenization_albert import AlbertTokenizer + +if is_tokenizers_available(): + from .tokenization_albert_fast import AlbertTokenizerFast + +if is_torch_available(): + from .modeling_albert import ( + ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + AlbertForMaskedLM, + AlbertForMultipleChoice, + AlbertForPreTraining, + AlbertForQuestionAnswering, + AlbertForSequenceClassification, + AlbertForTokenClassification, + AlbertModel, + AlbertPreTrainedModel, + load_tf_weights_in_albert, + ) + +if is_tf_available(): + from .modeling_tf_albert import ( + TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFAlbertForMaskedLM, + TFAlbertForMultipleChoice, + TFAlbertForPreTraining, + TFAlbertForQuestionAnswering, + TFAlbertForSequenceClassification, + TFAlbertForTokenClassification, + TFAlbertMainLayer, + TFAlbertModel, + TFAlbertPreTrainedModel, + ) diff --git a/src/transformers/configuration_albert.py b/src/transformers/models/albert/configuration_albert.py similarity index 99% rename from src/transformers/configuration_albert.py rename to src/transformers/models/albert/configuration_albert.py index a4a0595a8f0..e83be6b9ccf 100644 --- a/src/transformers/configuration_albert.py +++ b/src/transformers/models/albert/configuration_albert.py @@ -15,7 +15,7 @@ # limitations under the License. """ ALBERT model configuration """ -from .configuration_utils import PretrainedConfig +from ...configuration_utils import PretrainedConfig ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { diff --git a/src/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py similarity index 100% rename from src/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py rename to src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py diff --git a/src/transformers/modeling_albert.py b/src/transformers/models/albert/modeling_albert.py similarity index 99% rename from src/transformers/modeling_albert.py rename to src/transformers/models/albert/modeling_albert.py index d3c3102d2a7..f7b4702e2e7 100755 --- a/src/transformers/modeling_albert.py +++ b/src/transformers/models/albert/modeling_albert.py @@ -24,16 +24,15 @@ import torch import torch.nn as nn from torch.nn import CrossEntropyLoss, MSELoss -from .activations import ACT2FN -from .configuration_albert import AlbertConfig -from .file_utils import ( +from ...activations import ACT2FN +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_outputs import ( +from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, @@ -42,13 +41,14 @@ from .modeling_outputs import ( SequenceClassifierOutput, TokenClassifierOutput, ) -from .modeling_utils import ( +from ...modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) -from .utils import logging +from ...utils import logging +from .configuration_albert import AlbertConfig logger = logging.get_logger(__name__) @@ -216,7 +216,7 @@ class AlbertEmbeddings(nn.Module): # position_ids (1, len position emb) is contiguous in memory and exported when serialized 
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - # Copied from transformers.modeling_bert.BertEmbeddings.forward + # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() @@ -266,7 +266,7 @@ class AlbertAttention(nn.Module): self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pruned_heads = set() - # Copied from transformers.modeling_bert.BertSelfAttention.transpose_for_scores + # Copied from transformers.models.bert.modeling_bert.BertSelfAttention.transpose_for_scores def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) diff --git a/src/transformers/modeling_tf_albert.py b/src/transformers/models/albert/modeling_tf_albert.py similarity index 99% rename from src/transformers/modeling_tf_albert.py rename to src/transformers/models/albert/modeling_tf_albert.py index fdec7bcaf22..ccbaab009cc 100644 --- a/src/transformers/modeling_tf_albert.py +++ b/src/transformers/models/albert/modeling_tf_albert.py @@ -21,9 +21,8 @@ from typing import Optional, Tuple import tensorflow as tf -from .activations_tf import get_tf_activation -from .configuration_albert import AlbertConfig -from .file_utils import ( +from ...activations_tf import get_tf_activation +from ...file_utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, @@ -31,7 +30,7 @@ from .file_utils import ( add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_tf_outputs import ( +from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPooling, TFMaskedLMOutput, @@ -40,7 +39,7 @@ from .modeling_tf_outputs import ( TFSequenceClassifierOutput, TFTokenClassifierOutput, ) -from .modeling_tf_utils import ( +from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFMultipleChoiceLoss, TFPreTrainedModel, @@ -51,8 +50,9 @@ from .modeling_tf_utils import ( keras_serializable, shape_list, ) -from .tokenization_utils import BatchEncoding -from .utils import logging +from ...tokenization_utils import BatchEncoding +from ...utils import logging +from .configuration_albert import AlbertConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_albert.py b/src/transformers/models/albert/tokenization_albert.py similarity index 99% rename from src/transformers/tokenization_albert.py rename to src/transformers/models/albert/tokenization_albert.py index 10d4df0bb86..a9bb75e95f9 100644 --- a/src/transformers/tokenization_albert.py +++ b/src/transformers/models/albert/tokenization_albert.py @@ -22,8 +22,8 @@ from typing import List, Optional, Tuple import sentencepiece as spm -from .tokenization_utils import PreTrainedTokenizer -from .utils import logging +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_albert_fast.py b/src/transformers/models/albert/tokenization_albert_fast.py similarity index 98% rename from src/transformers/tokenization_albert_fast.py rename to src/transformers/models/albert/tokenization_albert_fast.py index ac190e77dd5..f538cc97018 100644 --- a/src/transformers/tokenization_albert_fast.py +++ b/src/transformers/models/albert/tokenization_albert_fast.py @@ -19,9 +19,9 @@ 
import os from shutil import copyfile from typing import List, Optional, Tuple -from .file_utils import is_sentencepiece_available -from .tokenization_utils_fast import PreTrainedTokenizerFast -from .utils import logging +from ...file_utils import is_sentencepiece_available +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging if is_sentencepiece_available(): diff --git a/src/transformers/models/auto/__init__.py b/src/transformers/models/auto/__init__.py new file mode 100644 index 00000000000..86ab29b8915 --- /dev/null +++ b/src/transformers/models/auto/__init__.py @@ -0,0 +1,59 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_tf_available, is_torch_available +from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, AutoConfig +from .tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer + + +if is_torch_available(): + from .modeling_auto import ( + MODEL_FOR_CAUSAL_LM_MAPPING, + MODEL_FOR_MASKED_LM_MAPPING, + MODEL_FOR_MULTIPLE_CHOICE_MAPPING, + MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, + MODEL_FOR_PRETRAINING_MAPPING, + MODEL_FOR_QUESTION_ANSWERING_MAPPING, + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, + MODEL_MAPPING, + MODEL_WITH_LM_HEAD_MAPPING, + AutoModel, + AutoModelForCausalLM, + AutoModelForMaskedLM, + AutoModelForMultipleChoice, + AutoModelForNextSentencePrediction, + AutoModelForPreTraining, + AutoModelForQuestionAnswering, + AutoModelForSeq2SeqLM, + AutoModelForSequenceClassification, + AutoModelForTokenClassification, + AutoModelWithLMHead, + ) + +if is_tf_available(): + from .modeling_tf_auto import ( + TF_MODEL_FOR_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_MASKED_LM_MAPPING, + TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, + TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, + TF_MODEL_FOR_PRETRAINING_MAPPING, + TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, + TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, + TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, + TF_MODEL_MAPPING, + TF_MODEL_WITH_LM_HEAD_MAPPING, + TFAutoModel, + TFAutoModelForCausalLM, + TFAutoModelForMaskedLM, + TFAutoModelForMultipleChoice, + TFAutoModelForPreTraining, + TFAutoModelForQuestionAnswering, + TFAutoModelForSeq2SeqLM, + TFAutoModelForSequenceClassification, + TFAutoModelForTokenClassification, + TFAutoModelWithLMHead, + ) diff --git a/src/transformers/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py similarity index 80% rename from src/transformers/configuration_auto.py rename to src/transformers/models/auto/configuration_auto.py index 41f9a1db82c..c8a74439742 100644 --- a/src/transformers/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -17,43 +17,46 @@ import re from collections import OrderedDict -from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig -from .configuration_bart import BART_PRETRAINED_CONFIG_ARCHIVE_MAP, BartConfig -from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig -from .configuration_bert_generation import BertGenerationConfig -from .configuration_blenderbot import BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig -from .configuration_camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig -from .configuration_ctrl 
import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig -from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig -from .configuration_distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig -from .configuration_dpr import DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, DPRConfig -from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig -from .configuration_encoder_decoder import EncoderDecoderConfig -from .configuration_flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig -from .configuration_fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig -from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig -from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config -from .configuration_layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig -from .configuration_longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig -from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig -from .configuration_marian import MarianConfig -from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig -from .configuration_mobilebert import MobileBertConfig -from .configuration_openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig -from .configuration_pegasus import PegasusConfig -from .configuration_prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig -from .configuration_rag import RagConfig -from .configuration_reformer import ReformerConfig -from .configuration_retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig -from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig -from .configuration_squeezebert import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig -from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config -from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig -from .configuration_utils import PretrainedConfig -from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig -from .configuration_xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig -from .configuration_xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig -from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig +from ...configuration_utils import PretrainedConfig +from ..albert.configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig +from ..bart.configuration_bart import BART_PRETRAINED_CONFIG_ARCHIVE_MAP, BartConfig +from ..bert.configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig +from ..bert_generation.configuration_bert_generation import BertGenerationConfig +from ..blenderbot.configuration_blenderbot import BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig +from ..camembert.configuration_camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig +from ..ctrl.configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig +from ..deberta.configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig +from ..distilbert.configuration_distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig +from ..dpr.configuration_dpr import DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, DPRConfig +from ..electra.configuration_electra import 
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig +from ..encoder_decoder.configuration_encoder_decoder import EncoderDecoderConfig +from ..flaubert.configuration_flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig +from ..fsmt.configuration_fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig +from ..funnel.configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig +from ..gpt2.configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config +from ..layoutlm.configuration_layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig +from ..longformer.configuration_longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig +from ..lxmert.configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig +from ..marian.configuration_marian import MarianConfig +from ..mbart.configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig +from ..mobilebert.configuration_mobilebert import MobileBertConfig +from ..openai.configuration_openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig +from ..pegasus.configuration_pegasus import PegasusConfig +from ..prophetnet.configuration_prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig +from ..rag.configuration_rag import RagConfig +from ..reformer.configuration_reformer import ReformerConfig +from ..retribert.configuration_retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig +from ..roberta.configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig +from ..squeezebert.configuration_squeezebert import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig +from ..t5.configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config +from ..transfo_xl.configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig +from ..xlm.configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig +from ..xlm_prophetnet.configuration_xlm_prophetnet import ( + XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, + XLMProphetNetConfig, +) +from ..xlm_roberta.configuration_xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig +from ..xlnet.configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig ALL_PRETRAINED_CONFIG_ARCHIVE_MAP = dict( diff --git a/src/transformers/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py similarity index 96% rename from src/transformers/modeling_auto.py rename to src/transformers/models/auto/modeling_auto.py index 18ec83ff2a9..0ba27b31e16 100644 --- a/src/transformers/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -18,6 +18,173 @@ import warnings from collections import OrderedDict +from ...configuration_utils import PretrainedConfig +from ...file_utils import add_start_docstrings +from ...utils import logging +from ..albert.modeling_albert import ( + AlbertForMaskedLM, + AlbertForMultipleChoice, + AlbertForPreTraining, + AlbertForQuestionAnswering, + AlbertForSequenceClassification, + AlbertForTokenClassification, + AlbertModel, +) +from ..bart.modeling_bart import ( + BartForConditionalGeneration, + BartForQuestionAnswering, + BartForSequenceClassification, + BartModel, +) +from ..bert.modeling_bert import ( + BertForMaskedLM, + BertForMultipleChoice, + BertForNextSentencePrediction, + BertForPreTraining, + BertForQuestionAnswering, + BertForSequenceClassification, + BertForTokenClassification, + BertLMHeadModel, + BertModel, +) 
+from ..bert_generation.modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder +from ..blenderbot.modeling_blenderbot import BlenderbotForConditionalGeneration +from ..camembert.modeling_camembert import ( + CamembertForCausalLM, + CamembertForMaskedLM, + CamembertForMultipleChoice, + CamembertForQuestionAnswering, + CamembertForSequenceClassification, + CamembertForTokenClassification, + CamembertModel, +) +from ..ctrl.modeling_ctrl import CTRLLMHeadModel, CTRLModel +from ..deberta.modeling_deberta import DebertaForSequenceClassification, DebertaModel +from ..distilbert.modeling_distilbert import ( + DistilBertForMaskedLM, + DistilBertForMultipleChoice, + DistilBertForQuestionAnswering, + DistilBertForSequenceClassification, + DistilBertForTokenClassification, + DistilBertModel, +) +from ..dpr.modeling_dpr import DPRQuestionEncoder +from ..electra.modeling_electra import ( + ElectraForMaskedLM, + ElectraForMultipleChoice, + ElectraForPreTraining, + ElectraForQuestionAnswering, + ElectraForSequenceClassification, + ElectraForTokenClassification, + ElectraModel, +) +from ..encoder_decoder.modeling_encoder_decoder import EncoderDecoderModel +from ..flaubert.modeling_flaubert import ( + FlaubertForMultipleChoice, + FlaubertForQuestionAnsweringSimple, + FlaubertForSequenceClassification, + FlaubertForTokenClassification, + FlaubertModel, + FlaubertWithLMHeadModel, +) +from ..fsmt.modeling_fsmt import FSMTForConditionalGeneration, FSMTModel +from ..funnel.modeling_funnel import ( + FunnelForMaskedLM, + FunnelForMultipleChoice, + FunnelForPreTraining, + FunnelForQuestionAnswering, + FunnelForSequenceClassification, + FunnelForTokenClassification, + FunnelModel, +) +from ..gpt2.modeling_gpt2 import GPT2ForSequenceClassification, GPT2LMHeadModel, GPT2Model +from ..layoutlm.modeling_layoutlm import LayoutLMForMaskedLM, LayoutLMForTokenClassification, LayoutLMModel +from ..longformer.modeling_longformer import ( + LongformerForMaskedLM, + LongformerForMultipleChoice, + LongformerForQuestionAnswering, + LongformerForSequenceClassification, + LongformerForTokenClassification, + LongformerModel, +) +from ..lxmert.modeling_lxmert import LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel +from ..marian.modeling_marian import MarianMTModel +from ..mbart.modeling_mbart import MBartForConditionalGeneration +from ..mobilebert.modeling_mobilebert import ( + MobileBertForMaskedLM, + MobileBertForMultipleChoice, + MobileBertForNextSentencePrediction, + MobileBertForPreTraining, + MobileBertForQuestionAnswering, + MobileBertForSequenceClassification, + MobileBertForTokenClassification, + MobileBertModel, +) +from ..openai.modeling_openai import OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel +from ..pegasus.modeling_pegasus import PegasusForConditionalGeneration +from ..prophetnet.modeling_prophetnet import ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel +from ..rag.modeling_rag import ( # noqa: F401 - need to import all RagModels to be in globals() function + RagModel, + RagSequenceForGeneration, + RagTokenForGeneration, +) +from ..reformer.modeling_reformer import ( + ReformerForMaskedLM, + ReformerForQuestionAnswering, + ReformerForSequenceClassification, + ReformerModel, + ReformerModelWithLMHead, +) +from ..retribert.modeling_retribert import RetriBertModel +from ..roberta.modeling_roberta import ( + RobertaForCausalLM, + RobertaForMaskedLM, + RobertaForMultipleChoice, + RobertaForQuestionAnswering, + 
RobertaForSequenceClassification, + RobertaForTokenClassification, + RobertaModel, +) +from ..squeezebert.modeling_squeezebert import ( + SqueezeBertForMaskedLM, + SqueezeBertForMultipleChoice, + SqueezeBertForQuestionAnswering, + SqueezeBertForSequenceClassification, + SqueezeBertForTokenClassification, + SqueezeBertModel, +) +from ..t5.modeling_t5 import T5ForConditionalGeneration, T5Model +from ..transfo_xl.modeling_transfo_xl import TransfoXLLMHeadModel, TransfoXLModel +from ..xlm.modeling_xlm import ( + XLMForMultipleChoice, + XLMForQuestionAnsweringSimple, + XLMForSequenceClassification, + XLMForTokenClassification, + XLMModel, + XLMWithLMHeadModel, +) +from ..xlm_prophetnet.modeling_xlm_prophetnet import ( + XLMProphetNetForCausalLM, + XLMProphetNetForConditionalGeneration, + XLMProphetNetModel, +) +from ..xlm_roberta.modeling_xlm_roberta import ( + XLMRobertaForCausalLM, + XLMRobertaForMaskedLM, + XLMRobertaForMultipleChoice, + XLMRobertaForQuestionAnswering, + XLMRobertaForSequenceClassification, + XLMRobertaForTokenClassification, + XLMRobertaModel, +) +from ..xlnet.modeling_xlnet import ( + XLNetForMultipleChoice, + XLNetForQuestionAnsweringSimple, + XLNetForSequenceClassification, + XLNetForTokenClassification, + XLNetLMHeadModel, + XLNetModel, +) from .configuration_auto import ( AlbertConfig, AutoConfig, @@ -39,6 +206,7 @@ from .configuration_auto import ( LayoutLMConfig, LongformerConfig, LxmertConfig, + MarianConfig, MBartConfig, MobileBertConfig, OpenAIGPTConfig, @@ -56,174 +224,6 @@ from .configuration_auto import ( XLNetConfig, replace_list_option_in_docstrings, ) -from .configuration_marian import MarianConfig -from .configuration_utils import PretrainedConfig -from .file_utils import add_start_docstrings -from .modeling_albert import ( - AlbertForMaskedLM, - AlbertForMultipleChoice, - AlbertForPreTraining, - AlbertForQuestionAnswering, - AlbertForSequenceClassification, - AlbertForTokenClassification, - AlbertModel, -) -from .modeling_bart import ( - BartForConditionalGeneration, - BartForQuestionAnswering, - BartForSequenceClassification, - BartModel, -) -from .modeling_bert import ( - BertForMaskedLM, - BertForMultipleChoice, - BertForNextSentencePrediction, - BertForPreTraining, - BertForQuestionAnswering, - BertForSequenceClassification, - BertForTokenClassification, - BertLMHeadModel, - BertModel, -) -from .modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder -from .modeling_blenderbot import BlenderbotForConditionalGeneration -from .modeling_camembert import ( - CamembertForCausalLM, - CamembertForMaskedLM, - CamembertForMultipleChoice, - CamembertForQuestionAnswering, - CamembertForSequenceClassification, - CamembertForTokenClassification, - CamembertModel, -) -from .modeling_ctrl import CTRLLMHeadModel, CTRLModel -from .modeling_deberta import DebertaForSequenceClassification, DebertaModel -from .modeling_distilbert import ( - DistilBertForMaskedLM, - DistilBertForMultipleChoice, - DistilBertForQuestionAnswering, - DistilBertForSequenceClassification, - DistilBertForTokenClassification, - DistilBertModel, -) -from .modeling_dpr import DPRQuestionEncoder -from .modeling_electra import ( - ElectraForMaskedLM, - ElectraForMultipleChoice, - ElectraForPreTraining, - ElectraForQuestionAnswering, - ElectraForSequenceClassification, - ElectraForTokenClassification, - ElectraModel, -) -from .modeling_encoder_decoder import EncoderDecoderModel -from .modeling_flaubert import ( - FlaubertForMultipleChoice, - FlaubertForQuestionAnsweringSimple, - 
FlaubertForSequenceClassification, - FlaubertForTokenClassification, - FlaubertModel, - FlaubertWithLMHeadModel, -) -from .modeling_fsmt import FSMTForConditionalGeneration, FSMTModel -from .modeling_funnel import ( - FunnelForMaskedLM, - FunnelForMultipleChoice, - FunnelForPreTraining, - FunnelForQuestionAnswering, - FunnelForSequenceClassification, - FunnelForTokenClassification, - FunnelModel, -) -from .modeling_gpt2 import GPT2ForSequenceClassification, GPT2LMHeadModel, GPT2Model -from .modeling_layoutlm import LayoutLMForMaskedLM, LayoutLMForTokenClassification, LayoutLMModel -from .modeling_longformer import ( - LongformerForMaskedLM, - LongformerForMultipleChoice, - LongformerForQuestionAnswering, - LongformerForSequenceClassification, - LongformerForTokenClassification, - LongformerModel, -) -from .modeling_lxmert import LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel -from .modeling_marian import MarianMTModel -from .modeling_mbart import MBartForConditionalGeneration -from .modeling_mobilebert import ( - MobileBertForMaskedLM, - MobileBertForMultipleChoice, - MobileBertForNextSentencePrediction, - MobileBertForPreTraining, - MobileBertForQuestionAnswering, - MobileBertForSequenceClassification, - MobileBertForTokenClassification, - MobileBertModel, -) -from .modeling_openai import OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel -from .modeling_pegasus import PegasusForConditionalGeneration -from .modeling_prophetnet import ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel -from .modeling_rag import ( # noqa: F401 - need to import all RagModels to be in globals() function - RagModel, - RagSequenceForGeneration, - RagTokenForGeneration, -) -from .modeling_reformer import ( - ReformerForMaskedLM, - ReformerForQuestionAnswering, - ReformerForSequenceClassification, - ReformerModel, - ReformerModelWithLMHead, -) -from .modeling_retribert import RetriBertModel -from .modeling_roberta import ( - RobertaForCausalLM, - RobertaForMaskedLM, - RobertaForMultipleChoice, - RobertaForQuestionAnswering, - RobertaForSequenceClassification, - RobertaForTokenClassification, - RobertaModel, -) -from .modeling_squeezebert import ( - SqueezeBertForMaskedLM, - SqueezeBertForMultipleChoice, - SqueezeBertForQuestionAnswering, - SqueezeBertForSequenceClassification, - SqueezeBertForTokenClassification, - SqueezeBertModel, -) -from .modeling_t5 import T5ForConditionalGeneration, T5Model -from .modeling_transfo_xl import TransfoXLLMHeadModel, TransfoXLModel -from .modeling_xlm import ( - XLMForMultipleChoice, - XLMForQuestionAnsweringSimple, - XLMForSequenceClassification, - XLMForTokenClassification, - XLMModel, - XLMWithLMHeadModel, -) -from .modeling_xlm_prophetnet import ( - XLMProphetNetForCausalLM, - XLMProphetNetForConditionalGeneration, - XLMProphetNetModel, -) -from .modeling_xlm_roberta import ( - XLMRobertaForCausalLM, - XLMRobertaForMaskedLM, - XLMRobertaForMultipleChoice, - XLMRobertaForQuestionAnswering, - XLMRobertaForSequenceClassification, - XLMRobertaForTokenClassification, - XLMRobertaModel, -) -from .modeling_xlnet import ( - XLNetForMultipleChoice, - XLNetForQuestionAnsweringSimple, - XLNetForSequenceClassification, - XLNetForTokenClassification, - XLNetLMHeadModel, - XLNetModel, -) -from .utils import logging # Add modeling imports here diff --git a/src/transformers/modeling_flax_auto.py b/src/transformers/models/auto/modeling_flax_auto.py similarity index 97% rename from src/transformers/modeling_flax_auto.py rename to 
src/transformers/models/auto/modeling_flax_auto.py index 8a4be347324..642815a7fcc 100644 --- a/src/transformers/modeling_flax_auto.py +++ b/src/transformers/models/auto/modeling_flax_auto.py @@ -17,11 +17,11 @@ from collections import OrderedDict +from ...configuration_utils import PretrainedConfig +from ...utils import logging +from ..bert.modeling_flax_bert import FlaxBertModel +from ..roberta.modeling_flax_roberta import FlaxRobertaModel from .configuration_auto import AutoConfig, BertConfig, RobertaConfig -from .configuration_utils import PretrainedConfig -from .modeling_flax_bert import FlaxBertModel -from .modeling_flax_roberta import FlaxRobertaModel -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_tf_auto.py b/src/transformers/models/auto/modeling_tf_auto.py similarity index 97% rename from src/transformers/modeling_tf_auto.py rename to src/transformers/models/auto/modeling_tf_auto.py index 12898daf0bf..c433d8c198d 100644 --- a/src/transformers/modeling_tf_auto.py +++ b/src/transformers/models/auto/modeling_tf_auto.py @@ -18,38 +18,10 @@ import warnings from collections import OrderedDict -from .configuration_auto import ( - AlbertConfig, - AutoConfig, - BartConfig, - BertConfig, - CamembertConfig, - CTRLConfig, - DistilBertConfig, - ElectraConfig, - FlaubertConfig, - FunnelConfig, - GPT2Config, - LongformerConfig, - LxmertConfig, - MobileBertConfig, - OpenAIGPTConfig, - RobertaConfig, - T5Config, - TransfoXLConfig, - XLMConfig, - XLMRobertaConfig, - XLNetConfig, - replace_list_option_in_docstrings, -) -from .configuration_blenderbot import BlenderbotConfig -from .configuration_dpr import DPRConfig -from .configuration_marian import MarianConfig -from .configuration_mbart import MBartConfig -from .configuration_pegasus import PegasusConfig -from .configuration_utils import PretrainedConfig -from .file_utils import add_start_docstrings -from .modeling_tf_albert import ( +from ...configuration_utils import PretrainedConfig +from ...file_utils import add_start_docstrings +from ...utils import logging +from ..albert.modeling_tf_albert import ( TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, @@ -58,8 +30,8 @@ from .modeling_tf_albert import ( TFAlbertForTokenClassification, TFAlbertModel, ) -from .modeling_tf_bart import TFBartForConditionalGeneration, TFBartModel -from .modeling_tf_bert import ( +from ..bart.modeling_tf_bart import TFBartForConditionalGeneration, TFBartModel +from ..bert.modeling_tf_bert import ( TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, @@ -70,8 +42,8 @@ from .modeling_tf_bert import ( TFBertLMHeadModel, TFBertModel, ) -from .modeling_tf_blenderbot import TFBlenderbotForConditionalGeneration -from .modeling_tf_camembert import ( +from ..blenderbot.modeling_tf_blenderbot import TFBlenderbotForConditionalGeneration +from ..camembert.modeling_tf_camembert import ( TFCamembertForMaskedLM, TFCamembertForMultipleChoice, TFCamembertForQuestionAnswering, @@ -79,8 +51,8 @@ from .modeling_tf_camembert import ( TFCamembertForTokenClassification, TFCamembertModel, ) -from .modeling_tf_ctrl import TFCTRLLMHeadModel, TFCTRLModel -from .modeling_tf_distilbert import ( +from ..ctrl.modeling_tf_ctrl import TFCTRLLMHeadModel, TFCTRLModel +from ..distilbert.modeling_tf_distilbert import ( TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, @@ -88,8 +60,8 @@ from .modeling_tf_distilbert import ( TFDistilBertForTokenClassification, 
TFDistilBertModel, ) -from .modeling_tf_dpr import TFDPRQuestionEncoder -from .modeling_tf_electra import ( +from ..dpr.modeling_tf_dpr import TFDPRQuestionEncoder +from ..electra.modeling_tf_electra import ( TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, @@ -98,7 +70,7 @@ from .modeling_tf_electra import ( TFElectraForTokenClassification, TFElectraModel, ) -from .modeling_tf_flaubert import ( +from ..flaubert.modeling_tf_flaubert import ( TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, @@ -106,7 +78,7 @@ from .modeling_tf_flaubert import ( TFFlaubertModel, TFFlaubertWithLMHeadModel, ) -from .modeling_tf_funnel import ( +from ..funnel.modeling_tf_funnel import ( TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, @@ -115,12 +87,16 @@ from .modeling_tf_funnel import ( TFFunnelForTokenClassification, TFFunnelModel, ) -from .modeling_tf_gpt2 import TFGPT2LMHeadModel, TFGPT2Model -from .modeling_tf_longformer import TFLongformerForMaskedLM, TFLongformerForQuestionAnswering, TFLongformerModel -from .modeling_tf_lxmert import TFLxmertForPreTraining, TFLxmertModel -from .modeling_tf_marian import TFMarianMTModel -from .modeling_tf_mbart import TFMBartForConditionalGeneration -from .modeling_tf_mobilebert import ( +from ..gpt2.modeling_tf_gpt2 import TFGPT2LMHeadModel, TFGPT2Model +from ..longformer.modeling_tf_longformer import ( + TFLongformerForMaskedLM, + TFLongformerForQuestionAnswering, + TFLongformerModel, +) +from ..lxmert.modeling_tf_lxmert import TFLxmertForPreTraining, TFLxmertModel +from ..marian.modeling_tf_marian import TFMarianMTModel +from ..mbart.modeling_tf_mbart import TFMBartForConditionalGeneration +from ..mobilebert.modeling_tf_mobilebert import ( TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, @@ -130,9 +106,9 @@ from .modeling_tf_mobilebert import ( TFMobileBertForTokenClassification, TFMobileBertModel, ) -from .modeling_tf_openai import TFOpenAIGPTLMHeadModel, TFOpenAIGPTModel -from .modeling_tf_pegasus import TFPegasusForConditionalGeneration -from .modeling_tf_roberta import ( +from ..openai.modeling_tf_openai import TFOpenAIGPTLMHeadModel, TFOpenAIGPTModel +from ..pegasus.modeling_tf_pegasus import TFPegasusForConditionalGeneration +from ..roberta.modeling_tf_roberta import ( TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, @@ -140,9 +116,9 @@ from .modeling_tf_roberta import ( TFRobertaForTokenClassification, TFRobertaModel, ) -from .modeling_tf_t5 import TFT5ForConditionalGeneration, TFT5Model -from .modeling_tf_transfo_xl import TFTransfoXLLMHeadModel, TFTransfoXLModel -from .modeling_tf_xlm import ( +from ..t5.modeling_tf_t5 import TFT5ForConditionalGeneration, TFT5Model +from ..transfo_xl.modeling_tf_transfo_xl import TFTransfoXLLMHeadModel, TFTransfoXLModel +from ..xlm.modeling_tf_xlm import ( TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, @@ -150,7 +126,7 @@ from .modeling_tf_xlm import ( TFXLMModel, TFXLMWithLMHeadModel, ) -from .modeling_tf_xlm_roberta import ( +from ..xlm_roberta.modeling_tf_xlm_roberta import ( TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, @@ -158,7 +134,7 @@ from .modeling_tf_xlm_roberta import ( TFXLMRobertaForTokenClassification, TFXLMRobertaModel, ) -from .modeling_tf_xlnet import ( +from ..xlnet.modeling_tf_xlnet import ( TFXLNetForMultipleChoice, 
TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, @@ -166,7 +142,35 @@ from .modeling_tf_xlnet import ( TFXLNetLMHeadModel, TFXLNetModel, ) -from .utils import logging +from .configuration_auto import ( + AlbertConfig, + AutoConfig, + BartConfig, + BertConfig, + BlenderbotConfig, + CamembertConfig, + CTRLConfig, + DistilBertConfig, + DPRConfig, + ElectraConfig, + FlaubertConfig, + FunnelConfig, + GPT2Config, + LongformerConfig, + LxmertConfig, + MarianConfig, + MBartConfig, + MobileBertConfig, + OpenAIGPTConfig, + PegasusConfig, + RobertaConfig, + T5Config, + TransfoXLConfig, + XLMConfig, + XLMRobertaConfig, + XLNetConfig, + replace_list_option_in_docstrings, +) # Add modeling imports here diff --git a/src/transformers/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py similarity index 74% rename from src/transformers/tokenization_auto.py rename to src/transformers/models/auto/tokenization_auto.py index 7e375d05986..ea5113bd1ce 100644 --- a/src/transformers/tokenization_auto.py +++ b/src/transformers/models/auto/tokenization_auto.py @@ -17,6 +17,37 @@ from collections import OrderedDict +from ...configuration_utils import PretrainedConfig +from ...file_utils import is_sentencepiece_available, is_tokenizers_available +from ...utils import logging +from ..bart.tokenization_bart import BartTokenizer +from ..bert.tokenization_bert import BertTokenizer +from ..bert_japanese.tokenization_bert_japanese import BertJapaneseTokenizer +from ..bertweet.tokenization_bertweet import BertweetTokenizer +from ..blenderbot.tokenization_blenderbot import BlenderbotSmallTokenizer +from ..ctrl.tokenization_ctrl import CTRLTokenizer +from ..deberta.tokenization_deberta import DebertaTokenizer +from ..distilbert.tokenization_distilbert import DistilBertTokenizer +from ..dpr.tokenization_dpr import DPRQuestionEncoderTokenizer +from ..electra.tokenization_electra import ElectraTokenizer +from ..flaubert.tokenization_flaubert import FlaubertTokenizer +from ..fsmt.tokenization_fsmt import FSMTTokenizer +from ..funnel.tokenization_funnel import FunnelTokenizer +from ..gpt2.tokenization_gpt2 import GPT2Tokenizer +from ..herbert.tokenization_herbert import HerbertTokenizer +from ..layoutlm.tokenization_layoutlm import LayoutLMTokenizer +from ..longformer.tokenization_longformer import LongformerTokenizer +from ..lxmert.tokenization_lxmert import LxmertTokenizer +from ..mobilebert.tokenization_mobilebert import MobileBertTokenizer +from ..openai.tokenization_openai import OpenAIGPTTokenizer +from ..phobert.tokenization_phobert import PhobertTokenizer +from ..prophetnet.tokenization_prophetnet import ProphetNetTokenizer +from ..rag.tokenization_rag import RagTokenizer +from ..retribert.tokenization_retribert import RetriBertTokenizer +from ..roberta.tokenization_roberta import RobertaTokenizer +from ..squeezebert.tokenization_squeezebert import SqueezeBertTokenizer +from ..transfo_xl.tokenization_transfo_xl import TransfoXLTokenizer +from ..xlm.tokenization_xlm import XLMTokenizer from .configuration_auto import ( AlbertConfig, AutoConfig, @@ -57,51 +88,20 @@ from .configuration_auto import ( XLNetConfig, replace_list_option_in_docstrings, ) -from .configuration_utils import PretrainedConfig -from .file_utils import is_sentencepiece_available, is_tokenizers_available -from .tokenization_bart import BartTokenizer -from .tokenization_bert import BertTokenizer -from .tokenization_bert_japanese import BertJapaneseTokenizer -from .tokenization_bertweet import BertweetTokenizer -from 
.tokenization_blenderbot import BlenderbotSmallTokenizer -from .tokenization_ctrl import CTRLTokenizer -from .tokenization_deberta import DebertaTokenizer -from .tokenization_distilbert import DistilBertTokenizer -from .tokenization_dpr import DPRQuestionEncoderTokenizer -from .tokenization_electra import ElectraTokenizer -from .tokenization_flaubert import FlaubertTokenizer -from .tokenization_fsmt import FSMTTokenizer -from .tokenization_funnel import FunnelTokenizer -from .tokenization_gpt2 import GPT2Tokenizer -from .tokenization_herbert import HerbertTokenizer -from .tokenization_layoutlm import LayoutLMTokenizer -from .tokenization_longformer import LongformerTokenizer -from .tokenization_lxmert import LxmertTokenizer -from .tokenization_mobilebert import MobileBertTokenizer -from .tokenization_openai import OpenAIGPTTokenizer -from .tokenization_phobert import PhobertTokenizer -from .tokenization_prophetnet import ProphetNetTokenizer -from .tokenization_rag import RagTokenizer -from .tokenization_retribert import RetriBertTokenizer -from .tokenization_roberta import RobertaTokenizer -from .tokenization_squeezebert import SqueezeBertTokenizer -from .tokenization_transfo_xl import TransfoXLTokenizer -from .tokenization_xlm import XLMTokenizer -from .utils import logging if is_sentencepiece_available(): - from .tokenization_albert import AlbertTokenizer - from .tokenization_bert_generation import BertGenerationTokenizer - from .tokenization_camembert import CamembertTokenizer - from .tokenization_marian import MarianTokenizer - from .tokenization_mbart import MBartTokenizer - from .tokenization_pegasus import PegasusTokenizer - from .tokenization_reformer import ReformerTokenizer - from .tokenization_t5 import T5Tokenizer - from .tokenization_xlm_prophetnet import XLMProphetNetTokenizer - from .tokenization_xlm_roberta import XLMRobertaTokenizer - from .tokenization_xlnet import XLNetTokenizer + from ..albert.tokenization_albert import AlbertTokenizer + from ..bert_generation.tokenization_bert_generation import BertGenerationTokenizer + from ..camembert.tokenization_camembert import CamembertTokenizer + from ..marian.tokenization_marian import MarianTokenizer + from ..mbart.tokenization_mbart import MBartTokenizer + from ..pegasus.tokenization_pegasus import PegasusTokenizer + from ..reformer.tokenization_reformer import ReformerTokenizer + from ..t5.tokenization_t5 import T5Tokenizer + from ..xlm_prophetnet.tokenization_xlm_prophetnet import XLMProphetNetTokenizer + from ..xlm_roberta.tokenization_xlm_roberta import XLMRobertaTokenizer + from ..xlnet.tokenization_xlnet import XLNetTokenizer else: AlbertTokenizer = None BertGenerationTokenizer = None @@ -116,30 +116,30 @@ else: XLMProphetNetTokenizer = None if is_tokenizers_available(): - from .tokenization_albert_fast import AlbertTokenizerFast - from .tokenization_bart_fast import BartTokenizerFast - from .tokenization_bert_fast import BertTokenizerFast - from .tokenization_camembert_fast import CamembertTokenizerFast - from .tokenization_distilbert_fast import DistilBertTokenizerFast - from .tokenization_dpr_fast import DPRQuestionEncoderTokenizerFast - from .tokenization_electra_fast import ElectraTokenizerFast - from .tokenization_funnel_fast import FunnelTokenizerFast - from .tokenization_gpt2_fast import GPT2TokenizerFast - from .tokenization_herbert_fast import HerbertTokenizerFast - from .tokenization_layoutlm_fast import LayoutLMTokenizerFast - from .tokenization_longformer_fast import LongformerTokenizerFast - from 
.tokenization_lxmert_fast import LxmertTokenizerFast - from .tokenization_mbart_fast import MBartTokenizerFast - from .tokenization_mobilebert_fast import MobileBertTokenizerFast - from .tokenization_openai_fast import OpenAIGPTTokenizerFast - from .tokenization_pegasus_fast import PegasusTokenizerFast - from .tokenization_reformer_fast import ReformerTokenizerFast - from .tokenization_retribert_fast import RetriBertTokenizerFast - from .tokenization_roberta_fast import RobertaTokenizerFast - from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast - from .tokenization_t5_fast import T5TokenizerFast - from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast - from .tokenization_xlnet_fast import XLNetTokenizerFast + from ..albert.tokenization_albert_fast import AlbertTokenizerFast + from ..bart.tokenization_bart_fast import BartTokenizerFast + from ..bert.tokenization_bert_fast import BertTokenizerFast + from ..camembert.tokenization_camembert_fast import CamembertTokenizerFast + from ..distilbert.tokenization_distilbert_fast import DistilBertTokenizerFast + from ..dpr.tokenization_dpr_fast import DPRQuestionEncoderTokenizerFast + from ..electra.tokenization_electra_fast import ElectraTokenizerFast + from ..funnel.tokenization_funnel_fast import FunnelTokenizerFast + from ..gpt2.tokenization_gpt2_fast import GPT2TokenizerFast + from ..herbert.tokenization_herbert_fast import HerbertTokenizerFast + from ..layoutlm.tokenization_layoutlm_fast import LayoutLMTokenizerFast + from ..longformer.tokenization_longformer_fast import LongformerTokenizerFast + from ..lxmert.tokenization_lxmert_fast import LxmertTokenizerFast + from ..mbart.tokenization_mbart_fast import MBartTokenizerFast + from ..mobilebert.tokenization_mobilebert_fast import MobileBertTokenizerFast + from ..openai.tokenization_openai_fast import OpenAIGPTTokenizerFast + from ..pegasus.tokenization_pegasus_fast import PegasusTokenizerFast + from ..reformer.tokenization_reformer_fast import ReformerTokenizerFast + from ..retribert.tokenization_retribert_fast import RetriBertTokenizerFast + from ..roberta.tokenization_roberta_fast import RobertaTokenizerFast + from ..squeezebert.tokenization_squeezebert_fast import SqueezeBertTokenizerFast + from ..t5.tokenization_t5_fast import T5TokenizerFast + from ..xlm_roberta.tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast + from ..xlnet.tokenization_xlnet_fast import XLNetTokenizerFast else: AlbertTokenizerFast = None BartTokenizerFast = None diff --git a/src/transformers/models/bart/__init__.py b/src/transformers/models/bart/__init__.py new file mode 100644 index 00000000000..3cfc8e8de9a --- /dev/null +++ b/src/transformers/models/bart/__init__.py @@ -0,0 +1,24 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
+ +from ...file_utils import is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_bart import BartConfig +from .tokenization_bart import BartTokenizer + + +if is_tokenizers_available(): + from .tokenization_bart_fast import BartTokenizerFast + +if is_torch_available(): + from .modeling_bart import ( + BART_PRETRAINED_MODEL_ARCHIVE_LIST, + BartForConditionalGeneration, + BartForQuestionAnswering, + BartForSequenceClassification, + BartModel, + PretrainedBartModel, + ) + +if is_tf_available(): + from .modeling_tf_bart import TFBartForConditionalGeneration, TFBartModel diff --git a/src/transformers/configuration_bart.py b/src/transformers/models/bart/configuration_bart.py similarity index 99% rename from src/transformers/configuration_bart.py rename to src/transformers/models/bart/configuration_bart.py index 0947d03584c..8533a013be5 100644 --- a/src/transformers/configuration_bart.py +++ b/src/transformers/models/bart/configuration_bart.py @@ -14,8 +14,8 @@ # limitations under the License. """ BART configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_bart_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py similarity index 98% rename from src/transformers/convert_bart_original_pytorch_checkpoint_to_pytorch.py rename to src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py index c07b50fef0c..8978b8b2e57 100644 --- a/src/transformers/convert_bart_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py @@ -30,7 +30,7 @@ from transformers import ( BartModel, BartTokenizer, ) -from transformers.modeling_bart import _make_linear_from_emb +from transformers.models.bart.modeling_bart import _make_linear_from_emb from transformers.utils import logging diff --git a/src/transformers/modeling_bart.py b/src/transformers/models/bart/modeling_bart.py similarity index 99% rename from src/transformers/modeling_bart.py rename to src/transformers/models/bart/modeling_bart.py index 48ac20aa60b..9516e5bc8f3 100644 --- a/src/transformers/modeling_bart.py +++ b/src/transformers/models/bart/modeling_bart.py @@ -24,16 +24,15 @@ import torch.nn.functional as F from torch import Tensor, nn from torch.nn import CrossEntropyLoss -from .activations import ACT2FN -from .configuration_bart import BartConfig -from .file_utils import ( +from ...activations import ACT2FN +from ...file_utils import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_outputs import ( +from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, @@ -41,8 +40,9 @@ from .modeling_outputs import ( Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, ) -from .modeling_utils import PreTrainedModel -from .utils import logging +from ...modeling_utils import PreTrainedModel +from ...utils import logging +from .configuration_bart import BartConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_tf_bart.py b/src/transformers/models/bart/modeling_tf_bart.py similarity index 99% rename from src/transformers/modeling_tf_bart.py rename to 
src/transformers/models/bart/modeling_tf_bart.py index e1bf4f76b70..b84632137dd 100644 --- a/src/transformers/modeling_tf_bart.py +++ b/src/transformers/models/bart/modeling_tf_bart.py @@ -24,13 +24,17 @@ import tensorflow as tf from tensorflow import Tensor from tensorflow.keras.layers import Dense, Layer, LayerNormalization -from .activations_tf import ACT2FN -from .configuration_bart import BartConfig -from .file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings -from .modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPast, TFSeq2SeqLMOutput, TFSeq2SeqModelOutput +from ...activations_tf import ACT2FN +from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings +from ...modeling_tf_outputs import ( + TFBaseModelOutput, + TFBaseModelOutputWithPast, + TFSeq2SeqLMOutput, + TFSeq2SeqModelOutput, +) # Public API -from .modeling_tf_utils import ( +from ...modeling_tf_utils import ( DUMMY_INPUTS, TFPreTrainedModel, TFSharedEmbeddings, @@ -39,8 +43,9 @@ from .modeling_tf_utils import ( keras_serializable, shape_list, ) -from .tokenization_utils_base import BatchEncoding -from .utils import logging +from ...tokenization_utils_base import BatchEncoding +from ...utils import logging +from .configuration_bart import BartConfig _CONFIG_FOR_DOC = "BartConfig" diff --git a/src/transformers/tokenization_bart.py b/src/transformers/models/bart/tokenization_bart.py similarity index 98% rename from src/transformers/tokenization_bart.py rename to src/transformers/models/bart/tokenization_bart.py index bc346074bd4..24046b39676 100644 --- a/src/transformers/tokenization_bart.py +++ b/src/transformers/models/bart/tokenization_bart.py @@ -15,9 +15,9 @@ from typing import List, Optional -from .tokenization_roberta import RobertaTokenizer -from .tokenization_utils_base import BatchEncoding -from .utils import logging +from ...tokenization_utils_base import BatchEncoding +from ...utils import logging +from ..roberta.tokenization_roberta import RobertaTokenizer logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_bart_fast.py b/src/transformers/models/bart/tokenization_bart_fast.py similarity index 98% rename from src/transformers/tokenization_bart_fast.py rename to src/transformers/models/bart/tokenization_bart_fast.py index e27e83a7b55..69c16c2be4c 100644 --- a/src/transformers/tokenization_bart_fast.py +++ b/src/transformers/models/bart/tokenization_bart_fast.py @@ -15,10 +15,10 @@ from typing import List, Optional +from ...tokenization_utils_base import BatchEncoding +from ...utils import logging +from ..roberta.tokenization_roberta_fast import RobertaTokenizerFast from .tokenization_bart import BartTokenizer -from .tokenization_roberta_fast import RobertaTokenizerFast -from .tokenization_utils_base import BatchEncoding -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/bert/__init__.py b/src/transformers/models/bert/__init__.py new file mode 100644 index 00000000000..1e447fa8285 --- /dev/null +++ b/src/transformers/models/bert/__init__.py @@ -0,0 +1,48 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
+ +from ...file_utils import is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig +from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer + + +if is_tokenizers_available(): + from .tokenization_bert_fast import BertTokenizerFast + +if is_torch_available(): + from .modeling_bert import ( + BERT_PRETRAINED_MODEL_ARCHIVE_LIST, + BertForMaskedLM, + BertForMultipleChoice, + BertForNextSentencePrediction, + BertForPreTraining, + BertForQuestionAnswering, + BertForSequenceClassification, + BertForTokenClassification, + BertLayer, + BertLMHeadModel, + BertModel, + BertPreTrainedModel, + load_tf_weights_in_bert, + ) + +if is_tf_available(): + from .modeling_tf_bert import ( + TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFBertEmbeddings, + TFBertForMaskedLM, + TFBertForMultipleChoice, + TFBertForNextSentencePrediction, + TFBertForPreTraining, + TFBertForQuestionAnswering, + TFBertForSequenceClassification, + TFBertForTokenClassification, + TFBertLMHeadModel, + TFBertMainLayer, + TFBertModel, + TFBertPreTrainedModel, + ) + +if is_flax_available(): + from .modeling_flax_bert import FlaxBertModel diff --git a/src/transformers/configuration_bert.py b/src/transformers/models/bert/configuration_bert.py similarity index 99% rename from src/transformers/configuration_bert.py rename to src/transformers/models/bert/configuration_bert.py index 19a7c8ed273..4d5de80e78d 100644 --- a/src/transformers/configuration_bert.py +++ b/src/transformers/models/bert/configuration_bert.py @@ -15,8 +15,8 @@ # limitations under the License. """ BERT model configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_bert_original_tf2_checkpoint_to_pytorch.py b/src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py similarity index 100% rename from src/transformers/convert_bert_original_tf2_checkpoint_to_pytorch.py rename to src/transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py diff --git a/src/transformers/convert_bert_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py similarity index 100% rename from src/transformers/convert_bert_original_tf_checkpoint_to_pytorch.py rename to src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py diff --git a/src/transformers/convert_bert_pytorch_checkpoint_to_original_tf.py b/src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py similarity index 100% rename from src/transformers/convert_bert_pytorch_checkpoint_to_original_tf.py rename to src/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py diff --git a/src/transformers/modeling_bert.py b/src/transformers/models/bert/modeling_bert.py similarity index 99% rename from src/transformers/modeling_bert.py rename to src/transformers/models/bert/modeling_bert.py index 3787db98e92..8a7aa1a4df0 100755 --- a/src/transformers/modeling_bert.py +++ b/src/transformers/models/bert/modeling_bert.py @@ -27,16 +27,15 @@ import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss, MSELoss -from .activations import ACT2FN -from .configuration_bert import BertConfig -from .file_utils import ( +from ...activations import ACT2FN 
+from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_outputs import ( +from ...modeling_outputs import ( BaseModelOutputWithCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, @@ -47,13 +46,14 @@ from .modeling_outputs import ( SequenceClassifierOutput, TokenClassifierOutput, ) -from .modeling_utils import ( +from ...modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) -from .utils import logging +from ...utils import logging +from .configuration_bert import BertConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_flax_bert.py b/src/transformers/models/bert/modeling_flax_bert.py similarity index 98% rename from src/transformers/modeling_flax_bert.py rename to src/transformers/models/bert/modeling_flax_bert.py index 50b499e0837..a1cbbb87de1 100644 --- a/src/transformers/modeling_flax_bert.py +++ b/src/transformers/models/bert/modeling_flax_bert.py @@ -21,10 +21,10 @@ import flax.linen as nn import jax import jax.numpy as jnp +from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward +from ...modeling_flax_utils import FlaxPreTrainedModel, gelu +from ...utils import logging from .configuration_bert import BertConfig -from .file_utils import add_start_docstrings, add_start_docstrings_to_model_forward -from .modeling_flax_utils import FlaxPreTrainedModel, gelu -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_tf_bert.py b/src/transformers/models/bert/modeling_tf_bert.py similarity index 99% rename from src/transformers/modeling_tf_bert.py rename to src/transformers/models/bert/modeling_tf_bert.py index 72da644ebc6..f6b9d81d269 100644 --- a/src/transformers/modeling_tf_bert.py +++ b/src/transformers/models/bert/modeling_tf_bert.py @@ -21,9 +21,8 @@ from typing import Optional, Tuple import tensorflow as tf -from .activations_tf import get_tf_activation -from .configuration_bert import BertConfig -from .file_utils import ( +from ...activations_tf import get_tf_activation +from ...file_utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, @@ -31,7 +30,7 @@ from .file_utils import ( add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_tf_outputs import ( +from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPooling, TFCausalLMOutput, @@ -42,7 +41,7 @@ from .modeling_tf_outputs import ( TFSequenceClassifierOutput, TFTokenClassifierOutput, ) -from .modeling_tf_utils import ( +from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFMaskedLanguageModelingLoss, TFMultipleChoiceLoss, @@ -55,8 +54,9 @@ from .modeling_tf_utils import ( keras_serializable, shape_list, ) -from .tokenization_utils import BatchEncoding -from .utils import logging +from ...tokenization_utils import BatchEncoding +from ...utils import logging +from .configuration_bert import BertConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_bert.py b/src/transformers/models/bert/tokenization_bert.py similarity index 99% rename from src/transformers/tokenization_bert.py rename to src/transformers/models/bert/tokenization_bert.py index fe67e319383..3198d3f7ab2 100644 --- a/src/transformers/tokenization_bert.py +++ 
b/src/transformers/models/bert/tokenization_bert.py @@ -20,8 +20,8 @@ import os import unicodedata from typing import List, Optional, Tuple -from .tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace -from .utils import logging +from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_bert_fast.py b/src/transformers/models/bert/tokenization_bert_fast.py similarity index 99% rename from src/transformers/tokenization_bert_fast.py rename to src/transformers/models/bert/tokenization_bert_fast.py index ddd7f6043f6..230def78aa0 100644 --- a/src/transformers/tokenization_bert_fast.py +++ b/src/transformers/models/bert/tokenization_bert_fast.py @@ -19,9 +19,9 @@ from typing import List, Optional, Tuple from tokenizers import normalizers +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging from .tokenization_bert import BertTokenizer -from .tokenization_utils_fast import PreTrainedTokenizerFast -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/bert_generation/__init__.py b/src/transformers/models/bert_generation/__init__.py new file mode 100644 index 00000000000..d3c66f6b456 --- /dev/null +++ b/src/transformers/models/bert_generation/__init__.py @@ -0,0 +1,17 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_sentencepiece_available, is_torch_available +from .configuration_bert_generation import BertGenerationConfig + + +if is_sentencepiece_available(): + from .tokenization_bert_generation import BertGenerationTokenizer + +if is_torch_available(): + from .modeling_bert_generation import ( + BertGenerationDecoder, + BertGenerationEncoder, + load_tf_weights_in_bert_generation, + ) diff --git a/src/transformers/configuration_bert_generation.py b/src/transformers/models/bert_generation/configuration_bert_generation.py similarity index 99% rename from src/transformers/configuration_bert_generation.py rename to src/transformers/models/bert_generation/configuration_bert_generation.py index 3b9dc4873f4..523de1997bd 100644 --- a/src/transformers/configuration_bert_generation.py +++ b/src/transformers/models/bert_generation/configuration_bert_generation.py @@ -14,7 +14,7 @@ # limitations under the License. 
""" BertGeneration model configuration """ -from .configuration_utils import PretrainedConfig +from ...configuration_utils import PretrainedConfig class BertGenerationConfig(PretrainedConfig): diff --git a/src/transformers/modeling_bert_generation.py b/src/transformers/models/bert_generation/modeling_bert_generation.py similarity index 98% rename from src/transformers/modeling_bert_generation.py rename to src/transformers/models/bert_generation/modeling_bert_generation.py index 68926e9c387..9ab4d1ee4de 100755 --- a/src/transformers/modeling_bert_generation.py +++ b/src/transformers/models/bert_generation/modeling_bert_generation.py @@ -20,17 +20,17 @@ import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss -from .configuration_bert_generation import BertGenerationConfig -from .file_utils import ( +from ...file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_bert import BertEncoder -from .modeling_outputs import BaseModelOutputWithCrossAttentions, CausalLMOutputWithCrossAttentions -from .modeling_utils import PreTrainedModel -from .utils import logging +from ...modeling_outputs import BaseModelOutputWithCrossAttentions, CausalLMOutputWithCrossAttentions +from ...modeling_utils import PreTrainedModel +from ...utils import logging +from ..bert.modeling_bert import BertEncoder +from .configuration_bert_generation import BertGenerationConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_bert_generation.py b/src/transformers/models/bert_generation/tokenization_bert_generation.py similarity index 98% rename from src/transformers/tokenization_bert_generation.py rename to src/transformers/models/bert_generation/tokenization_bert_generation.py index ba3c9d0126e..92525e85274 100644 --- a/src/transformers/tokenization_bert_generation.py +++ b/src/transformers/models/bert_generation/tokenization_bert_generation.py @@ -21,8 +21,8 @@ from typing import List, Optional, Tuple import sentencepiece as spm -from .tokenization_utils import PreTrainedTokenizer -from .utils import logging +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/bert_japanese/__init__.py b/src/transformers/models/bert_japanese/__init__.py new file mode 100644 index 00000000000..68b1e51c0b9 --- /dev/null +++ b/src/transformers/models/bert_japanese/__init__.py @@ -0,0 +1,5 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
+ +from .tokenization_bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer diff --git a/src/transformers/tokenization_bert_japanese.py b/src/transformers/models/bert_japanese/tokenization_bert_japanese.py similarity index 99% rename from src/transformers/tokenization_bert_japanese.py rename to src/transformers/models/bert_japanese/tokenization_bert_japanese.py index 2ef263977ce..6da3fa52abc 100644 --- a/src/transformers/tokenization_bert_japanese.py +++ b/src/transformers/models/bert_japanese/tokenization_bert_japanese.py @@ -21,8 +21,8 @@ import os import unicodedata from typing import Optional -from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer, load_vocab -from .utils import logging +from ...utils import logging +from ..bert.tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer, load_vocab logger = logging.get_logger(__name__) diff --git a/src/transformers/models/bertweet/__init__.py b/src/transformers/models/bertweet/__init__.py new file mode 100644 index 00000000000..93fc956d406 --- /dev/null +++ b/src/transformers/models/bertweet/__init__.py @@ -0,0 +1,5 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from .tokenization_bertweet import BertweetTokenizer diff --git a/src/transformers/tokenization_bertweet.py b/src/transformers/models/bertweet/tokenization_bertweet.py similarity index 99% rename from src/transformers/tokenization_bertweet.py rename to src/transformers/models/bertweet/tokenization_bertweet.py index 402fe7708b4..5ba8a213aa7 100644 --- a/src/transformers/tokenization_bertweet.py +++ b/src/transformers/models/bertweet/tokenization_bertweet.py @@ -24,8 +24,8 @@ from typing import List, Optional, Tuple import regex -from .tokenization_utils import PreTrainedTokenizer -from .utils import logging +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/blenderbot/__init__.py b/src/transformers/models/blenderbot/__init__.py new file mode 100644 index 00000000000..29f913bbf7d --- /dev/null +++ b/src/transformers/models/blenderbot/__init__.py @@ -0,0 +1,14 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_tf_available, is_torch_available +from .configuration_blenderbot import BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig +from .tokenization_blenderbot import BlenderbotSmallTokenizer, BlenderbotTokenizer + + +if is_torch_available(): + from .modeling_blenderbot import BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForConditionalGeneration + +if is_tf_available(): + from .modeling_tf_blenderbot import TFBlenderbotForConditionalGeneration diff --git a/src/transformers/configuration_blenderbot.py b/src/transformers/models/blenderbot/configuration_blenderbot.py similarity index 99% rename from src/transformers/configuration_blenderbot.py rename to src/transformers/models/blenderbot/configuration_blenderbot.py index 449089a862d..b273ebb6ae0 100644 --- a/src/transformers/configuration_blenderbot.py +++ b/src/transformers/models/blenderbot/configuration_blenderbot.py @@ -18,7 +18,7 @@ BlenderbotConfig has the same signature as BartConfig. 
We only rewrite the signature in order to document blenderbot-90M defaults. """ -from .configuration_bart import BartConfig +from ..bart.configuration_bart import BartConfig BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP = { diff --git a/src/transformers/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py similarity index 100% rename from src/transformers/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py rename to src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py diff --git a/src/transformers/modeling_blenderbot.py b/src/transformers/models/blenderbot/modeling_blenderbot.py similarity index 95% rename from src/transformers/modeling_blenderbot.py rename to src/transformers/models/blenderbot/modeling_blenderbot.py index 64a12f964ac..1421a87ca9b 100644 --- a/src/transformers/modeling_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_blenderbot.py @@ -18,9 +18,9 @@ import torch +from ...file_utils import add_start_docstrings +from ..bart.modeling_bart import BartForConditionalGeneration from .configuration_blenderbot import BlenderbotConfig -from .file_utils import add_start_docstrings -from .modeling_bart import BartForConditionalGeneration BLENDER_START_DOCSTRING = r""" diff --git a/src/transformers/modeling_tf_blenderbot.py b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py similarity index 89% rename from src/transformers/modeling_tf_blenderbot.py rename to src/transformers/models/blenderbot/modeling_tf_blenderbot.py index 633b50ec775..aa87c8cd9e2 100644 --- a/src/transformers/modeling_tf_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py @@ -13,10 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. """TF BlenderBot model, ported from the fairseq repo.""" + +from ...file_utils import add_start_docstrings, is_tf_available +from ...utils import logging +from ..bart.modeling_tf_bart import BART_START_DOCSTRING, LARGE_NEGATIVE, TFBartForConditionalGeneration from .configuration_blenderbot import BlenderbotConfig -from .file_utils import add_start_docstrings, is_tf_available -from .modeling_tf_bart import BART_START_DOCSTRING, LARGE_NEGATIVE, TFBartForConditionalGeneration -from .utils import logging if is_tf_available(): diff --git a/src/transformers/tokenization_blenderbot.py b/src/transformers/models/blenderbot/tokenization_blenderbot.py similarity index 98% rename from src/transformers/tokenization_blenderbot.py rename to src/transformers/models/blenderbot/tokenization_blenderbot.py index ec350cf2625..bf96a63d04a 100644 --- a/src/transformers/tokenization_blenderbot.py +++ b/src/transformers/models/blenderbot/tokenization_blenderbot.py @@ -21,9 +21,9 @@ from typing import Dict, List, Optional, Tuple import regex as re -from .tokenization_roberta import RobertaTokenizer -from .tokenization_utils import PreTrainedTokenizer -from .utils import logging +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging +from ..roberta.tokenization_roberta import RobertaTokenizer logger = logging.get_logger(__name__) diff --git a/src/transformers/models/camembert/__init__.py b/src/transformers/models/camembert/__init__.py new file mode 100644 index 00000000000..0c072e35072 --- /dev/null +++ b/src/transformers/models/camembert/__init__.py @@ -0,0 +1,36 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' 
imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig + + +if is_sentencepiece_available(): + from .tokenization_camembert import CamembertTokenizer + +if is_tokenizers_available(): + from .tokenization_camembert_fast import CamembertTokenizerFast + +if is_torch_available(): + from .modeling_camembert import ( + CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + CamembertForCausalLM, + CamembertForMaskedLM, + CamembertForMultipleChoice, + CamembertForQuestionAnswering, + CamembertForSequenceClassification, + CamembertForTokenClassification, + CamembertModel, + ) + +if is_tf_available(): + from .modeling_tf_camembert import ( + TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFCamembertForMaskedLM, + TFCamembertForMultipleChoice, + TFCamembertForQuestionAnswering, + TFCamembertForSequenceClassification, + TFCamembertForTokenClassification, + TFCamembertModel, + ) diff --git a/src/transformers/configuration_camembert.py b/src/transformers/models/camembert/configuration_camembert.py similarity index 94% rename from src/transformers/configuration_camembert.py rename to src/transformers/models/camembert/configuration_camembert.py index 64e64e9f621..31f9d94a0d9 100644 --- a/src/transformers/configuration_camembert.py +++ b/src/transformers/models/camembert/configuration_camembert.py @@ -15,8 +15,8 @@ # limitations under the License. """ CamemBERT configuration """ -from .configuration_roberta import RobertaConfig -from .utils import logging +from ...utils import logging +from ..roberta.configuration_roberta import RobertaConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_camembert.py b/src/transformers/models/camembert/modeling_camembert.py similarity index 97% rename from src/transformers/modeling_camembert.py rename to src/transformers/models/camembert/modeling_camembert.py index 633975556f4..46bf8d20bbe 100644 --- a/src/transformers/modeling_camembert.py +++ b/src/transformers/models/camembert/modeling_camembert.py @@ -15,9 +15,9 @@ # limitations under the License. """PyTorch CamemBERT model. """ -from .configuration_camembert import CamembertConfig -from .file_utils import add_start_docstrings -from .modeling_roberta import ( +from ...file_utils import add_start_docstrings +from ...utils import logging +from ..roberta.modeling_roberta import ( RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, @@ -26,7 +26,7 @@ from .modeling_roberta import ( RobertaForTokenClassification, RobertaModel, ) -from .utils import logging +from .configuration_camembert import CamembertConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_tf_camembert.py b/src/transformers/models/camembert/modeling_tf_camembert.py similarity index 98% rename from src/transformers/modeling_tf_camembert.py rename to src/transformers/models/camembert/modeling_tf_camembert.py index 6b01884179d..f552c9f5c28 100644 --- a/src/transformers/modeling_tf_camembert.py +++ b/src/transformers/models/camembert/modeling_tf_camembert.py @@ -15,9 +15,9 @@ # limitations under the License. """ TF 2.0 CamemBERT model. 
""" -from .configuration_camembert import CamembertConfig -from .file_utils import add_start_docstrings -from .modeling_tf_roberta import ( +from ...file_utils import add_start_docstrings +from ...utils import logging +from ..roberta.modeling_tf_roberta import ( TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, @@ -25,7 +25,7 @@ from .modeling_tf_roberta import ( TFRobertaForTokenClassification, TFRobertaModel, ) -from .utils import logging +from .configuration_camembert import CamembertConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_camembert.py b/src/transformers/models/camembert/tokenization_camembert.py similarity index 99% rename from src/transformers/tokenization_camembert.py rename to src/transformers/models/camembert/tokenization_camembert.py index 794f3d8c711..734b8140638 100644 --- a/src/transformers/tokenization_camembert.py +++ b/src/transformers/models/camembert/tokenization_camembert.py @@ -21,8 +21,8 @@ from typing import List, Optional, Tuple import sentencepiece as spm -from .tokenization_utils import PreTrainedTokenizer -from .utils import logging +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_camembert_fast.py b/src/transformers/models/camembert/tokenization_camembert_fast.py similarity index 98% rename from src/transformers/tokenization_camembert_fast.py rename to src/transformers/models/camembert/tokenization_camembert_fast.py index 4e4cf9557df..55a609b3c23 100644 --- a/src/transformers/tokenization_camembert_fast.py +++ b/src/transformers/models/camembert/tokenization_camembert_fast.py @@ -19,9 +19,9 @@ import os from shutil import copyfile from typing import List, Optional, Tuple -from .file_utils import is_sentencepiece_available -from .tokenization_utils_fast import PreTrainedTokenizerFast -from .utils import logging +from ...file_utils import is_sentencepiece_available +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging if is_sentencepiece_available(): diff --git a/src/transformers/models/ctrl/__init__.py b/src/transformers/models/ctrl/__init__.py new file mode 100644 index 00000000000..d32bc870801 --- /dev/null +++ b/src/transformers/models/ctrl/__init__.py @@ -0,0 +1,19 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_tf_available, is_torch_available +from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig +from .tokenization_ctrl import CTRLTokenizer + + +if is_torch_available(): + from .modeling_ctrl import CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel + +if is_tf_available(): + from .modeling_tf_ctrl import ( + TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, + TFCTRLLMHeadModel, + TFCTRLModel, + TFCTRLPreTrainedModel, + ) diff --git a/src/transformers/configuration_ctrl.py b/src/transformers/models/ctrl/configuration_ctrl.py similarity index 98% rename from src/transformers/configuration_ctrl.py rename to src/transformers/models/ctrl/configuration_ctrl.py index f5e876697b2..faffaa0df96 100644 --- a/src/transformers/configuration_ctrl.py +++ b/src/transformers/models/ctrl/configuration_ctrl.py @@ -14,8 +14,8 @@ # limitations under the License. 
""" Salesforce CTRL configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_ctrl.py b/src/transformers/models/ctrl/modeling_ctrl.py similarity index 98% rename from src/transformers/modeling_ctrl.py rename to src/transformers/models/ctrl/modeling_ctrl.py index 10d6949b2cd..4b9ae6debe3 100644 --- a/src/transformers/modeling_ctrl.py +++ b/src/transformers/models/ctrl/modeling_ctrl.py @@ -23,11 +23,11 @@ import torch import torch.nn as nn from torch.nn import CrossEntropyLoss +from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward +from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast +from ...modeling_utils import Conv1D, PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import logging from .configuration_ctrl import CTRLConfig -from .file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward -from .modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast -from .modeling_utils import Conv1D, PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_tf_ctrl.py b/src/transformers/models/ctrl/modeling_tf_ctrl.py similarity index 98% rename from src/transformers/modeling_tf_ctrl.py rename to src/transformers/models/ctrl/modeling_tf_ctrl.py index 804614f16ab..2b0058f7040 100644 --- a/src/transformers/modeling_tf_ctrl.py +++ b/src/transformers/models/ctrl/modeling_tf_ctrl.py @@ -19,18 +19,18 @@ import numpy as np import tensorflow as tf -from .configuration_ctrl import CTRLConfig -from .file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward -from .modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast -from .modeling_tf_utils import ( +from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward +from ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast +from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFPreTrainedModel, TFSharedEmbeddings, keras_serializable, shape_list, ) -from .tokenization_utils import BatchEncoding -from .utils import logging +from ...tokenization_utils import BatchEncoding +from ...utils import logging +from .configuration_ctrl import CTRLConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_ctrl.py b/src/transformers/models/ctrl/tokenization_ctrl.py similarity index 99% rename from src/transformers/tokenization_ctrl.py rename to src/transformers/models/ctrl/tokenization_ctrl.py index 845a105cfd8..65df6bbab3e 100644 --- a/src/transformers/tokenization_ctrl.py +++ b/src/transformers/models/ctrl/tokenization_ctrl.py @@ -21,8 +21,8 @@ from typing import Optional, Tuple import regex as re -from .tokenization_utils import PreTrainedTokenizer -from .utils import logging +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/deberta/__init__.py b/src/transformers/models/deberta/__init__.py new file mode 100644 index 00000000000..f2b6cccbcf0 --- /dev/null +++ 
b/src/transformers/models/deberta/__init__.py @@ -0,0 +1,16 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_torch_available +from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig +from .tokenization_deberta import DebertaTokenizer + + +if is_torch_available(): + from .modeling_deberta import ( + DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, + DebertaForSequenceClassification, + DebertaModel, + DebertaPreTrainedModel, + ) diff --git a/src/transformers/configuration_deberta.py b/src/transformers/models/deberta/configuration_deberta.py similarity index 98% rename from src/transformers/configuration_deberta.py rename to src/transformers/models/deberta/configuration_deberta.py index 637dddd9877..25dd39cade8 100644 --- a/src/transformers/configuration_deberta.py +++ b/src/transformers/models/deberta/configuration_deberta.py @@ -14,8 +14,8 @@ # limitations under the License. """ DeBERTa model configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_deberta.py b/src/transformers/models/deberta/modeling_deberta.py similarity index 98% rename from src/transformers/modeling_deberta.py rename to src/transformers/models/deberta/modeling_deberta.py index 7e1b6837eb9..00ae44aa432 100644 --- a/src/transformers/modeling_deberta.py +++ b/src/transformers/models/deberta/modeling_deberta.py @@ -22,12 +22,12 @@ from packaging import version from torch import _softmax_backward_data, nn from torch.nn import CrossEntropyLoss -from .activations import ACT2FN +from ...activations import ACT2FN +from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward +from ...modeling_outputs import BaseModelOutput, SequenceClassifierOutput +from ...modeling_utils import PreTrainedModel +from ...utils import logging from .configuration_deberta import DebertaConfig -from .file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward -from .modeling_outputs import BaseModelOutput, SequenceClassifierOutput -from .modeling_utils import PreTrainedModel -from .utils import logging logger = logging.get_logger(__name__) @@ -74,7 +74,7 @@ class XSoftmax(torch.autograd.Function): Example:: import torch - from transformers.modeling_deroberta import XSoftmax + from transformers.models.deberta import XSoftmax # Make a tensor x = torch.randn([4,20,100]) # Create a mask @@ -278,7 +278,7 @@ class DebertaAttention(nn.Module): return attention_output -# Copied from transformers.modeling_bert.BertIntermediate with Bert->Deberta +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Deberta class DebertaIntermediate(nn.Module): def __init__(self, config): super().__init__() diff --git a/src/transformers/tokenization_deberta.py b/src/transformers/models/deberta/tokenization_deberta.py similarity index 99% rename from src/transformers/tokenization_deberta.py rename to src/transformers/models/deberta/tokenization_deberta.py index 057dd94a59e..4edba5fd599 100644 --- a/src/transformers/tokenization_deberta.py +++ b/src/transformers/models/deberta/tokenization_deberta.py @@ -26,8 +26,8 @@ import tqdm import requests -from .tokenization_utils import 
PreTrainedTokenizer -from .utils import logging +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging try: diff --git a/src/transformers/models/dialogpt/__init__.py b/src/transformers/models/dialogpt/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/transformers/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py similarity index 100% rename from src/transformers/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py rename to src/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py diff --git a/src/transformers/models/distilbert/__init__.py b/src/transformers/models/distilbert/__init__.py new file mode 100644 index 00000000000..722c7058b9b --- /dev/null +++ b/src/transformers/models/distilbert/__init__.py @@ -0,0 +1,36 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig +from .tokenization_distilbert import DistilBertTokenizer + + +if is_tokenizers_available(): + from .tokenization_distilbert_fast import DistilBertTokenizerFast + +if is_torch_available(): + from .modeling_distilbert import ( + DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + DistilBertForMaskedLM, + DistilBertForMultipleChoice, + DistilBertForQuestionAnswering, + DistilBertForSequenceClassification, + DistilBertForTokenClassification, + DistilBertModel, + DistilBertPreTrainedModel, + ) + +if is_tf_available(): + from .modeling_tf_distilbert import ( + TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFDistilBertForMaskedLM, + TFDistilBertForMultipleChoice, + TFDistilBertForQuestionAnswering, + TFDistilBertForSequenceClassification, + TFDistilBertForTokenClassification, + TFDistilBertMainLayer, + TFDistilBertModel, + TFDistilBertPreTrainedModel, + ) diff --git a/src/transformers/configuration_distilbert.py b/src/transformers/models/distilbert/configuration_distilbert.py similarity index 98% rename from src/transformers/configuration_distilbert.py rename to src/transformers/models/distilbert/configuration_distilbert.py index 269a9cbac65..df561b65169 100644 --- a/src/transformers/configuration_distilbert.py +++ b/src/transformers/models/distilbert/configuration_distilbert.py @@ -14,8 +14,8 @@ # limitations under the License. 
""" DistilBERT model configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_distilbert.py b/src/transformers/models/distilbert/modeling_distilbert.py similarity index 99% rename from src/transformers/modeling_distilbert.py rename to src/transformers/models/distilbert/modeling_distilbert.py index a75e3f32b48..2b2fdb5252c 100755 --- a/src/transformers/modeling_distilbert.py +++ b/src/transformers/models/distilbert/modeling_distilbert.py @@ -27,15 +27,14 @@ import torch import torch.nn as nn from torch.nn import CrossEntropyLoss -from .activations import gelu -from .configuration_distilbert import DistilBertConfig -from .file_utils import ( +from ...activations import gelu +from ...file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_outputs import ( +from ...modeling_outputs import ( BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, @@ -43,13 +42,14 @@ from .modeling_outputs import ( SequenceClassifierOutput, TokenClassifierOutput, ) -from .modeling_utils import ( +from ...modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) -from .utils import logging +from ...utils import logging +from .configuration_distilbert import DistilBertConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_tf_distilbert.py b/src/transformers/models/distilbert/modeling_tf_distilbert.py similarity index 99% rename from src/transformers/modeling_tf_distilbert.py rename to src/transformers/models/distilbert/modeling_tf_distilbert.py index 4239c38116a..ca104a47967 100644 --- a/src/transformers/modeling_tf_distilbert.py +++ b/src/transformers/models/distilbert/modeling_tf_distilbert.py @@ -19,15 +19,14 @@ import tensorflow as tf -from .activations_tf import get_tf_activation -from .configuration_distilbert import DistilBertConfig -from .file_utils import ( +from ...activations_tf import get_tf_activation +from ...file_utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, ) -from .modeling_tf_outputs import ( +from ...modeling_tf_outputs import ( TFBaseModelOutput, TFMaskedLMOutput, TFMultipleChoiceModelOutput, @@ -35,7 +34,7 @@ from .modeling_tf_outputs import ( TFSequenceClassifierOutput, TFTokenClassifierOutput, ) -from .modeling_tf_utils import ( +from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFMultipleChoiceLoss, TFPreTrainedModel, @@ -47,8 +46,9 @@ from .modeling_tf_utils import ( keras_serializable, shape_list, ) -from .tokenization_utils import BatchEncoding -from .utils import logging +from ...tokenization_utils import BatchEncoding +from ...utils import logging +from .configuration_distilbert import DistilBertConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_distilbert.py b/src/transformers/models/distilbert/tokenization_distilbert.py similarity index 97% rename from src/transformers/tokenization_distilbert.py rename to src/transformers/models/distilbert/tokenization_distilbert.py index 8f4109ed763..5c08b4e93eb 100644 --- a/src/transformers/tokenization_distilbert.py +++ b/src/transformers/models/distilbert/tokenization_distilbert.py @@ -14,8 +14,8 @@ # limitations under 
the License. """Tokenization classes for DistilBERT.""" -from .tokenization_bert import BertTokenizer -from .utils import logging +from ...utils import logging +from ..bert.tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_distilbert_fast.py b/src/transformers/models/distilbert/tokenization_distilbert_fast.py similarity index 97% rename from src/transformers/tokenization_distilbert_fast.py rename to src/transformers/models/distilbert/tokenization_distilbert_fast.py index 7129bc3eec6..a0e40ca1f7b 100644 --- a/src/transformers/tokenization_distilbert_fast.py +++ b/src/transformers/models/distilbert/tokenization_distilbert_fast.py @@ -14,9 +14,9 @@ # limitations under the License. """Tokenization classes for DistilBERT.""" -from .tokenization_bert_fast import BertTokenizerFast +from ...utils import logging +from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_distilbert import DistilBertTokenizer -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/dpr/__init__.py b/src/transformers/models/dpr/__init__.py new file mode 100644 index 00000000000..f48e7d99605 --- /dev/null +++ b/src/transformers/models/dpr/__init__.py @@ -0,0 +1,46 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_dpr import DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, DPRConfig +from .tokenization_dpr import ( + DPRContextEncoderTokenizer, + DPRQuestionEncoderTokenizer, + DPRReaderOutput, + DPRReaderTokenizer, +) + + +if is_tokenizers_available(): + from .tokenization_dpr_fast import ( + DPRContextEncoderTokenizerFast, + DPRQuestionEncoderTokenizerFast, + DPRReaderTokenizerFast, + ) + +if is_torch_available(): + from .modeling_dpr import ( + DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, + DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, + DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, + DPRContextEncoder, + DPRPretrainedContextEncoder, + DPRPretrainedQuestionEncoder, + DPRPretrainedReader, + DPRQuestionEncoder, + DPRReader, + ) + +if is_tf_available(): + from .modeling_tf_dpr import ( + TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, + TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, + TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, + TFDPRContextEncoder, + TFDPRPretrainedContextEncoder, + TFDPRPretrainedQuestionEncoder, + TFDPRPretrainedReader, + TFDPRQuestionEncoder, + TFDPRReader, + ) diff --git a/src/transformers/configuration_dpr.py b/src/transformers/models/dpr/configuration_dpr.py similarity index 98% rename from src/transformers/configuration_dpr.py rename to src/transformers/models/dpr/configuration_dpr.py index 3ea59679471..efc9d5e1f47 100644 --- a/src/transformers/configuration_dpr.py +++ b/src/transformers/models/dpr/configuration_dpr.py @@ -14,8 +14,8 @@ # limitations under the License. 
""" DPR model configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_dpr_original_checkpoint_to_pytorch.py b/src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py similarity index 100% rename from src/transformers/convert_dpr_original_checkpoint_to_pytorch.py rename to src/transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py diff --git a/src/transformers/modeling_dpr.py b/src/transformers/models/dpr/modeling_dpr.py similarity index 99% rename from src/transformers/modeling_dpr.py rename to src/transformers/models/dpr/modeling_dpr.py index b93908b9333..5d5763137bb 100644 --- a/src/transformers/modeling_dpr.py +++ b/src/transformers/models/dpr/modeling_dpr.py @@ -21,17 +21,17 @@ from typing import Optional, Tuple, Union import torch from torch import Tensor, nn -from .configuration_dpr import DPRConfig -from .file_utils import ( +from ...file_utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_bert import BertModel -from .modeling_outputs import BaseModelOutputWithPooling -from .modeling_utils import PreTrainedModel -from .utils import logging +from ...modeling_outputs import BaseModelOutputWithPooling +from ...modeling_utils import PreTrainedModel +from ...utils import logging +from ..bert.modeling_bert import BertModel +from .configuration_dpr import DPRConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_tf_dpr.py b/src/transformers/models/dpr/modeling_tf_dpr.py similarity index 99% rename from src/transformers/modeling_tf_dpr.py rename to src/transformers/models/dpr/modeling_tf_dpr.py index 45f29ded13b..598321fc0a3 100644 --- a/src/transformers/modeling_tf_dpr.py +++ b/src/transformers/models/dpr/modeling_tf_dpr.py @@ -22,18 +22,18 @@ import tensorflow as tf from tensorflow import Tensor from tensorflow.keras.layers import Dense -from .configuration_dpr import DPRConfig -from .file_utils import ( +from ...file_utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_tf_bert import TFBertMainLayer -from .modeling_tf_outputs import TFBaseModelOutputWithPooling -from .modeling_tf_utils import TFPreTrainedModel, get_initializer, shape_list -from .tokenization_utils import BatchEncoding -from .utils import logging +from ...modeling_tf_outputs import TFBaseModelOutputWithPooling +from ...modeling_tf_utils import TFPreTrainedModel, get_initializer, shape_list +from ...tokenization_utils import BatchEncoding +from ...utils import logging +from ..bert.modeling_tf_bert import TFBertMainLayer +from .configuration_dpr import DPRConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_dpr.py b/src/transformers/models/dpr/tokenization_dpr.py similarity index 98% rename from src/transformers/tokenization_dpr.py rename to src/transformers/models/dpr/tokenization_dpr.py index b48349c168c..7fa6c96233f 100644 --- a/src/transformers/tokenization_dpr.py +++ b/src/transformers/models/dpr/tokenization_dpr.py @@ -18,10 +18,10 @@ import collections from typing import List, Optional, Union -from .file_utils import add_end_docstrings, add_start_docstrings -from .tokenization_bert import BertTokenizer -from .tokenization_utils_base import BatchEncoding, TensorType 
-from .utils import logging +from ...file_utils import add_end_docstrings, add_start_docstrings +from ...tokenization_utils_base import BatchEncoding, TensorType +from ...utils import logging +from ..bert.tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_dpr_fast.py b/src/transformers/models/dpr/tokenization_dpr_fast.py similarity index 98% rename from src/transformers/tokenization_dpr_fast.py rename to src/transformers/models/dpr/tokenization_dpr_fast.py index 7d5f052051f..d3364433cbe 100644 --- a/src/transformers/tokenization_dpr_fast.py +++ b/src/transformers/models/dpr/tokenization_dpr_fast.py @@ -18,11 +18,11 @@ import collections from typing import List, Optional, Union -from .file_utils import add_end_docstrings, add_start_docstrings -from .tokenization_bert_fast import BertTokenizerFast +from ...file_utils import add_end_docstrings, add_start_docstrings +from ...tokenization_utils_base import BatchEncoding, TensorType +from ...utils import logging +from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer -from .tokenization_utils_base import BatchEncoding, TensorType -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/electra/__init__.py b/src/transformers/models/electra/__init__.py new file mode 100644 index 00000000000..bf54f0b9dc7 --- /dev/null +++ b/src/transformers/models/electra/__init__.py @@ -0,0 +1,38 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig +from .tokenization_electra import ElectraTokenizer + + +if is_tokenizers_available(): + from .tokenization_electra_fast import ElectraTokenizerFast + +if is_torch_available(): + from .modeling_electra import ( + ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, + ElectraForMaskedLM, + ElectraForMultipleChoice, + ElectraForPreTraining, + ElectraForQuestionAnswering, + ElectraForSequenceClassification, + ElectraForTokenClassification, + ElectraModel, + ElectraPreTrainedModel, + load_tf_weights_in_electra, + ) + +if is_tf_available(): + from .modeling_tf_electra import ( + TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, + TFElectraForMaskedLM, + TFElectraForMultipleChoice, + TFElectraForPreTraining, + TFElectraForQuestionAnswering, + TFElectraForSequenceClassification, + TFElectraForTokenClassification, + TFElectraModel, + TFElectraPreTrainedModel, + ) diff --git a/src/transformers/configuration_electra.py b/src/transformers/models/electra/configuration_electra.py similarity index 99% rename from src/transformers/configuration_electra.py rename to src/transformers/models/electra/configuration_electra.py index 439d80ca674..c26d055bee6 100644 --- a/src/transformers/configuration_electra.py +++ b/src/transformers/models/electra/configuration_electra.py @@ -15,8 +15,8 @@ # limitations under the License. 
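The recurring import rewrites above (.configuration_utils -> ...configuration_utils, .tokenization_bert -> ..bert.tokenization_bert) follow directly from each module now sitting two package levels deeper, under transformers/models/<model>/. A small standard-library-only sketch of how those dot counts resolve; the package names simply mirror the new layout:

import importlib.util

# one leading dot = current package; each extra dot climbs one level
print(importlib.util.resolve_name("...utils", package="transformers.models.electra"))
# -> transformers.utils
print(importlib.util.resolve_name("..bert.tokenization_bert", package="transformers.models.electra"))
# -> transformers.models.bert.tokenization_bert
print(importlib.util.resolve_name(".configuration_electra", package="transformers.models.electra"))
# -> transformers.models.electra.configuration_electra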
""" ELECTRA model configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_electra_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py similarity index 100% rename from src/transformers/convert_electra_original_tf_checkpoint_to_pytorch.py rename to src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py diff --git a/src/transformers/modeling_electra.py b/src/transformers/models/electra/modeling_electra.py similarity index 98% rename from src/transformers/modeling_electra.py rename to src/transformers/models/electra/modeling_electra.py index 2dca8b215b9..00dfa76dc11 100644 --- a/src/transformers/modeling_electra.py +++ b/src/transformers/models/electra/modeling_electra.py @@ -24,16 +24,15 @@ import torch import torch.nn as nn from torch.nn import CrossEntropyLoss, MSELoss -from .activations import ACT2FN, get_activation -from .configuration_electra import ElectraConfig -from .file_utils import ( +from ...activations import ACT2FN, get_activation +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_outputs import ( +from ...modeling_outputs import ( BaseModelOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, @@ -41,14 +40,15 @@ from .modeling_outputs import ( SequenceClassifierOutput, TokenClassifierOutput, ) -from .modeling_utils import ( +from ...modeling_utils import ( PreTrainedModel, SequenceSummary, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) -from .utils import logging +from ...utils import logging +from .configuration_electra import ElectraConfig logger = logging.get_logger(__name__) @@ -167,7 +167,7 @@ class ElectraEmbeddings(nn.Module): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - # Copied from transformers.modeling_bert.BertEmbeddings.forward + # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() @@ -193,7 +193,7 @@ class ElectraEmbeddings(nn.Module): return embeddings -# Copied from transformers.modeling_bert.BertSelfAttention with Bert->Electra +# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Electra class ElectraSelfAttention(nn.Module): def __init__(self, config): super().__init__() @@ -272,7 +272,7 @@ class ElectraSelfAttention(nn.Module): return outputs -# Copied from transformers.modeling_bert.BertSelfOutput +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput class ElectraSelfOutput(nn.Module): def __init__(self, config): super().__init__() @@ -287,7 +287,7 @@ class ElectraSelfOutput(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertAttention with Bert->Electra +# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Electra class ElectraAttention(nn.Module): def __init__(self, config): super().__init__() @@ -335,7 +335,7 @@ class ElectraAttention(nn.Module): return outputs -# Copied 
from transformers.modeling_bert.BertIntermediate +# Copied from transformers.models.bert.modeling_bert.BertIntermediate class ElectraIntermediate(nn.Module): def __init__(self, config): super().__init__() @@ -351,7 +351,7 @@ class ElectraIntermediate(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertOutput +# Copied from transformers.models.bert.modeling_bert.BertOutput class ElectraOutput(nn.Module): def __init__(self, config): super().__init__() @@ -366,7 +366,7 @@ class ElectraOutput(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertLayer with Bert->Electra +# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Electra class ElectraLayer(nn.Module): def __init__(self, config): super().__init__() @@ -426,7 +426,7 @@ class ElectraLayer(nn.Module): return layer_output -# Copied from transformers.modeling_bert.BertEncoder with Bert->Electra +# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Electra class ElectraEncoder(nn.Module): def __init__(self, config): super().__init__() @@ -548,7 +548,7 @@ class ElectraPreTrainedModel(PreTrainedModel): authorized_missing_keys = [r"position_ids"] authorized_unexpected_keys = [r"electra\.embeddings_project\.weight", r"electra\.embeddings_project\.bias"] - # Copied from transformers.modeling_bert.BertPreTrainedModel._init_weights + # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): diff --git a/src/transformers/modeling_tf_electra.py b/src/transformers/models/electra/modeling_tf_electra.py similarity index 98% rename from src/transformers/modeling_tf_electra.py rename to src/transformers/models/electra/modeling_tf_electra.py index 4fc34923784..aff9d735c0a 100644 --- a/src/transformers/modeling_tf_electra.py +++ b/src/transformers/models/electra/modeling_tf_electra.py @@ -4,9 +4,8 @@ from typing import Optional, Tuple import tensorflow as tf -from .activations_tf import get_tf_activation -from .configuration_electra import ElectraConfig -from .file_utils import ( +from ...activations_tf import get_tf_activation +from ...file_utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, @@ -14,7 +13,7 @@ from .file_utils import ( add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_tf_outputs import ( +from ...modeling_tf_outputs import ( TFBaseModelOutput, TFMaskedLMOutput, TFMultipleChoiceModelOutput, @@ -22,7 +21,7 @@ from .modeling_tf_outputs import ( TFSequenceClassifierOutput, TFTokenClassifierOutput, ) -from .modeling_tf_utils import ( +from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFMultipleChoiceLoss, TFPreTrainedModel, @@ -34,8 +33,9 @@ from .modeling_tf_utils import ( keras_serializable, shape_list, ) -from .tokenization_utils import BatchEncoding -from .utils import logging +from ...tokenization_utils import BatchEncoding +from ...utils import logging +from .configuration_electra import ElectraConfig logger = logging.get_logger(__name__) @@ -54,7 +54,7 @@ TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = [ ] -# Copied from transformers.modeling_tf_bert.TFBertSelfAttention +# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention class TFElectraSelfAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -126,7 +126,7 @@ class 
TFElectraSelfAttention(tf.keras.layers.Layer): return outputs -# Copied from transformers.modeling_tf_bert.TFBertSelfOutput +# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput class TFElectraSelfOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -145,7 +145,7 @@ class TFElectraSelfOutput(tf.keras.layers.Layer): return hidden_states -# Copied from from transformers.modeling_tf_bert.TFBertAttention with Bert->Electra +# Copied from from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->Electra class TFElectraAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -166,7 +166,7 @@ class TFElectraAttention(tf.keras.layers.Layer): return outputs -# Copied from transformers.modeling_tf_bert.TFBertIntermediate +# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate class TFElectraIntermediate(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -187,7 +187,7 @@ class TFElectraIntermediate(tf.keras.layers.Layer): return hidden_states -# Copied from transformers.modeling_tf_bert.TFBertOutput +# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput class TFElectraOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -206,7 +206,7 @@ class TFElectraOutput(tf.keras.layers.Layer): return hidden_states -# Copied from transformers.modeling_tf_bert.TFBertLayer with Bert->Electra +# Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->Electra class TFElectraLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -227,7 +227,7 @@ class TFElectraLayer(tf.keras.layers.Layer): return outputs -# Copied from transformers.modeling_tf_bert.TFBertEncoder with Bert->Electra +# Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->Electra class TFElectraEncoder(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -271,7 +271,7 @@ class TFElectraEncoder(tf.keras.layers.Layer): ) -# Copied from transformers.modeling_tf_bert.TFBertPooler +# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler class TFElectraPooler(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -332,7 +332,7 @@ class TFElectraEmbeddings(tf.keras.layers.Layer): super().build(input_shape) - # Copied from transformers.modeling_tf_bert.TFBertEmbeddings.call + # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call def call( self, input_ids=None, @@ -367,7 +367,7 @@ class TFElectraEmbeddings(tf.keras.layers.Layer): else: raise ValueError("mode {} is not valid.".format(mode)) - # Copied from transformers.modeling_tf_bert.TFBertEmbeddings._embedding + # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings._embedding def _embedding(self, input_ids, position_ids, token_type_ids, inputs_embeds, training=False): """Applies embedding based on inputs tensor.""" assert not (input_ids is None and inputs_embeds is None) diff --git a/src/transformers/tokenization_electra.py b/src/transformers/models/electra/tokenization_electra.py similarity index 98% rename from src/transformers/tokenization_electra.py rename to src/transformers/models/electra/tokenization_electra.py index 9b0e394bf18..89c6c922e99 100644 --- a/src/transformers/tokenization_electra.py +++ 
b/src/transformers/models/electra/tokenization_electra.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .tokenization_bert import BertTokenizer +from ..bert.tokenization_bert import BertTokenizer VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} diff --git a/src/transformers/tokenization_electra_fast.py b/src/transformers/models/electra/tokenization_electra_fast.py similarity index 98% rename from src/transformers/tokenization_electra_fast.py rename to src/transformers/models/electra/tokenization_electra_fast.py index 470b936d720..67259d83eae 100644 --- a/src/transformers/tokenization_electra_fast.py +++ b/src/transformers/models/electra/tokenization_electra_fast.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .tokenization_bert_fast import BertTokenizerFast +from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_electra import ElectraTokenizer diff --git a/src/transformers/models/encoder_decoder/__init__.py b/src/transformers/models/encoder_decoder/__init__.py new file mode 100644 index 00000000000..daebae1d97e --- /dev/null +++ b/src/transformers/models/encoder_decoder/__init__.py @@ -0,0 +1,10 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_torch_available +from .configuration_encoder_decoder import EncoderDecoderConfig + + +if is_torch_available(): + from .modeling_encoder_decoder import EncoderDecoderModel diff --git a/src/transformers/configuration_encoder_decoder.py b/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py similarity index 97% rename from src/transformers/configuration_encoder_decoder.py rename to src/transformers/models/encoder_decoder/configuration_encoder_decoder.py index 5b73be7b9c2..b12e32a2c32 100644 --- a/src/transformers/configuration_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/configuration_encoder_decoder.py @@ -16,8 +16,8 @@ import copy -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) @@ -81,7 +81,7 @@ class EncoderDecoderConfig(PretrainedConfig): decoder_config = kwargs.pop("decoder") decoder_model_type = decoder_config.pop("model_type") - from .configuration_auto import AutoConfig + from ..auto.configuration_auto import AutoConfig self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config) self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config) diff --git a/src/transformers/modeling_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py similarity index 97% rename from src/transformers/modeling_encoder_decoder.py rename to src/transformers/models/encoder_decoder/modeling_encoder_decoder.py index 81d6c64661f..c16514ac6cc 100644 --- a/src/transformers/modeling_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py @@ -17,12 +17,12 @@ from typing import Optional +from ...configuration_utils import PretrainedConfig +from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings +from ...modeling_outputs import Seq2SeqLMOutput +from ...modeling_utils import PreTrainedModel +from 
...utils import logging from .configuration_encoder_decoder import EncoderDecoderConfig -from .configuration_utils import PretrainedConfig -from .file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings -from .modeling_outputs import Seq2SeqLMOutput -from .modeling_utils import PreTrainedModel -from .utils import logging logger = logging.get_logger(__name__) @@ -155,12 +155,12 @@ class EncoderDecoderModel(PreTrainedModel): super().__init__(config) if encoder is None: - from .modeling_auto import AutoModel + from ..auto.modeling_auto import AutoModel encoder = AutoModel.from_config(config.encoder) if decoder is None: - from .modeling_auto import AutoModelForCausalLM + from ..auto.modeling_auto import AutoModelForCausalLM decoder = AutoModelForCausalLM.from_config(config.decoder) @@ -286,10 +286,10 @@ class EncoderDecoderModel(PreTrainedModel): assert ( encoder_pretrained_model_name_or_path is not None ), "If `model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has to be defined" - from .modeling_auto import AutoModel + from ..auto.modeling_auto import AutoModel if "config" not in kwargs_encoder: - from .configuration_auto import AutoConfig + from ..auto.configuration_auto import AutoConfig encoder_config = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path) if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True: @@ -309,10 +309,10 @@ class EncoderDecoderModel(PreTrainedModel): assert ( decoder_pretrained_model_name_or_path is not None ), "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has to be defined" - from .modeling_auto import AutoModelForCausalLM + from ..auto.modeling_auto import AutoModelForCausalLM if "config" not in kwargs_decoder: - from .configuration_auto import AutoConfig + from ..auto.configuration_auto import AutoConfig decoder_config = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path) if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False: diff --git a/src/transformers/models/flaubert/__init__.py b/src/transformers/models/flaubert/__init__.py new file mode 100644 index 00000000000..8c1b5abebf0 --- /dev/null +++ b/src/transformers/models/flaubert/__init__.py @@ -0,0 +1,31 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
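In the encoder_decoder hunks above, the Auto* imports stay inside the methods that need them; only their spelling changes (.modeling_auto -> ..auto.modeling_auto). Deferring those imports avoids a circular dependency, since the auto-mapping modules themselves import every model class, including the encoder-decoder ones. A minimal sketch of the pattern, assuming torch and the new layout:

def build_encoder(config):
    # imported here rather than at module top level, so merely importing this
    # module never pulls in the (circular) auto-mapping machinery
    from transformers.models.auto.modeling_auto import AutoModel

    return AutoModel.from_config(config)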
+ +from ...file_utils import is_tf_available, is_torch_available +from .configuration_flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig +from .tokenization_flaubert import FlaubertTokenizer + + +if is_torch_available(): + from .modeling_flaubert import ( + FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + FlaubertForMultipleChoice, + FlaubertForQuestionAnswering, + FlaubertForQuestionAnsweringSimple, + FlaubertForSequenceClassification, + FlaubertForTokenClassification, + FlaubertModel, + FlaubertWithLMHeadModel, + ) + +if is_tf_available(): + from .modeling_tf_flaubert import ( + TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFFlaubertForMultipleChoice, + TFFlaubertForQuestionAnsweringSimple, + TFFlaubertForSequenceClassification, + TFFlaubertForTokenClassification, + TFFlaubertModel, + TFFlaubertWithLMHeadModel, + ) diff --git a/src/transformers/configuration_flaubert.py b/src/transformers/models/flaubert/configuration_flaubert.py similarity index 99% rename from src/transformers/configuration_flaubert.py rename to src/transformers/models/flaubert/configuration_flaubert.py index 508543ecbef..436e1a8871d 100644 --- a/src/transformers/configuration_flaubert.py +++ b/src/transformers/models/flaubert/configuration_flaubert.py @@ -14,8 +14,8 @@ # limitations under the License. """ Flaubert configuration, based on XLM. """ -from .configuration_xlm import XLMConfig -from .utils import logging +from ...utils import logging +from ..xlm.configuration_xlm import XLMConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_flaubert.py b/src/transformers/models/flaubert/modeling_flaubert.py similarity index 98% rename from src/transformers/modeling_flaubert.py rename to src/transformers/models/flaubert/modeling_flaubert.py index 4b90bbc2314..6168d7d229b 100644 --- a/src/transformers/modeling_flaubert.py +++ b/src/transformers/models/flaubert/modeling_flaubert.py @@ -20,10 +20,10 @@ import random import torch from torch.nn import functional as F -from .configuration_flaubert import FlaubertConfig -from .file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward -from .modeling_outputs import BaseModelOutput -from .modeling_xlm import ( +from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward +from ...modeling_outputs import BaseModelOutput +from ...utils import logging +from ..xlm.modeling_xlm import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, @@ -33,7 +33,7 @@ from .modeling_xlm import ( XLMWithLMHeadModel, get_masks, ) -from .utils import logging +from .configuration_flaubert import FlaubertConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_tf_flaubert.py b/src/transformers/models/flaubert/modeling_tf_flaubert.py similarity index 98% rename from src/transformers/modeling_tf_flaubert.py rename to src/transformers/models/flaubert/modeling_tf_flaubert.py index 59604249ba1..61799337854 100644 --- a/src/transformers/modeling_tf_flaubert.py +++ b/src/transformers/models/flaubert/modeling_tf_flaubert.py @@ -24,23 +24,23 @@ import tensorflow as tf from transformers.activations_tf import get_tf_activation -from .configuration_flaubert import FlaubertConfig -from .file_utils import ( +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, ) -from .modeling_tf_outputs import TFBaseModelOutput -from .modeling_tf_utils import 
TFPreTrainedModel, TFSharedEmbeddings, get_initializer, keras_serializable, shape_list -from .modeling_tf_xlm import ( +from ...modeling_tf_outputs import TFBaseModelOutput +from ...modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, get_initializer, keras_serializable, shape_list +from ...tokenization_utils import BatchEncoding +from ...utils import logging +from ..xlm.modeling_tf_xlm import ( TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, ) -from .tokenization_utils import BatchEncoding -from .utils import logging +from .configuration_flaubert import FlaubertConfig logger = logging.get_logger(__name__) @@ -234,7 +234,7 @@ class TFFlaubertModel(TFFlaubertPreTrainedModel): return outputs -# Copied from transformers.modeling_tf_xlm.TFXLMMultiHeadAttention with XLM->Flaubert +# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMMultiHeadAttention with XLM->Flaubert class TFFlaubertMultiHeadAttention(tf.keras.layers.Layer): NEW_ID = itertools.count() @@ -328,7 +328,7 @@ class TFFlaubertMultiHeadAttention(tf.keras.layers.Layer): return outputs -# Copied from transformers.modeling_tf_xlm.TFXLMTransformerFFN +# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMTransformerFFN class TFFlaubertTransformerFFN(tf.keras.layers.Layer): def __init__(self, in_dim, dim_hidden, out_dim, config, **kwargs): super().__init__(**kwargs) @@ -632,7 +632,7 @@ class TFFlaubertMainLayer(tf.keras.layers.Layer): return TFBaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions) -# Copied from transformers.modeling_tf_xlm.TFXLMPredLayer +# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMPredLayer class TFFlaubertPredLayer(tf.keras.layers.Layer): """ Prediction layer (cross_entropy or adaptive_softmax). diff --git a/src/transformers/tokenization_flaubert.py b/src/transformers/models/flaubert/tokenization_flaubert.py similarity index 98% rename from src/transformers/tokenization_flaubert.py rename to src/transformers/models/flaubert/tokenization_flaubert.py index b81793ecfce..96dc7ad2829 100644 --- a/src/transformers/tokenization_flaubert.py +++ b/src/transformers/models/flaubert/tokenization_flaubert.py @@ -19,8 +19,8 @@ import unicodedata import six -from .tokenization_xlm import XLMTokenizer -from .utils import logging +from ...utils import logging +from ..xlm.tokenization_xlm import XLMTokenizer logger = logging.get_logger(__name__) diff --git a/src/transformers/models/fsmt/__init__.py b/src/transformers/models/fsmt/__init__.py new file mode 100644 index 00000000000..b839dc0e2a6 --- /dev/null +++ b/src/transformers/models/fsmt/__init__.py @@ -0,0 +1,11 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
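The Flaubert hunks above show the cross-model reuse pattern: a model defined as a thin extension of another one now imports its base classes from the sibling subpackage (..xlm....) instead of a flat top-level module. A hypothetical configuration subclass in the same spirit; the class and its extra fields are illustrative, not part of the diff:

from transformers.models.xlm.configuration_xlm import XLMConfig

class FlaubertLikeConfig(XLMConfig):
    """Illustrative only: rides on top of XLM's configuration, adding two knobs."""

    model_type = "flaubert-like"

    def __init__(self, layerdrop=0.0, pre_norm=False, **kwargs):
        super().__init__(**kwargs)
        self.layerdrop = layerdrop
        self.pre_norm = pre_norm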
+ +from ...file_utils import is_torch_available +from .configuration_fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig +from .tokenization_fsmt import FSMTTokenizer + + +if is_torch_available(): + from .modeling_fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel diff --git a/src/transformers/configuration_fsmt.py b/src/transformers/models/fsmt/configuration_fsmt.py similarity index 99% rename from src/transformers/configuration_fsmt.py rename to src/transformers/models/fsmt/configuration_fsmt.py index 16a68b514d5..c17e9b38e0f 100644 --- a/src/transformers/configuration_fsmt.py +++ b/src/transformers/models/fsmt/configuration_fsmt.py @@ -17,8 +17,8 @@ import copy -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py similarity index 98% rename from src/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py rename to src/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py index 39da7c894ae..e27650d7dd0 100755 --- a/src/transformers/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py @@ -32,9 +32,7 @@ from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import WEIGHTS_NAME, logging -from transformers.configuration_fsmt import FSMTConfig -from transformers.modeling_fsmt import FSMTForConditionalGeneration -from transformers.tokenization_fsmt import VOCAB_FILES_NAMES +from transformers.models.fsmt import VOCAB_FILES_NAMES, FSMTConfig, FSMTForConditionalGeneration from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE diff --git a/src/transformers/modeling_fsmt.py b/src/transformers/models/fsmt/modeling_fsmt.py similarity index 99% rename from src/transformers/modeling_fsmt.py rename to src/transformers/models/fsmt/modeling_fsmt.py index 471181dd4fc..b4b42ebff23 100644 --- a/src/transformers/modeling_fsmt.py +++ b/src/transformers/models/fsmt/modeling_fsmt.py @@ -37,23 +37,23 @@ import torch.nn.functional as F from torch import Tensor, nn from torch.nn import CrossEntropyLoss -from .activations import ACT2FN -from .configuration_fsmt import FSMTConfig -from .file_utils import ( +from ...activations import ACT2FN +from ...file_utils import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_outputs import ( +from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) -from .modeling_utils import PreTrainedModel -from .utils import logging +from ...modeling_utils import PreTrainedModel +from ...utils import logging +from .configuration_fsmt import FSMTConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_fsmt.py b/src/transformers/models/fsmt/tokenization_fsmt.py similarity index 98% rename from src/transformers/tokenization_fsmt.py rename to src/transformers/models/fsmt/tokenization_fsmt.py index fae7a7a562b..083906bf303 100644 --- a/src/transformers/tokenization_fsmt.py +++ b/src/transformers/models/fsmt/tokenization_fsmt.py @@ -23,10 +23,10 @@ from typing 
import Dict, List, Optional, Tuple import sacremoses as sm -from .file_utils import add_start_docstrings -from .tokenization_utils import BatchEncoding, PreTrainedTokenizer -from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING -from .utils import logging +from ...file_utils import add_start_docstrings +from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer +from ...tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/funnel/__init__.py b/src/transformers/models/funnel/__init__.py new file mode 100644 index 00000000000..7f528f2ac1c --- /dev/null +++ b/src/transformers/models/funnel/__init__.py @@ -0,0 +1,38 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig +from .tokenization_funnel import FunnelTokenizer + + +if is_tokenizers_available(): + from .tokenization_funnel_fast import FunnelTokenizerFast + +if is_torch_available(): + from .modeling_funnel import ( + FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, + FunnelBaseModel, + FunnelForMaskedLM, + FunnelForMultipleChoice, + FunnelForPreTraining, + FunnelForQuestionAnswering, + FunnelForSequenceClassification, + FunnelForTokenClassification, + FunnelModel, + load_tf_weights_in_funnel, + ) + +if is_tf_available(): + from .modeling_tf_funnel import ( + TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, + TFFunnelBaseModel, + TFFunnelForMaskedLM, + TFFunnelForMultipleChoice, + TFFunnelForPreTraining, + TFFunnelForQuestionAnswering, + TFFunnelForSequenceClassification, + TFFunnelForTokenClassification, + TFFunnelModel, + ) diff --git a/src/transformers/configuration_funnel.py b/src/transformers/models/funnel/configuration_funnel.py similarity index 99% rename from src/transformers/configuration_funnel.py rename to src/transformers/models/funnel/configuration_funnel.py index 6a463240adf..aeb836e9e9c 100644 --- a/src/transformers/configuration_funnel.py +++ b/src/transformers/models/funnel/configuration_funnel.py @@ -14,8 +14,8 @@ # limitations under the License. 
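As the FSMT conversion-script hunk above illustrates, internal code can now import straight from the model subpackage, while the top-level transformers namespace keeps re-exporting the same objects, so existing user imports are not expected to break. A quick, hedged sanity check, assuming a build with this layout installed:

import transformers
from transformers.models.fsmt import FSMTConfig

assert transformers.FSMTConfig is FSMTConfig  # same class, two import paths
print(FSMTConfig.__module__)                  # transformers.models.fsmt.configuration_fsmt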
""" Funnel Transformer model configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_funnel_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py similarity index 100% rename from src/transformers/convert_funnel_original_tf_checkpoint_to_pytorch.py rename to src/transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py diff --git a/src/transformers/modeling_funnel.py b/src/transformers/models/funnel/modeling_funnel.py similarity index 99% rename from src/transformers/modeling_funnel.py rename to src/transformers/models/funnel/modeling_funnel.py index 867f0e1cb85..cfd8dada01e 100644 --- a/src/transformers/modeling_funnel.py +++ b/src/transformers/models/funnel/modeling_funnel.py @@ -24,16 +24,15 @@ from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from torch.nn import functional as F -from .activations import ACT2FN -from .configuration_funnel import FunnelConfig -from .file_utils import ( +from ...activations import ACT2FN +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_outputs import ( +from ...modeling_outputs import ( BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, @@ -41,8 +40,9 @@ from .modeling_outputs import ( SequenceClassifierOutput, TokenClassifierOutput, ) -from .modeling_utils import PreTrainedModel -from .utils import logging +from ...modeling_utils import PreTrainedModel +from ...utils import logging +from .configuration_funnel import FunnelConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_tf_funnel.py b/src/transformers/models/funnel/modeling_tf_funnel.py similarity index 99% rename from src/transformers/modeling_tf_funnel.py rename to src/transformers/models/funnel/modeling_tf_funnel.py index 1b5fa323b0a..8114bf36113 100644 --- a/src/transformers/modeling_tf_funnel.py +++ b/src/transformers/models/funnel/modeling_tf_funnel.py @@ -20,9 +20,8 @@ from typing import Optional, Tuple import tensorflow as tf -from .activations_tf import get_tf_activation -from .configuration_funnel import FunnelConfig -from .file_utils import ( +from ...activations_tf import get_tf_activation +from ...file_utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, @@ -30,7 +29,7 @@ from .file_utils import ( add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_tf_outputs import ( +from ...modeling_tf_outputs import ( TFBaseModelOutput, TFMaskedLMOutput, TFMultipleChoiceModelOutput, @@ -38,7 +37,7 @@ from .modeling_tf_outputs import ( TFSequenceClassifierOutput, TFTokenClassifierOutput, ) -from .modeling_tf_utils import ( +from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFMultipleChoiceLoss, TFPreTrainedModel, @@ -49,8 +48,9 @@ from .modeling_tf_utils import ( keras_serializable, shape_list, ) -from .tokenization_utils import BatchEncoding -from .utils import logging +from ...tokenization_utils import BatchEncoding +from ...utils import logging +from .configuration_funnel import FunnelConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_funnel.py b/src/transformers/models/funnel/tokenization_funnel.py similarity index 
98% rename from src/transformers/tokenization_funnel.py rename to src/transformers/models/funnel/tokenization_funnel.py index 68df5744e98..8a2f00d8479 100644 --- a/src/transformers/tokenization_funnel.py +++ b/src/transformers/models/funnel/tokenization_funnel.py @@ -16,8 +16,8 @@ from typing import List, Optional -from .tokenization_bert import BertTokenizer -from .utils import logging +from ...utils import logging +from ..bert.tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_funnel_fast.py b/src/transformers/models/funnel/tokenization_funnel_fast.py similarity index 98% rename from src/transformers/tokenization_funnel_fast.py rename to src/transformers/models/funnel/tokenization_funnel_fast.py index bc24846c66d..2fda812f5e0 100644 --- a/src/transformers/tokenization_funnel_fast.py +++ b/src/transformers/models/funnel/tokenization_funnel_fast.py @@ -16,9 +16,9 @@ from typing import List, Optional -from .tokenization_bert_fast import BertTokenizerFast +from ...utils import logging +from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_funnel import FunnelTokenizer -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/gpt2/__init__.py b/src/transformers/models/gpt2/__init__.py new file mode 100644 index 00000000000..8cdd95d69e0 --- /dev/null +++ b/src/transformers/models/gpt2/__init__.py @@ -0,0 +1,32 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config +from .tokenization_gpt2 import GPT2Tokenizer + + +if is_tokenizers_available(): + from .tokenization_gpt2_fast import GPT2TokenizerFast + +if is_torch_available(): + from .modeling_gpt2 import ( + GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, + GPT2DoubleHeadsModel, + GPT2ForSequenceClassification, + GPT2LMHeadModel, + GPT2Model, + GPT2PreTrainedModel, + load_tf_weights_in_gpt2, + ) + +if is_tf_available(): + from .modeling_tf_gpt2 import ( + TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, + TFGPT2DoubleHeadsModel, + TFGPT2LMHeadModel, + TFGPT2MainLayer, + TFGPT2Model, + TFGPT2PreTrainedModel, + ) diff --git a/src/transformers/configuration_gpt2.py b/src/transformers/models/gpt2/configuration_gpt2.py similarity index 99% rename from src/transformers/configuration_gpt2.py rename to src/transformers/models/gpt2/configuration_gpt2.py index 9264263c7c0..25cdcb49f21 100644 --- a/src/transformers/configuration_gpt2.py +++ b/src/transformers/models/gpt2/configuration_gpt2.py @@ -15,8 +15,8 @@ # limitations under the License. 
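tokenization_funnel above follows the same pattern as the DistilBERT and LayoutLM tokenizers: a thin subclass of BertTokenizer that only swaps the shipped vocabularies, with the parent now imported from the sibling bert subpackage. A hypothetical minimal version of that pattern; the checkpoint name and URL are made up:

from transformers.models.bert.tokenization_bert import BertTokenizer

class WordPieceLikeTokenizer(BertTokenizer):
    # identical WordPiece machinery; only the pretrained vocab tables differ
    vocab_files_names = {"vocab_file": "vocab.txt"}
    pretrained_vocab_files_map = {
        "vocab_file": {"my-checkpoint": "https://example.com/vocab.txt"}  # hypothetical
    }
    max_model_input_sizes = {"my-checkpoint": 512}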
""" OpenAI GPT-2 configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_gpt2_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py similarity index 100% rename from src/transformers/convert_gpt2_original_tf_checkpoint_to_pytorch.py rename to src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py diff --git a/src/transformers/modeling_gpt2.py b/src/transformers/models/gpt2/modeling_gpt2.py similarity index 99% rename from src/transformers/modeling_gpt2.py rename to src/transformers/models/gpt2/modeling_gpt2.py index e8cc8dcf3a7..e4bb41ae6ab 100644 --- a/src/transformers/modeling_gpt2.py +++ b/src/transformers/models/gpt2/modeling_gpt2.py @@ -24,28 +24,28 @@ import torch import torch.nn as nn from torch.nn import CrossEntropyLoss, MSELoss -from .activations import ACT2FN -from .configuration_gpt2 import GPT2Config -from .file_utils import ( +from ...activations import ACT2FN +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_outputs import ( +from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithPastAndCrossAttentions, SequenceClassifierOutputWithPast, ) -from .modeling_utils import ( +from ...modeling_utils import ( Conv1D, PreTrainedModel, SequenceSummary, find_pruneable_heads_and_indices, prune_conv1d_layer, ) -from .utils import logging +from ...utils import logging +from .configuration_gpt2 import GPT2Config logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_tf_gpt2.py b/src/transformers/models/gpt2/modeling_tf_gpt2.py similarity index 99% rename from src/transformers/modeling_tf_gpt2.py rename to src/transformers/models/gpt2/modeling_tf_gpt2.py index 705fd4025b7..7b7b74b8593 100644 --- a/src/transformers/modeling_tf_gpt2.py +++ b/src/transformers/models/gpt2/modeling_tf_gpt2.py @@ -21,17 +21,16 @@ from typing import List, Optional, Tuple import tensorflow as tf -from .activations_tf import get_tf_activation -from .configuration_gpt2 import GPT2Config -from .file_utils import ( +from ...activations_tf import get_tf_activation +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast -from .modeling_tf_utils import ( +from ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast +from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFConv1D, TFPreTrainedModel, @@ -41,8 +40,9 @@ from .modeling_tf_utils import ( keras_serializable, shape_list, ) -from .tokenization_utils import BatchEncoding -from .utils import logging +from ...tokenization_utils import BatchEncoding +from ...utils import logging +from .configuration_gpt2 import GPT2Config logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_gpt2.py b/src/transformers/models/gpt2/tokenization_gpt2.py similarity index 99% rename from src/transformers/tokenization_gpt2.py rename to src/transformers/models/gpt2/tokenization_gpt2.py index f4a7e54912a..6e9711c8580 100644 --- a/src/transformers/tokenization_gpt2.py +++ 
b/src/transformers/models/gpt2/tokenization_gpt2.py @@ -23,8 +23,8 @@ from typing import Optional, Tuple import regex as re -from .tokenization_utils import AddedToken, PreTrainedTokenizer -from .utils import logging +from ...tokenization_utils import AddedToken, PreTrainedTokenizer +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_gpt2_fast.py b/src/transformers/models/gpt2/tokenization_gpt2_fast.py similarity index 98% rename from src/transformers/tokenization_gpt2_fast.py rename to src/transformers/models/gpt2/tokenization_gpt2_fast.py index 54c49422110..bedfed2c9c0 100644 --- a/src/transformers/tokenization_gpt2_fast.py +++ b/src/transformers/models/gpt2/tokenization_gpt2_fast.py @@ -21,10 +21,10 @@ from typing import Optional, Tuple from tokenizers import pre_tokenizers +from ...tokenization_utils_base import BatchEncoding +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging from .tokenization_gpt2 import GPT2Tokenizer -from .tokenization_utils_base import BatchEncoding -from .tokenization_utils_fast import PreTrainedTokenizerFast -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/herbert/__init__.py b/src/transformers/models/herbert/__init__.py new file mode 100644 index 00000000000..f4da74d76e8 --- /dev/null +++ b/src/transformers/models/herbert/__init__.py @@ -0,0 +1,10 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_tokenizers_available +from .tokenization_herbert import HerbertTokenizer + + +if is_tokenizers_available(): + from .tokenization_herbert_fast import HerbertTokenizerFast diff --git a/src/transformers/tokenization_herbert.py b/src/transformers/models/herbert/tokenization_herbert.py similarity index 95% rename from src/transformers/tokenization_herbert.py rename to src/transformers/models/herbert/tokenization_herbert.py index 664b93b512a..79b82ec10ab 100644 --- a/src/transformers/tokenization_herbert.py +++ b/src/transformers/models/herbert/tokenization_herbert.py @@ -13,9 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
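The from tokenizers import pre_tokenizers line in tokenization_gpt2_fast above is what backs the fast tokenizer's add_prefix_space option: the byte-level pre-tokenizer on the underlying Rust tokenizer gets swapped. A rough illustration using the tokenizers package alone, with the output shown approximately:

from tokenizers import pre_tokenizers

byte_level = pre_tokenizers.ByteLevel(add_prefix_space=True)
print(byte_level.pre_tokenize_str("Hello world"))
# roughly [('ĠHello', ...), ('Ġworld', ...)] -- byte-level pieces, with Ġ marking a leading space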
-from .tokenization_bert import BasicTokenizer -from .tokenization_xlm import XLMTokenizer -from .utils import logging +from ...utils import logging +from ..bert.tokenization_bert import BasicTokenizer +from ..xlm.tokenization_xlm import XLMTokenizer logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_herbert_fast.py b/src/transformers/models/herbert/tokenization_herbert_fast.py similarity index 98% rename from src/transformers/tokenization_herbert_fast.py rename to src/transformers/models/herbert/tokenization_herbert_fast.py index 642f8aa1bac..e98f5ff38ac 100644 --- a/src/transformers/tokenization_herbert_fast.py +++ b/src/transformers/models/herbert/tokenization_herbert_fast.py @@ -15,14 +15,14 @@ from typing import List, Optional, Tuple +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging from .tokenization_herbert import ( PRETRAINED_INIT_CONFIGURATION, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, PRETRAINED_VOCAB_FILES_MAP, HerbertTokenizer, ) -from .tokenization_utils_fast import PreTrainedTokenizerFast -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/layoutlm/__init__.py b/src/transformers/models/layoutlm/__init__.py new file mode 100644 index 00000000000..cfddf5c00d7 --- /dev/null +++ b/src/transformers/models/layoutlm/__init__.py @@ -0,0 +1,19 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_tokenizers_available, is_torch_available +from .configuration_layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig +from .tokenization_layoutlm import LayoutLMTokenizer + + +if is_tokenizers_available(): + from .tokenization_layoutlm_fast import LayoutLMTokenizerFast + +if is_torch_available(): + from .modeling_layoutlm import ( + LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, + LayoutLMForMaskedLM, + LayoutLMForTokenClassification, + LayoutLMModel, + ) diff --git a/src/transformers/configuration_layoutlm.py b/src/transformers/models/layoutlm/configuration_layoutlm.py similarity index 98% rename from src/transformers/configuration_layoutlm.py rename to src/transformers/models/layoutlm/configuration_layoutlm.py index 1b629ca7d02..ee9a10e8245 100644 --- a/src/transformers/configuration_layoutlm.py +++ b/src/transformers/models/layoutlm/configuration_layoutlm.py @@ -15,8 +15,8 @@ """ LayoutLM model configuration """ -from .configuration_bert import BertConfig -from .utils import logging +from ...utils import logging +from ..bert.configuration_bert import BertConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_layoutlm.py b/src/transformers/models/layoutlm/modeling_layoutlm.py similarity index 96% rename from src/transformers/modeling_layoutlm.py rename to src/transformers/models/layoutlm/modeling_layoutlm.py index 073e25b6c4f..f75eb701008 100644 --- a/src/transformers/modeling_layoutlm.py +++ b/src/transformers/models/layoutlm/modeling_layoutlm.py @@ -21,22 +21,22 @@ import torch from torch import nn from torch.nn import CrossEntropyLoss -from .activations import ACT2FN -from .configuration_layoutlm import LayoutLMConfig -from .file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward -from .modeling_outputs import ( +from ...activations import ACT2FN +from ...file_utils import add_code_sample_docstrings, add_start_docstrings, 
add_start_docstrings_to_model_forward +from ...modeling_outputs import ( BaseModelOutputWithCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, TokenClassifierOutput, ) -from .modeling_utils import ( +from ...modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) -from .utils import logging +from ...utils import logging +from .configuration_layoutlm import LayoutLMConfig logger = logging.get_logger(__name__) @@ -127,7 +127,7 @@ class LayoutLMEmbeddings(nn.Module): return embeddings -# Copied from transformers.modeling_bert.BertSelfAttention with Bert->LayoutLM +# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->LayoutLM class LayoutLMSelfAttention(nn.Module): def __init__(self, config): super().__init__() @@ -206,7 +206,7 @@ class LayoutLMSelfAttention(nn.Module): return outputs -# Copied from transformers.modeling_bert.BertSelfOutput with Bert->LayoutLM +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->LayoutLM class LayoutLMSelfOutput(nn.Module): def __init__(self, config): super().__init__() @@ -221,7 +221,7 @@ class LayoutLMSelfOutput(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertAttention with Bert->LayoutLM +# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->LayoutLM class LayoutLMAttention(nn.Module): def __init__(self, config): super().__init__() @@ -269,7 +269,7 @@ class LayoutLMAttention(nn.Module): return outputs -# Copied from transformers.modeling_bert.BertIntermediate +# Copied from transformers.models.bert.modeling_bert.BertIntermediate class LayoutLMIntermediate(nn.Module): def __init__(self, config): super().__init__() @@ -285,7 +285,7 @@ class LayoutLMIntermediate(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertOutput with Bert->LayoutLM +# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->LayoutLM class LayoutLMOutput(nn.Module): def __init__(self, config): super().__init__() @@ -300,7 +300,7 @@ class LayoutLMOutput(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertLayer with Bert->LayoutLM +# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->LayoutLM class LayoutLMLayer(nn.Module): def __init__(self, config): super().__init__() @@ -360,7 +360,7 @@ class LayoutLMLayer(nn.Module): return layer_output -# Copied from transformers.modeling_bert.BertEncoder with Bert->LayoutLM +# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->LayoutLM class LayoutLMEncoder(nn.Module): def __init__(self, config): super().__init__() @@ -435,7 +435,7 @@ class LayoutLMEncoder(nn.Module): ) -# Copied from transformers.modeling_bert.BertPooler +# Copied from transformers.models.bert.modeling_bert.BertPooler class LayoutLMPooler(nn.Module): def __init__(self, config): super().__init__() @@ -451,7 +451,7 @@ class LayoutLMPooler(nn.Module): return pooled_output -# Copied from transformers.modeling_bert.BertPredictionHeadTransform with Bert->LayoutLM +# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->LayoutLM class LayoutLMPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() @@ -469,7 +469,7 @@ class LayoutLMPredictionHeadTransform(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertLMPredictionHead with Bert->LayoutLM +# Copied from 
transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->LayoutLM class LayoutLMLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() @@ -490,7 +490,7 @@ class LayoutLMLMPredictionHead(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertOnlyMLMHead with Bert->LayoutLM +# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->LayoutLM class LayoutLMOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() diff --git a/src/transformers/tokenization_layoutlm.py b/src/transformers/models/layoutlm/tokenization_layoutlm.py similarity index 96% rename from src/transformers/tokenization_layoutlm.py rename to src/transformers/models/layoutlm/tokenization_layoutlm.py index 61ae88e5dc6..1d5e2eeaa49 100644 --- a/src/transformers/tokenization_layoutlm.py +++ b/src/transformers/models/layoutlm/tokenization_layoutlm.py @@ -15,8 +15,8 @@ """ Tokenization class for model LayoutLM.""" -from .tokenization_bert import BertTokenizer -from .utils import logging +from ...utils import logging +from ..bert.tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_layoutlm_fast.py b/src/transformers/models/layoutlm/tokenization_layoutlm_fast.py similarity index 96% rename from src/transformers/tokenization_layoutlm_fast.py rename to src/transformers/models/layoutlm/tokenization_layoutlm_fast.py index 4d9598cae21..00027ce11ed 100644 --- a/src/transformers/tokenization_layoutlm_fast.py +++ b/src/transformers/models/layoutlm/tokenization_layoutlm_fast.py @@ -15,9 +15,9 @@ """ Tokenization class for model LayoutLM.""" -from .tokenization_bert_fast import BertTokenizerFast +from ...utils import logging +from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_layoutlm import LayoutLMTokenizer -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/longformer/__init__.py b/src/transformers/models/longformer/__init__.py new file mode 100644 index 00000000000..5a95483dc8f --- /dev/null +++ b/src/transformers/models/longformer/__init__.py @@ -0,0 +1,32 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
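The many "# Copied from transformers.models.bert.modeling_bert.... with Bert->LayoutLM" updates above matter because those comments are machine-readable markers: a consistency check re-derives the expected code from the referenced symbol plus the name substitution, so the dotted path has to track the new module locations. A simplified sketch of the idea; this is not the repository's actual check utility, it handles class-level references only and assumes torch is installed:

import importlib
import inspect
import re

MARKER = re.compile(r"# Copied from (?P<path>[\w.]+)(?: with (?P<old>\w+)->(?P<new>\w+))?")

def expected_source(marker_line):
    m = MARKER.search(marker_line)
    module_path, _, attr = m.group("path").rpartition(".")
    obj = getattr(importlib.import_module(module_path), attr)    # e.g. BertSelfOutput
    source = inspect.getsource(obj)
    if m.group("old"):
        source = source.replace(m.group("old"), m.group("new"))  # apply Bert->LayoutLM
    return source

# expected_source("# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->LayoutLM")
# returns BertSelfOutput's source with every "Bert" rewritten to "LayoutLM", ready to diff
# against the LayoutLM copy.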
+ +from ...file_utils import is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig +from .tokenization_longformer import LongformerTokenizer + + +if is_tokenizers_available(): + from .tokenization_longformer_fast import LongformerTokenizerFast + +if is_torch_available(): + from .modeling_longformer import ( + LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + LongformerForMaskedLM, + LongformerForMultipleChoice, + LongformerForQuestionAnswering, + LongformerForSequenceClassification, + LongformerForTokenClassification, + LongformerModel, + LongformerSelfAttention, + ) + +if is_tf_available(): + from .modeling_tf_longformer import ( + TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + TFLongformerForMaskedLM, + TFLongformerForQuestionAnswering, + TFLongformerModel, + TFLongformerSelfAttention, + ) diff --git a/src/transformers/configuration_longformer.py b/src/transformers/models/longformer/configuration_longformer.py similarity index 97% rename from src/transformers/configuration_longformer.py rename to src/transformers/models/longformer/configuration_longformer.py index 55178b5fdf1..3efd5781d24 100644 --- a/src/transformers/configuration_longformer.py +++ b/src/transformers/models/longformer/configuration_longformer.py @@ -16,8 +16,8 @@ from typing import List, Union -from .configuration_roberta import RobertaConfig -from .utils import logging +from ...utils import logging +from ..roberta.configuration_roberta import RobertaConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_longformer_original_pytorch_lightning_to_pytorch.py b/src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py similarity index 96% rename from src/transformers/convert_longformer_original_pytorch_lightning_to_pytorch.py rename to src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py index 48337e9bebd..6c310a5fafd 100644 --- a/src/transformers/convert_longformer_original_pytorch_lightning_to_pytorch.py +++ b/src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py @@ -20,7 +20,7 @@ import argparse import pytorch_lightning as pl import torch -from transformers.modeling_longformer import LongformerForQuestionAnswering, LongformerModel +from transformers import LongformerForQuestionAnswering, LongformerModel class LightningModel(pl.LightningModule): diff --git a/src/transformers/modeling_longformer.py b/src/transformers/models/longformer/modeling_longformer.py similarity index 99% rename from src/transformers/modeling_longformer.py rename to src/transformers/models/longformer/modeling_longformer.py index 665de8b543f..f9972a771e9 100755 --- a/src/transformers/modeling_longformer.py +++ b/src/transformers/models/longformer/modeling_longformer.py @@ -24,23 +24,23 @@ import torch.nn as nn from torch.nn import CrossEntropyLoss, MSELoss from torch.nn import functional as F -from .activations import ACT2FN, gelu -from .configuration_longformer import LongformerConfig -from .file_utils import ( +from ...activations import ACT2FN, gelu +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_outputs import MaskedLMOutput, SequenceClassifierOutput, TokenClassifierOutput -from .modeling_utils import ( +from ...modeling_outputs import MaskedLMOutput, SequenceClassifierOutput, 
TokenClassifierOutput +from ...modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) -from .utils import logging +from ...utils import logging +from .configuration_longformer import LongformerConfig logger = logging.get_logger(__name__) @@ -285,7 +285,7 @@ def _compute_global_attention_mask(input_ids, sep_token_id, before_sep_token=Tru return attention_mask -# Copied from transformers.modeling_roberta.create_position_ids_from_input_ids +# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids def create_position_ids_from_input_ids(input_ids, padding_idx): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols @@ -307,7 +307,7 @@ class LongformerEmbeddings(nn.Module): Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. """ - # Copied from transformers.modeling_bert.BertEmbeddings.__init__ + # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) @@ -336,7 +336,7 @@ class LongformerEmbeddings(nn.Module): else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) - # Copied from transformers.modeling_bert.BertEmbeddings.forward + # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward if input_ids is not None: input_shape = input_ids.size() else: @@ -918,7 +918,7 @@ class LongformerSelfAttention(nn.Module): return global_attn_output, global_attn_probs -# Copied from transformers.modeling_bert.BertSelfOutput +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput class LongformerSelfOutput(nn.Module): def __init__(self, config): super().__init__() @@ -973,7 +973,7 @@ class LongformerAttention(nn.Module): return outputs -# Copied from transformers.modeling_bert.BertIntermediate +# Copied from transformers.models.bert.modeling_bert.BertIntermediate class LongformerIntermediate(nn.Module): def __init__(self, config): super().__init__() @@ -989,7 +989,7 @@ class LongformerIntermediate(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertOutput +# Copied from transformers.models.bert.modeling_bert.BertOutput class LongformerOutput(nn.Module): def __init__(self, config): super().__init__() @@ -1114,7 +1114,7 @@ class LongformerEncoder(nn.Module): ) -# Copied from transformers.modeling_bert.BertPooler +# Copied from transformers.models.bert.modeling_bert.BertPooler class LongformerPooler(nn.Module): def __init__(self, config): super().__init__() @@ -1130,7 +1130,7 @@ class LongformerPooler(nn.Module): return pooled_output -# Copied from transformers.modeling_roberta.RobertaLMHead with Roberta->Longformer +# Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead with Roberta->Longformer class LongformerLMHead(nn.Module): """Longformer Head for masked language modeling.""" diff --git a/src/transformers/modeling_tf_longformer.py b/src/transformers/models/longformer/modeling_tf_longformer.py similarity index 99% rename from src/transformers/modeling_tf_longformer.py rename to src/transformers/models/longformer/modeling_tf_longformer.py index 62a5cbee5b0..4ace90e5aa8 100644 --- a/src/transformers/modeling_tf_longformer.py +++ b/src/transformers/models/longformer/modeling_tf_longformer.py @@ -21,15 +21,14 @@ import tensorflow as tf from 
transformers.activations_tf import get_tf_activation -from .configuration_longformer import LongformerConfig -from .file_utils import ( +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, ) -from .modeling_tf_outputs import TFMaskedLMOutput, TFQuestionAnsweringModelOutput -from .modeling_tf_utils import ( +from ...modeling_tf_outputs import TFMaskedLMOutput, TFQuestionAnsweringModelOutput +from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, @@ -37,8 +36,9 @@ from .modeling_tf_utils import ( keras_serializable, shape_list, ) -from .tokenization_utils import BatchEncoding -from .utils import logging +from ...tokenization_utils import BatchEncoding +from ...utils import logging +from .configuration_longformer import LongformerConfig logger = logging.get_logger(__name__) @@ -226,7 +226,7 @@ def _compute_global_attention_mask(input_ids_shape, sep_token_indices, before_se return attention_mask -# Copied from transformers.modeling_tf_roberta.TFRobertaLMHead +# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaLMHead class TFLongformerLMHead(tf.keras.layers.Layer): """Roberta Head for masked language modeling.""" @@ -260,7 +260,7 @@ class TFLongformerLMHead(tf.keras.layers.Layer): return x -# Copied from transformers.modeling_tf_roberta.TFRobertaEmbeddings +# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaEmbeddings class TFLongformerEmbeddings(tf.keras.layers.Layer): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. @@ -420,7 +420,7 @@ class TFLongformerEmbeddings(tf.keras.layers.Layer): return tf.reshape(logits, [batch_size, length, self.vocab_size]) -# Copied from transformers.modeling_tf_bert.TFBertIntermediate +# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate class TFLongformerIntermediate(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -441,7 +441,7 @@ class TFLongformerIntermediate(tf.keras.layers.Layer): return hidden_states -# Copied from transformers.modeling_tf_bert.TFBertOutput +# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput class TFLongformerOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -460,7 +460,7 @@ class TFLongformerOutput(tf.keras.layers.Layer): return hidden_states -# Copied from transformers.modeling_tf_bert.TFBertPooler +# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler class TFLongformerPooler(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -481,7 +481,7 @@ class TFLongformerPooler(tf.keras.layers.Layer): return pooled_output -# Copied from transformers.modeling_tf_bert.TFBertSelfOutput +# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput class TFLongformerSelfOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) diff --git a/src/transformers/tokenization_longformer.py b/src/transformers/models/longformer/tokenization_longformer.py similarity index 95% rename from src/transformers/tokenization_longformer.py rename to src/transformers/models/longformer/tokenization_longformer.py index 365dc856d9d..4aa9da74f54 100644 --- a/src/transformers/tokenization_longformer.py +++ b/src/transformers/models/longformer/tokenization_longformer.py @@ -13,8 +13,8 @@ # See the License for the specific language governing 
permissions and # limitations under the License. -from .tokenization_roberta import RobertaTokenizer -from .utils import logging +from ...utils import logging +from ..roberta.tokenization_roberta import RobertaTokenizer logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_longformer_fast.py b/src/transformers/models/longformer/tokenization_longformer_fast.py similarity index 96% rename from src/transformers/tokenization_longformer_fast.py rename to src/transformers/models/longformer/tokenization_longformer_fast.py index 8cab26e59f2..2dea891246b 100644 --- a/src/transformers/tokenization_longformer_fast.py +++ b/src/transformers/models/longformer/tokenization_longformer_fast.py @@ -13,9 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from ...utils import logging +from ..roberta.tokenization_roberta_fast import RobertaTokenizerFast from .tokenization_longformer import LongformerTokenizer -from .tokenization_roberta_fast import RobertaTokenizerFast -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/lxmert/__init__.py b/src/transformers/models/lxmert/__init__.py new file mode 100644 index 00000000000..def84a1569e --- /dev/null +++ b/src/transformers/models/lxmert/__init__.py @@ -0,0 +1,32 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig +from .tokenization_lxmert import LxmertTokenizer + + +if is_tokenizers_available(): + from .tokenization_lxmert_fast import LxmertTokenizerFast + +if is_torch_available(): + from .modeling_lxmert import ( + LxmertEncoder, + LxmertForPreTraining, + LxmertForQuestionAnswering, + LxmertModel, + LxmertPreTrainedModel, + LxmertVisualFeatureEncoder, + LxmertXLayer, + ) + +if is_tf_available(): + from .modeling_tf_lxmert import ( + TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFLxmertForPreTraining, + TFLxmertMainLayer, + TFLxmertModel, + TFLxmertPreTrainedModel, + TFLxmertVisualFeatureEncoder, + ) diff --git a/src/transformers/configuration_lxmert.py b/src/transformers/models/lxmert/configuration_lxmert.py similarity index 99% rename from src/transformers/configuration_lxmert.py rename to src/transformers/models/lxmert/configuration_lxmert.py index e18d4ed0314..8c3ca17187a 100644 --- a/src/transformers/configuration_lxmert.py +++ b/src/transformers/models/lxmert/configuration_lxmert.py @@ -15,8 +15,8 @@ """ LXMERT model configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_lxmert_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py similarity index 100% rename from src/transformers/convert_lxmert_original_tf_checkpoint_to_pytorch.py rename to src/transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py diff --git a/src/transformers/modeling_lxmert.py b/src/transformers/models/lxmert/modeling_lxmert.py similarity index 99% rename from src/transformers/modeling_lxmert.py rename to src/transformers/models/lxmert/modeling_lxmert.py index 
ca49cf99347..9af11f51c32 100644 --- a/src/transformers/modeling_lxmert.py +++ b/src/transformers/models/lxmert/modeling_lxmert.py @@ -25,17 +25,17 @@ import torch from torch import nn from torch.nn import CrossEntropyLoss, SmoothL1Loss -from .activations import ACT2FN, gelu -from .configuration_lxmert import LxmertConfig -from .file_utils import ( +from ...activations import ACT2FN, gelu +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_utils import PreTrainedModel -from .utils import logging +from ...modeling_utils import PreTrainedModel +from ...utils import logging +from .configuration_lxmert import LxmertConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_tf_lxmert.py b/src/transformers/models/lxmert/modeling_tf_lxmert.py similarity index 99% rename from src/transformers/modeling_tf_lxmert.py rename to src/transformers/models/lxmert/modeling_tf_lxmert.py index 09c6ca0fdaa..f67a421391d 100644 --- a/src/transformers/modeling_tf_lxmert.py +++ b/src/transformers/models/lxmert/modeling_tf_lxmert.py @@ -22,18 +22,18 @@ from typing import Dict, Optional, Tuple import tensorflow as tf -from .activations_tf import get_tf_activation -from .configuration_lxmert import LxmertConfig -from .file_utils import ( +from ...activations_tf import get_tf_activation +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_tf_utils import TFPreTrainedModel, get_initializer, keras_serializable, shape_list -from .tokenization_utils_base import BatchEncoding -from .utils import logging +from ...modeling_tf_utils import TFPreTrainedModel, get_initializer, keras_serializable, shape_list +from ...tokenization_utils_base import BatchEncoding +from ...utils import logging +from .configuration_lxmert import LxmertConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_lxmert.py b/src/transformers/models/lxmert/tokenization_lxmert.py similarity index 98% rename from src/transformers/tokenization_lxmert.py rename to src/transformers/models/lxmert/tokenization_lxmert.py index 272b2fa3480..fe12a95a34b 100644 --- a/src/transformers/tokenization_lxmert.py +++ b/src/transformers/models/lxmert/tokenization_lxmert.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .tokenization_bert import BertTokenizer +from ..bert.tokenization_bert import BertTokenizer #################################################### diff --git a/src/transformers/tokenization_lxmert_fast.py b/src/transformers/models/lxmert/tokenization_lxmert_fast.py similarity index 97% rename from src/transformers/tokenization_lxmert_fast.py rename to src/transformers/models/lxmert/tokenization_lxmert_fast.py index 740a22f6b93..bace6236b18 100644 --- a/src/transformers/tokenization_lxmert_fast.py +++ b/src/transformers/models/lxmert/tokenization_lxmert_fast.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from .tokenization_bert_fast import BertTokenizerFast +from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_lxmert import LxmertTokenizer diff --git a/src/transformers/models/marian/__init__.py b/src/transformers/models/marian/__init__.py new file mode 100644 index 00000000000..ef5ac8ae04f --- /dev/null +++ b/src/transformers/models/marian/__init__.py @@ -0,0 +1,16 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_sentencepiece_available, is_tf_available, is_torch_available +from .configuration_marian import MarianConfig + + +if is_sentencepiece_available(): + from .tokenization_marian import MarianTokenizer + +if is_torch_available(): + from .modeling_marian import MarianMTModel + +if is_tf_available(): + from .modeling_tf_marian import TFMarianMTModel diff --git a/src/transformers/configuration_marian.py b/src/transformers/models/marian/configuration_marian.py similarity index 99% rename from src/transformers/configuration_marian.py rename to src/transformers/models/marian/configuration_marian.py index d389a0cd227..d5769bcb9cc 100644 --- a/src/transformers/configuration_marian.py +++ b/src/transformers/models/marian/configuration_marian.py @@ -14,7 +14,7 @@ # limitations under the License. """ Marian model configuration """ -from .configuration_bart import BartConfig +from ..bart.configuration_bart import BartConfig PRETRAINED_CONFIG_ARCHIVE_MAP = { diff --git a/src/transformers/convert_marian_tatoeba_to_pytorch.py b/src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py similarity index 99% rename from src/transformers/convert_marian_tatoeba_to_pytorch.py rename to src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py index 6d7333aae7d..7b3fca59972 100644 --- a/src/transformers/convert_marian_tatoeba_to_pytorch.py +++ b/src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py @@ -3,7 +3,7 @@ import os from pathlib import Path from typing import List, Tuple -from transformers.convert_marian_to_pytorch import ( +from transformers.models.marian.convert_marian_to_pytorch import ( FRONT_MATTER_TEMPLATE, _parse_readme, convert_all_sentencepiece_models, diff --git a/src/transformers/convert_marian_to_pytorch.py b/src/transformers/models/marian/convert_marian_to_pytorch.py similarity index 100% rename from src/transformers/convert_marian_to_pytorch.py rename to src/transformers/models/marian/convert_marian_to_pytorch.py diff --git a/src/transformers/modeling_marian.py b/src/transformers/models/marian/modeling_marian.py similarity index 97% rename from src/transformers/modeling_marian.py rename to src/transformers/models/marian/modeling_marian.py index 1f76ae45973..b02e4599fdc 100644 --- a/src/transformers/modeling_marian.py +++ b/src/transformers/models/marian/modeling_marian.py @@ -15,8 +15,8 @@ """PyTorch MarianMTModel model, ported from the Marian C++ repo.""" +from ..bart.modeling_bart import BartForConditionalGeneration from .configuration_marian import MarianConfig -from .modeling_bart import BartForConditionalGeneration # See all Marian models at https://huggingface.co/models?search=Helsinki-NLP diff --git a/src/transformers/modeling_tf_marian.py b/src/transformers/models/marian/modeling_tf_marian.py similarity index 90% rename from src/transformers/modeling_tf_marian.py rename to src/transformers/models/marian/modeling_tf_marian.py index 9dcd5489660..e385e5f6e5e 
100644 --- a/src/transformers/modeling_tf_marian.py +++ b/src/transformers/models/marian/modeling_tf_marian.py @@ -14,10 +14,10 @@ # limitations under the License. """TF Marian model, ported from the fairseq repo.""" +from ...file_utils import add_start_docstrings, is_tf_available +from ...utils import logging +from ..bart.modeling_tf_bart import BART_START_DOCSTRING, LARGE_NEGATIVE, TFBartForConditionalGeneration from .configuration_marian import MarianConfig -from .file_utils import add_start_docstrings, is_tf_available -from .modeling_tf_bart import BART_START_DOCSTRING, LARGE_NEGATIVE, TFBartForConditionalGeneration -from .utils import logging if is_tf_available(): diff --git a/src/transformers/tokenization_marian.py b/src/transformers/models/marian/tokenization_marian.py similarity index 98% rename from src/transformers/tokenization_marian.py rename to src/transformers/models/marian/tokenization_marian.py index cd3ba593360..8af0c40f4f7 100644 --- a/src/transformers/tokenization_marian.py +++ b/src/transformers/models/marian/tokenization_marian.py @@ -7,9 +7,9 @@ from typing import Dict, List, Optional, Tuple, Union import sentencepiece -from .file_utils import add_start_docstrings -from .tokenization_utils import BatchEncoding, PreTrainedTokenizer -from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING +from ...file_utils import add_start_docstrings +from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer +from ...tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING vocab_files_names = { diff --git a/src/transformers/models/mbart/__init__.py b/src/transformers/models/mbart/__init__.py new file mode 100644 index 00000000000..31112a80534 --- /dev/null +++ b/src/transformers/models/mbart/__init__.py @@ -0,0 +1,19 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_mbart import MBartConfig + + +if is_sentencepiece_available(): + from .tokenization_mbart import MBartTokenizer + +if is_tokenizers_available(): + from .tokenization_mbart_fast import MBartTokenizerFast + +if is_torch_available(): + from .modeling_mbart import MBartForConditionalGeneration + +if is_tf_available(): + from .modeling_tf_mbart import TFMBartForConditionalGeneration diff --git a/src/transformers/configuration_mbart.py b/src/transformers/models/mbart/configuration_mbart.py similarity index 98% rename from src/transformers/configuration_mbart.py rename to src/transformers/models/mbart/configuration_mbart.py index ecea31633af..74366602783 100644 --- a/src/transformers/configuration_mbart.py +++ b/src/transformers/models/mbart/configuration_mbart.py @@ -14,8 +14,8 @@ # limitations under the License. 
""" MBART configuration """ -from .configuration_bart import BartConfig -from .utils import logging +from ...utils import logging +from ..bart.configuration_bart import BartConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_mbart_original_checkpoint_to_pytorch.py b/src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py similarity index 93% rename from src/transformers/convert_mbart_original_checkpoint_to_pytorch.py rename to src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py index e61395d0d4a..f42083d1e1b 100644 --- a/src/transformers/convert_mbart_original_checkpoint_to_pytorch.py +++ b/src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py @@ -4,7 +4,7 @@ import torch from transformers import BartForConditionalGeneration, MBartConfig -from .convert_bart_original_pytorch_checkpoint_to_pytorch import remove_ignore_keys_ +from ..bart.convert_bart_original_pytorch_checkpoint_to_pytorch import remove_ignore_keys_ def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path, hf_config_path="facebook/mbart-large-en-ro"): diff --git a/src/transformers/modeling_mbart.py b/src/transformers/models/mbart/modeling_mbart.py similarity index 96% rename from src/transformers/modeling_mbart.py rename to src/transformers/models/mbart/modeling_mbart.py index 2df91c6e606..7f929291253 100644 --- a/src/transformers/modeling_mbart.py +++ b/src/transformers/models/mbart/modeling_mbart.py @@ -1,5 +1,5 @@ +from ..bart.modeling_bart import BartForConditionalGeneration from .configuration_mbart import MBartConfig -from .modeling_bart import BartForConditionalGeneration _CONFIG_FOR_DOC = "MBartConfig" diff --git a/src/transformers/modeling_tf_mbart.py b/src/transformers/models/mbart/modeling_tf_mbart.py similarity index 83% rename from src/transformers/modeling_tf_mbart.py rename to src/transformers/models/mbart/modeling_tf_mbart.py index 804324a3163..23b30fd4b36 100644 --- a/src/transformers/modeling_tf_mbart.py +++ b/src/transformers/models/mbart/modeling_tf_mbart.py @@ -13,10 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""TF mBART model, originally from fairseq.""" +from ...file_utils import add_start_docstrings +from ...utils import logging +from ..bart.modeling_tf_bart import BART_START_DOCSTRING, TFBartForConditionalGeneration from .configuration_mbart import MBartConfig -from .file_utils import add_start_docstrings -from .modeling_tf_bart import BART_START_DOCSTRING, TFBartForConditionalGeneration -from .utils import logging _CONFIG_FOR_DOC = "MBartConfig" @@ -33,4 +33,4 @@ logger = logging.get_logger(__name__) @add_start_docstrings("mBART (multilingual BART) model for machine translation", START_DOCSTRING) class TFMBartForConditionalGeneration(TFBartForConditionalGeneration): config_class = MBartConfig - # All the code is in src/transformers/modeling_tf_bart.py + # All the code is in src/transformers/models/bart/modeling_tf_bart.py diff --git a/src/transformers/tokenization_mbart.py b/src/transformers/models/mbart/tokenization_mbart.py similarity index 97% rename from src/transformers/tokenization_mbart.py rename to src/transformers/models/mbart/tokenization_mbart.py index 44cf760be21..bb5f604d6b4 100644 --- a/src/transformers/tokenization_mbart.py +++ b/src/transformers/models/mbart/tokenization_mbart.py @@ -15,11 +15,11 @@ from typing import List, Optional -from .file_utils import add_start_docstrings -from .tokenization_utils import BatchEncoding -from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING -from .tokenization_xlm_roberta import XLMRobertaTokenizer -from .utils import logging +from ...file_utils import add_start_docstrings +from ...tokenization_utils import BatchEncoding +from ...tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING +from ...utils import logging +from ..xlm_roberta.tokenization_xlm_roberta import XLMRobertaTokenizer logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_mbart_fast.py b/src/transformers/models/mbart/tokenization_mbart_fast.py similarity index 97% rename from src/transformers/tokenization_mbart_fast.py rename to src/transformers/models/mbart/tokenization_mbart_fast.py index 3cd7b0ae391..27243c55afa 100644 --- a/src/transformers/tokenization_mbart_fast.py +++ b/src/transformers/models/mbart/tokenization_mbart_fast.py @@ -17,11 +17,11 @@ from typing import List, Optional from tokenizers import processors -from .file_utils import add_start_docstrings, is_sentencepiece_available -from .tokenization_utils import BatchEncoding -from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING -from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast -from .utils import logging +from ...file_utils import add_start_docstrings, is_sentencepiece_available +from ...tokenization_utils import BatchEncoding +from ...tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING +from ...utils import logging +from ..xlm_roberta.tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast if is_sentencepiece_available(): diff --git a/src/transformers/models/mmbt/__init__.py b/src/transformers/models/mmbt/__init__.py new file mode 100644 index 00000000000..4f209f56f8a --- /dev/null +++ b/src/transformers/models/mmbt/__init__.py @@ -0,0 +1,10 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
+ +from ...file_utils import is_torch_available +from .configuration_mmbt import MMBTConfig + + +if is_torch_available(): + from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings diff --git a/src/transformers/configuration_mmbt.py b/src/transformers/models/mmbt/configuration_mmbt.py similarity index 98% rename from src/transformers/configuration_mmbt.py rename to src/transformers/models/mmbt/configuration_mmbt.py index cae65ab5c5e..bbb6c9d240e 100644 --- a/src/transformers/configuration_mmbt.py +++ b/src/transformers/models/mmbt/configuration_mmbt.py @@ -15,7 +15,7 @@ # limitations under the License. """ MMBT configuration """ -from .utils import logging +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_mmbt.py b/src/transformers/models/mmbt/modeling_mmbt.py similarity index 98% rename from src/transformers/modeling_mmbt.py rename to src/transformers/models/mmbt/modeling_mmbt.py index 53d9e7535fd..8588cb815f5 100644 --- a/src/transformers/modeling_mmbt.py +++ b/src/transformers/models/mmbt/modeling_mmbt.py @@ -20,10 +20,10 @@ import torch import torch.nn as nn from torch.nn import CrossEntropyLoss, MSELoss -from .file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings -from .modeling_outputs import BaseModelOutputWithPooling, SequenceClassifierOutput -from .modeling_utils import ModuleUtilsMixin -from .utils import logging +from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings +from ...modeling_outputs import BaseModelOutputWithPooling, SequenceClassifierOutput +from ...modeling_utils import ModuleUtilsMixin +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/mobilebert/__init__.py b/src/transformers/models/mobilebert/__init__.py new file mode 100644 index 00000000000..b08be09ef60 --- /dev/null +++ b/src/transformers/models/mobilebert/__init__.py @@ -0,0 +1,42 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
+ +from ...file_utils import is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_mobilebert import MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig +from .tokenization_mobilebert import MobileBertTokenizer + + +if is_tokenizers_available(): + from .tokenization_mobilebert_fast import MobileBertTokenizerFast + +if is_torch_available(): + from .modeling_mobilebert import ( + MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + MobileBertForMaskedLM, + MobileBertForMultipleChoice, + MobileBertForNextSentencePrediction, + MobileBertForPreTraining, + MobileBertForQuestionAnswering, + MobileBertForSequenceClassification, + MobileBertForTokenClassification, + MobileBertLayer, + MobileBertModel, + MobileBertPreTrainedModel, + load_tf_weights_in_mobilebert, + ) + +if is_tf_available(): + from .modeling_tf_mobilebert import ( + TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFMobileBertForMaskedLM, + TFMobileBertForMultipleChoice, + TFMobileBertForNextSentencePrediction, + TFMobileBertForPreTraining, + TFMobileBertForQuestionAnswering, + TFMobileBertForSequenceClassification, + TFMobileBertForTokenClassification, + TFMobileBertMainLayer, + TFMobileBertModel, + TFMobileBertPreTrainedModel, + ) diff --git a/src/transformers/configuration_mobilebert.py b/src/transformers/models/mobilebert/configuration_mobilebert.py similarity index 98% rename from src/transformers/configuration_mobilebert.py rename to src/transformers/models/mobilebert/configuration_mobilebert.py index 2342fbfcc43..e293a86847e 100644 --- a/src/transformers/configuration_mobilebert.py +++ b/src/transformers/models/mobilebert/configuration_mobilebert.py @@ -12,8 +12,8 @@ # limitations under the License. """ MobileBERT model configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_mobilebert_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py similarity index 100% rename from src/transformers/convert_mobilebert_original_tf_checkpoint_to_pytorch.py rename to src/transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py diff --git a/src/transformers/modeling_mobilebert.py b/src/transformers/models/mobilebert/modeling_mobilebert.py similarity index 99% rename from src/transformers/modeling_mobilebert.py rename to src/transformers/models/mobilebert/modeling_mobilebert.py index bd146e697fd..131b085d558 100644 --- a/src/transformers/modeling_mobilebert.py +++ b/src/transformers/models/mobilebert/modeling_mobilebert.py @@ -31,16 +31,15 @@ import torch.nn.functional as F from torch import nn from torch.nn import CrossEntropyLoss, MSELoss -from .activations import ACT2FN -from .configuration_mobilebert import MobileBertConfig -from .file_utils import ( +from ...activations import ACT2FN +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_outputs import ( +from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, @@ -50,8 +49,9 @@ from .modeling_outputs import ( SequenceClassifierOutput, TokenClassifierOutput, ) -from .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer -from .utils import logging +from 
...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import logging +from .configuration_mobilebert import MobileBertConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_tf_mobilebert.py b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py similarity index 99% rename from src/transformers/modeling_tf_mobilebert.py rename to src/transformers/models/mobilebert/modeling_tf_mobilebert.py index cf3bbde94f7..a776230f276 100644 --- a/src/transformers/modeling_tf_mobilebert.py +++ b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py @@ -21,9 +21,8 @@ from typing import Optional, Tuple import tensorflow as tf -from . import MobileBertConfig -from .activations_tf import get_tf_activation -from .file_utils import ( +from ...activations_tf import get_tf_activation +from ...file_utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, @@ -31,7 +30,7 @@ from .file_utils import ( add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_tf_outputs import ( +from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPooling, TFMaskedLMOutput, @@ -41,7 +40,7 @@ from .modeling_tf_outputs import ( TFSequenceClassifierOutput, TFTokenClassifierOutput, ) -from .modeling_tf_utils import ( +from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFMultipleChoiceLoss, TFNextSentencePredictionLoss, @@ -53,8 +52,9 @@ from .modeling_tf_utils import ( keras_serializable, shape_list, ) -from .tokenization_utils import BatchEncoding -from .utils import logging +from ...tokenization_utils import BatchEncoding +from ...utils import logging +from .configuration_mobilebert import MobileBertConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_mobilebert.py b/src/transformers/models/mobilebert/tokenization_mobilebert.py similarity index 95% rename from src/transformers/tokenization_mobilebert.py rename to src/transformers/models/mobilebert/tokenization_mobilebert.py index f6ab35a9af6..0b9d4f690b1 100644 --- a/src/transformers/tokenization_mobilebert.py +++ b/src/transformers/models/mobilebert/tokenization_mobilebert.py @@ -13,8 +13,8 @@ # limitations under the License. """Tokenization classes for MobileBERT.""" -from .tokenization_bert import BertTokenizer -from .utils import logging +from ...utils import logging +from ..bert.tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_mobilebert_fast.py b/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py similarity index 95% rename from src/transformers/tokenization_mobilebert_fast.py rename to src/transformers/models/mobilebert/tokenization_mobilebert_fast.py index bdbf2ebc98a..d0f1380c168 100644 --- a/src/transformers/tokenization_mobilebert_fast.py +++ b/src/transformers/models/mobilebert/tokenization_mobilebert_fast.py @@ -13,9 +13,9 @@ # limitations under the License. 
"""Tokenization classes for MobileBERT.""" -from .tokenization_bert_fast import BertTokenizerFast +from ...utils import logging +from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_mobilebert import MobileBertTokenizer -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/openai/__init__.py b/src/transformers/models/openai/__init__.py new file mode 100644 index 00000000000..0cb9f49185c --- /dev/null +++ b/src/transformers/models/openai/__init__.py @@ -0,0 +1,32 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig +from .tokenization_openai import OpenAIGPTTokenizer + + +if is_tokenizers_available(): + from .tokenization_openai_fast import OpenAIGPTTokenizerFast + +if is_torch_available(): + from .modeling_openai import ( + OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, + OpenAIGPTDoubleHeadsModel, + OpenAIGPTForSequenceClassification, + OpenAIGPTLMHeadModel, + OpenAIGPTModel, + OpenAIGPTPreTrainedModel, + load_tf_weights_in_openai_gpt, + ) + +if is_tf_available(): + from .modeling_tf_openai import ( + TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFOpenAIGPTDoubleHeadsModel, + TFOpenAIGPTLMHeadModel, + TFOpenAIGPTMainLayer, + TFOpenAIGPTModel, + TFOpenAIGPTPreTrainedModel, + ) diff --git a/src/transformers/configuration_openai.py b/src/transformers/models/openai/configuration_openai.py similarity index 99% rename from src/transformers/configuration_openai.py rename to src/transformers/models/openai/configuration_openai.py index 632656c99b2..5583c540232 100644 --- a/src/transformers/configuration_openai.py +++ b/src/transformers/models/openai/configuration_openai.py @@ -15,8 +15,8 @@ # limitations under the License. 
""" OpenAI GPT configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_openai_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py similarity index 100% rename from src/transformers/convert_openai_original_tf_checkpoint_to_pytorch.py rename to src/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py diff --git a/src/transformers/modeling_openai.py b/src/transformers/models/openai/modeling_openai.py similarity index 99% rename from src/transformers/modeling_openai.py rename to src/transformers/models/openai/modeling_openai.py index 25ed2d40bdf..3e30bdd7735 100644 --- a/src/transformers/modeling_openai.py +++ b/src/transformers/models/openai/modeling_openai.py @@ -27,24 +27,24 @@ import torch import torch.nn as nn from torch.nn import CrossEntropyLoss, MSELoss -from .activations import gelu_new, silu -from .configuration_openai import OpenAIGPTConfig -from .file_utils import ( +from ...activations import gelu_new, silu +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput -from .modeling_utils import ( +from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput +from ...modeling_utils import ( Conv1D, PreTrainedModel, SequenceSummary, find_pruneable_heads_and_indices, prune_conv1d_layer, ) -from .utils import logging +from ...utils import logging +from .configuration_openai import OpenAIGPTConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_tf_openai.py b/src/transformers/models/openai/modeling_tf_openai.py similarity index 99% rename from src/transformers/modeling_tf_openai.py rename to src/transformers/models/openai/modeling_tf_openai.py index fb4eba7c54c..65f67c1e770 100644 --- a/src/transformers/modeling_tf_openai.py +++ b/src/transformers/models/openai/modeling_tf_openai.py @@ -21,17 +21,16 @@ from typing import Optional, Tuple import tensorflow as tf -from .activations_tf import get_tf_activation -from .configuration_openai import OpenAIGPTConfig -from .file_utils import ( +from ...activations_tf import get_tf_activation +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput -from .modeling_tf_utils import ( +from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput +from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFConv1D, TFPreTrainedModel, @@ -41,8 +40,9 @@ from .modeling_tf_utils import ( keras_serializable, shape_list, ) -from .tokenization_utils import BatchEncoding -from .utils import logging +from ...tokenization_utils import BatchEncoding +from ...utils import logging +from .configuration_openai import OpenAIGPTConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_openai.py b/src/transformers/models/openai/tokenization_openai.py similarity index 98% rename from src/transformers/tokenization_openai.py rename to src/transformers/models/openai/tokenization_openai.py index 6bf1307855b..d06bd2d3dd6 100644 
--- a/src/transformers/tokenization_openai.py +++ b/src/transformers/models/openai/tokenization_openai.py @@ -20,9 +20,9 @@ import os import re from typing import Optional, Tuple -from .tokenization_bert import BasicTokenizer -from .tokenization_utils import PreTrainedTokenizer -from .utils import logging +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging +from ..bert.tokenization_bert import BasicTokenizer logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_openai_fast.py b/src/transformers/models/openai/tokenization_openai_fast.py similarity index 96% rename from src/transformers/tokenization_openai_fast.py rename to src/transformers/models/openai/tokenization_openai_fast.py index 7286b31217c..1c6e565e7c5 100644 --- a/src/transformers/tokenization_openai_fast.py +++ b/src/transformers/models/openai/tokenization_openai_fast.py @@ -17,9 +17,9 @@ from typing import Optional, Tuple +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging from .tokenization_openai import OpenAIGPTTokenizer -from .tokenization_utils_fast import PreTrainedTokenizerFast -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/pegasus/__init__.py b/src/transformers/models/pegasus/__init__.py new file mode 100644 index 00000000000..d2ec1286be8 --- /dev/null +++ b/src/transformers/models/pegasus/__init__.py @@ -0,0 +1,19 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_pegasus import PegasusConfig + + +if is_sentencepiece_available(): + from .tokenization_pegasus import PegasusTokenizer + +if is_tokenizers_available(): + from .tokenization_pegasus_fast import PegasusTokenizerFast + +if is_torch_available(): + from .modeling_pegasus import PegasusForConditionalGeneration + +if is_tf_available(): + from .modeling_tf_pegasus import TFPegasusForConditionalGeneration diff --git a/src/transformers/configuration_pegasus.py b/src/transformers/models/pegasus/configuration_pegasus.py similarity index 99% rename from src/transformers/configuration_pegasus.py rename to src/transformers/models/pegasus/configuration_pegasus.py index ed56f0b22c2..f134ea58320 100644 --- a/src/transformers/configuration_pegasus.py +++ b/src/transformers/models/pegasus/configuration_pegasus.py @@ -14,8 +14,8 @@ # limitations under the License. 
""" PEGASUS model configuration """ -from .configuration_bart import BartConfig -from .utils import logging +from ...utils import logging +from ..bart.configuration_bart import BartConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_pegasus_tf_to_pytorch.py b/src/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py similarity index 98% rename from src/transformers/convert_pegasus_tf_to_pytorch.py rename to src/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py index f764a2d3ee6..9254a0ba941 100644 --- a/src/transformers/convert_pegasus_tf_to_pytorch.py +++ b/src/transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py @@ -23,7 +23,7 @@ import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer -from transformers.configuration_pegasus import DEFAULTS, task_specific_params +from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params PATTERNS = [ diff --git a/src/transformers/modeling_pegasus.py b/src/transformers/models/pegasus/modeling_pegasus.py similarity index 92% rename from src/transformers/modeling_pegasus.py rename to src/transformers/models/pegasus/modeling_pegasus.py index 950a9374179..3d721d5ae88 100644 --- a/src/transformers/modeling_pegasus.py +++ b/src/transformers/models/pegasus/modeling_pegasus.py @@ -15,9 +15,9 @@ """PyTorch Pegasus model, ported from https://github.com/google-research/pegasus""" +from ...file_utils import add_start_docstrings +from ..bart.modeling_bart import BART_START_DOCSTRING, BartForConditionalGeneration from .configuration_pegasus import PegasusConfig -from .file_utils import add_start_docstrings -from .modeling_bart import BART_START_DOCSTRING, BartForConditionalGeneration @add_start_docstrings("The Pegasus Model for summarization ", BART_START_DOCSTRING) @@ -44,7 +44,7 @@ class PegasusForConditionalGeneration(BartForConditionalGeneration): >>> assert summary == "California's largest electricity provider has turned off power to tens of thousands of customers." """ - # All the code is in src/transformers/modeling_bart.py + # All the code is in src/transformers/models/bart/modeling_bart.py config_class = PegasusConfig authorized_missing_keys = [ r"final_logits_bias", diff --git a/src/transformers/modeling_tf_pegasus.py b/src/transformers/models/pegasus/modeling_tf_pegasus.py similarity index 85% rename from src/transformers/modeling_tf_pegasus.py rename to src/transformers/models/pegasus/modeling_tf_pegasus.py index 262c7bdb28c..7f53dba8e00 100644 --- a/src/transformers/modeling_tf_pegasus.py +++ b/src/transformers/models/pegasus/modeling_tf_pegasus.py @@ -13,10 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""TF Pegasus model, ported from the fairseq repo.""" +from ...file_utils import add_start_docstrings +from ...utils import logging +from ..bart.modeling_tf_bart import BART_START_DOCSTRING, TFBartForConditionalGeneration from .configuration_pegasus import PegasusConfig -from .file_utils import add_start_docstrings -from .modeling_tf_bart import BART_START_DOCSTRING, TFBartForConditionalGeneration -from .utils import logging _CONFIG_FOR_DOC = "PegasusConfig" @@ -38,4 +38,4 @@ class TFPegasusForConditionalGeneration(TFBartForConditionalGeneration): r"model.decoder.embed_positions.weight", ] config_class = PegasusConfig - # All the code is in src/transformers/modeling_tf_bart.py + # All the code is in src/transformers/models/bart/modeling_tf_bart.py diff --git a/src/transformers/tokenization_pegasus.py b/src/transformers/models/pegasus/tokenization_pegasus.py similarity index 97% rename from src/transformers/tokenization_pegasus.py rename to src/transformers/models/pegasus/tokenization_pegasus.py index bf1d6fef39f..170eb37e5f9 100644 --- a/src/transformers/tokenization_pegasus.py +++ b/src/transformers/models/pegasus/tokenization_pegasus.py @@ -14,9 +14,9 @@ # limitations under the License. from typing import Dict, List, Optional -from .file_utils import add_start_docstrings -from .tokenization_reformer import ReformerTokenizer -from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING, BatchEncoding +from ...file_utils import add_start_docstrings +from ...tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING, BatchEncoding +from ..reformer.tokenization_reformer import ReformerTokenizer SPIECE_UNDERLINE = "▁" diff --git a/src/transformers/tokenization_pegasus_fast.py b/src/transformers/models/pegasus/tokenization_pegasus_fast.py similarity index 95% rename from src/transformers/tokenization_pegasus_fast.py rename to src/transformers/models/pegasus/tokenization_pegasus_fast.py index d6e1f01561c..30fb45e0be9 100644 --- a/src/transformers/tokenization_pegasus_fast.py +++ b/src/transformers/models/pegasus/tokenization_pegasus_fast.py @@ -14,9 +14,9 @@ # limitations under the License. from typing import List, Optional -from .file_utils import add_start_docstrings, is_sentencepiece_available -from .tokenization_reformer_fast import ReformerTokenizerFast -from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING, BatchEncoding +from ...file_utils import add_start_docstrings, is_sentencepiece_available +from ...tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING, BatchEncoding +from ..reformer.tokenization_reformer_fast import ReformerTokenizerFast if is_sentencepiece_available(): diff --git a/src/transformers/models/phobert/__init__.py b/src/transformers/models/phobert/__init__.py new file mode 100644 index 00000000000..e709b9000db --- /dev/null +++ b/src/transformers/models/phobert/__init__.py @@ -0,0 +1,5 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
+ +from .tokenization_phobert import PhobertTokenizer diff --git a/src/transformers/tokenization_phobert.py b/src/transformers/models/phobert/tokenization_phobert.py similarity index 99% rename from src/transformers/tokenization_phobert.py rename to src/transformers/models/phobert/tokenization_phobert.py index a9a4d3dd7d8..684f2b3f390 100644 --- a/src/transformers/tokenization_phobert.py +++ b/src/transformers/models/phobert/tokenization_phobert.py @@ -21,8 +21,8 @@ import re from shutil import copyfile from typing import List, Optional, Tuple -from .tokenization_utils import PreTrainedTokenizer -from .utils import logging +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/prophetnet/__init__.py b/src/transformers/models/prophetnet/__init__.py new file mode 100644 index 00000000000..67030a5eb08 --- /dev/null +++ b/src/transformers/models/prophetnet/__init__.py @@ -0,0 +1,19 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_torch_available +from .configuration_prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig +from .tokenization_prophetnet import ProphetNetTokenizer + + +if is_torch_available(): + from .modeling_prophetnet import ( + PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, + ProphetNetDecoder, + ProphetNetEncoder, + ProphetNetForCausalLM, + ProphetNetForConditionalGeneration, + ProphetNetModel, + ProphetNetPreTrainedModel, + ) diff --git a/src/transformers/configuration_prophetnet.py b/src/transformers/models/prophetnet/configuration_prophetnet.py similarity index 98% rename from src/transformers/configuration_prophetnet.py rename to src/transformers/models/prophetnet/configuration_prophetnet.py index e62a22b09a3..f652043e660 100644 --- a/src/transformers/configuration_prophetnet.py +++ b/src/transformers/models/prophetnet/configuration_prophetnet.py @@ -15,8 +15,8 @@ """ ProphetNet model configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py similarity index 97% rename from src/transformers/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py rename to src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py index e6f9e78c7a9..cbd8c49956e 100644 --- a/src/transformers/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py @@ -19,9 +19,7 @@ import argparse import torch -from transformers import logging -from transformers.modeling_prophetnet import ProphetNetForConditionalGeneration -from transformers.modeling_xlm_prophetnet import XLMProphetNetForConditionalGeneration +from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively diff --git a/src/transformers/modeling_prophetnet.py 
b/src/transformers/models/prophetnet/modeling_prophetnet.py similarity index 99% rename from src/transformers/modeling_prophetnet.py rename to src/transformers/models/prophetnet/modeling_prophetnet.py index 227817e45bc..7117a5c858b 100644 --- a/src/transformers/modeling_prophetnet.py +++ b/src/transformers/models/prophetnet/modeling_prophetnet.py @@ -24,17 +24,17 @@ import torch import torch.nn.functional as F from torch import Tensor, nn -from .activations import ACT2FN -from .configuration_prophetnet import ProphetNetConfig -from .file_utils import ( +from ...activations import ACT2FN +from ...file_utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_outputs import BaseModelOutput -from .modeling_utils import PreTrainedModel -from .utils import logging +from ...modeling_outputs import BaseModelOutput +from ...modeling_utils import PreTrainedModel +from ...utils import logging +from .configuration_prophetnet import ProphetNetConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_prophetnet.py b/src/transformers/models/prophetnet/tokenization_prophetnet.py similarity index 97% rename from src/transformers/tokenization_prophetnet.py rename to src/transformers/models/prophetnet/tokenization_prophetnet.py index bea131ead88..5f0c125e3da 100644 --- a/src/transformers/tokenization_prophetnet.py +++ b/src/transformers/models/prophetnet/tokenization_prophetnet.py @@ -17,11 +17,11 @@ import collections import os from typing import List, Optional, Tuple -from .file_utils import add_start_docstrings -from .tokenization_bert import BasicTokenizer, WordpieceTokenizer -from .tokenization_utils import BatchEncoding, PreTrainedTokenizer -from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING -from .utils import logging +from ...file_utils import add_start_docstrings +from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer +from ...tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING +from ...utils import logging +from ..bert.tokenization_bert import BasicTokenizer, WordpieceTokenizer logger = logging.get_logger(__name__) diff --git a/src/transformers/models/rag/__init__.py b/src/transformers/models/rag/__init__.py new file mode 100644 index 00000000000..289cd3778b0 --- /dev/null +++ b/src/transformers/models/rag/__init__.py @@ -0,0 +1,12 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
+ +from ...file_utils import is_torch_available +from .configuration_rag import RagConfig +from .retrieval_rag import RagRetriever +from .tokenization_rag import RagTokenizer + + +if is_torch_available(): + from .modeling_rag import RagModel, RagSequenceForGeneration, RagTokenForGeneration diff --git a/src/transformers/configuration_rag.py b/src/transformers/models/rag/configuration_rag.py similarity index 97% rename from src/transformers/configuration_rag.py rename to src/transformers/models/rag/configuration_rag.py index eaf353a2132..6c49c81faec 100644 --- a/src/transformers/configuration_rag.py +++ b/src/transformers/models/rag/configuration_rag.py @@ -16,8 +16,8 @@ import copy -from .configuration_utils import PretrainedConfig -from .file_utils import add_start_docstrings +from ...configuration_utils import PretrainedConfig +from ...file_utils import add_start_docstrings RAG_CONFIG_DOC = r""" @@ -53,7 +53,7 @@ RAG_CONFIG_DOC = r""" The path to the serialized faiss index on disk. passages_path: (:obj:`str`, `optional`): A path to text passages compatible with the faiss index. Required if using - :class:`~transformers.retrieval_rag.LegacyIndex` + :class:`~transformers.models.rag.retrieval_rag.LegacyIndex` use_dummy_dataset (:obj:`bool`, `optional`, defaults to ``False``) Whether to load a "dummy" variant of the dataset specified by :obj:`dataset`. label_smoothing (:obj:`float`, `optional`, defaults to 0.0): @@ -127,7 +127,7 @@ class RagConfig(PretrainedConfig): decoder_config = kwargs.pop("generator") decoder_model_type = decoder_config.pop("model_type") - from .configuration_auto import AutoConfig + from ..auto.configuration_auto import AutoConfig self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config) self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config) diff --git a/src/transformers/modeling_rag.py b/src/transformers/models/rag/modeling_rag.py similarity index 99% rename from src/transformers/modeling_rag.py rename to src/transformers/models/rag/modeling_rag.py index fe134712887..236aa4cfb93 100644 --- a/src/transformers/modeling_rag.py +++ b/src/transformers/models/rag/modeling_rag.py @@ -19,14 +19,14 @@ from typing import List, Optional, Tuple import torch +from ...configuration_utils import PretrainedConfig +from ...file_utils import add_start_docstrings_to_model_forward, replace_return_docstrings +from ...generation_beam_search import BeamSearchScorer +from ...modeling_outputs import ModelOutput +from ...modeling_utils import PreTrainedModel +from ...utils import logging from .configuration_rag import RagConfig -from .configuration_utils import PretrainedConfig -from .file_utils import add_start_docstrings_to_model_forward, replace_return_docstrings -from .generation_beam_search import BeamSearchScorer -from .modeling_outputs import ModelOutput -from .modeling_utils import PreTrainedModel from .retrieval_rag import RagRetriever -from .utils import logging logger = logging.get_logger(__name__) @@ -316,10 +316,10 @@ class RagPreTrainedModel(PreTrainedModel): assert ( question_encoder_pretrained_model_name_or_path is not None ), "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to be defined" - from .modeling_auto import AutoModel + from ..auto.modeling_auto import AutoModel if "config" not in kwargs_question_encoder: - from .configuration_auto import AutoConfig + from ..auto.configuration_auto import AutoConfig question_encoder_config = 
AutoConfig.from_pretrained(question_encoder_pretrained_model_name_or_path) kwargs_question_encoder["config"] = question_encoder_config @@ -333,10 +333,10 @@ class RagPreTrainedModel(PreTrainedModel): assert ( generator_pretrained_model_name_or_path is not None ), "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has to be defined" - from .modeling_auto import AutoModelForSeq2SeqLM + from ..auto.modeling_auto import AutoModelForSeq2SeqLM if "config" not in kwargs_generator: - from .configuration_auto import AutoConfig + from ..auto.configuration_auto import AutoConfig generator_config = AutoConfig.from_pretrained(generator_pretrained_model_name_or_path) kwargs_generator["config"] = generator_config @@ -484,12 +484,12 @@ class RagModel(RagPreTrainedModel): ) super().__init__(config) if question_encoder is None: - from .modeling_auto import AutoModel + from ..auto.modeling_auto import AutoModel question_encoder = AutoModel.from_config(config.question_encoder) if generator is None: - from .modeling_auto import AutoModelForSeq2SeqLM + from ..auto.modeling_auto import AutoModelForSeq2SeqLM generator = AutoModelForSeq2SeqLM.from_config(config.generator) diff --git a/src/transformers/retrieval_rag.py b/src/transformers/models/rag/retrieval_rag.py similarity index 98% rename from src/transformers/retrieval_rag.py rename to src/transformers/models/rag/retrieval_rag.py index 36c0d371a41..fb47fd20596 100644 --- a/src/transformers/retrieval_rag.py +++ b/src/transformers/models/rag/retrieval_rag.py @@ -21,8 +21,7 @@ from typing import Iterable, List, Optional, Tuple import numpy as np -from .configuration_rag import RagConfig -from .file_utils import ( +from ...file_utils import ( cached_path, is_datasets_available, is_faiss_available, @@ -30,9 +29,10 @@ from .file_utils import ( requires_datasets, requires_faiss, ) +from ...tokenization_utils_base import BatchEncoding +from ...utils import logging +from .configuration_rag import RagConfig from .tokenization_rag import RagTokenizer -from .tokenization_utils_base import BatchEncoding -from .utils import logging if is_datasets_available(): @@ -105,7 +105,7 @@ class LegacyIndex(Index): The dimension of indexed vectors. index_path (:obj:`str`): A path to a `directory` containing index files compatible with - :class:`~transformers.retrieval_rag.LegacyIndex` + :class:`~transformers.models.rag.retrieval_rag.LegacyIndex` """ INDEX_FILENAME = "hf_bert_base.hnswSQ8_correct_phi_128.c_index" @@ -344,7 +344,7 @@ class RagRetriever: generator_tokenizer. generator_tokenizer (:class:`~transformers.PreTrainedTokenizer`): The tokenizer used for the generator part of the RagModel. 
- index (:class:`~transformers.retrieval_rag.Index`, optional, defaults to the one defined by the configuration): + index (:class:`~transformers.models.rag.retrieval_rag.Index`, optional, defaults to the one defined by the configuration): If specified, use this index instead of the one built using the configuration Examples:: diff --git a/src/transformers/tokenization_rag.py b/src/transformers/models/rag/tokenization_rag.py similarity index 94% rename from src/transformers/tokenization_rag.py rename to src/transformers/models/rag/tokenization_rag.py index ca133b9144a..4acfc018bd1 100644 --- a/src/transformers/tokenization_rag.py +++ b/src/transformers/models/rag/tokenization_rag.py @@ -16,10 +16,10 @@ import os from typing import List, Optional +from ...file_utils import add_start_docstrings +from ...tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING, BatchEncoding +from ...utils import logging from .configuration_rag import RagConfig -from .file_utils import add_start_docstrings -from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING, BatchEncoding -from .utils import logging logger = logging.get_logger(__name__) @@ -42,7 +42,7 @@ class RagTokenizer: @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): # dynamically import AutoTokenizer - from .tokenization_auto import AutoTokenizer + from ..auto.tokenization_auto import AutoTokenizer config = kwargs.pop("config", None) diff --git a/src/transformers/models/reformer/__init__.py b/src/transformers/models/reformer/__init__.py new file mode 100644 index 00000000000..3a823851aba --- /dev/null +++ b/src/transformers/models/reformer/__init__.py @@ -0,0 +1,25 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_sentencepiece_available, is_tokenizers_available, is_torch_available +from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig + + +if is_sentencepiece_available(): + from .tokenization_reformer import ReformerTokenizer + +if is_tokenizers_available(): + from .tokenization_reformer_fast import ReformerTokenizerFast + +if is_torch_available(): + from .modeling_reformer import ( + REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, + ReformerAttention, + ReformerForMaskedLM, + ReformerForQuestionAnswering, + ReformerForSequenceClassification, + ReformerLayer, + ReformerModel, + ReformerModelWithLMHead, + ) diff --git a/src/transformers/configuration_reformer.py b/src/transformers/models/reformer/configuration_reformer.py similarity index 99% rename from src/transformers/configuration_reformer.py rename to src/transformers/models/reformer/configuration_reformer.py index 55367d11888..69d178875ea 100755 --- a/src/transformers/configuration_reformer.py +++ b/src/transformers/models/reformer/configuration_reformer.py @@ -15,8 +15,8 @@ # limitations under the License. 
""" Reformer model configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_reformer_trax_checkpoint_to_pytorch.py b/src/transformers/models/reformer/convert_reformer_trax_checkpoint_to_pytorch.py similarity index 100% rename from src/transformers/convert_reformer_trax_checkpoint_to_pytorch.py rename to src/transformers/models/reformer/convert_reformer_trax_checkpoint_to_pytorch.py diff --git a/src/transformers/modeling_reformer.py b/src/transformers/models/reformer/modeling_reformer.py similarity index 99% rename from src/transformers/modeling_reformer.py rename to src/transformers/models/reformer/modeling_reformer.py index 434369934f5..f7dd925c76f 100755 --- a/src/transformers/modeling_reformer.py +++ b/src/transformers/models/reformer/modeling_reformer.py @@ -28,9 +28,8 @@ from torch import nn from torch.autograd.function import Function from torch.nn import CrossEntropyLoss, MSELoss -from .activations import ACT2FN -from .configuration_reformer import ReformerConfig -from .file_utils import ( +from ...activations import ACT2FN +from ...file_utils import ( DUMMY_INPUTS, DUMMY_MASK, ModelOutput, @@ -38,9 +37,10 @@ from .file_utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, ) -from .modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput -from .modeling_utils import PreTrainedModel, apply_chunking_to_forward -from .utils import logging +from ...modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput +from ...modeling_utils import PreTrainedModel, apply_chunking_to_forward +from ...utils import logging +from .configuration_reformer import ReformerConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_reformer.py b/src/transformers/models/reformer/tokenization_reformer.py similarity index 98% rename from src/transformers/tokenization_reformer.py rename to src/transformers/models/reformer/tokenization_reformer.py index 13ff15e2263..37efa5f6e76 100644 --- a/src/transformers/tokenization_reformer.py +++ b/src/transformers/models/reformer/tokenization_reformer.py @@ -21,8 +21,8 @@ from typing import Dict, Optional, Tuple import sentencepiece as spm -from .tokenization_utils import PreTrainedTokenizer -from .utils import logging +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_reformer_fast.py b/src/transformers/models/reformer/tokenization_reformer_fast.py similarity index 97% rename from src/transformers/tokenization_reformer_fast.py rename to src/transformers/models/reformer/tokenization_reformer_fast.py index 2442662da72..ff73ea6cd2a 100644 --- a/src/transformers/tokenization_reformer_fast.py +++ b/src/transformers/models/reformer/tokenization_reformer_fast.py @@ -19,9 +19,9 @@ import os from shutil import copyfile from typing import Optional, Tuple -from .file_utils import is_sentencepiece_available -from .tokenization_utils_fast import PreTrainedTokenizerFast -from .utils import logging +from ...file_utils import is_sentencepiece_available +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging if is_sentencepiece_available(): diff --git 
a/src/transformers/models/retribert/__init__.py b/src/transformers/models/retribert/__init__.py new file mode 100644 index 00000000000..7f781c85bf5 --- /dev/null +++ b/src/transformers/models/retribert/__init__.py @@ -0,0 +1,14 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_tokenizers_available, is_torch_available +from .configuration_retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig +from .tokenization_retribert import RetriBertTokenizer + + +if is_tokenizers_available(): + from .tokenization_retribert_fast import RetriBertTokenizerFast + +if is_torch_available(): + from .modeling_retribert import RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RetriBertModel, RetriBertPreTrainedModel diff --git a/src/transformers/configuration_retribert.py b/src/transformers/models/retribert/configuration_retribert.py similarity index 98% rename from src/transformers/configuration_retribert.py rename to src/transformers/models/retribert/configuration_retribert.py index 1011c19c9c8..ffbb2af72fc 100644 --- a/src/transformers/configuration_retribert.py +++ b/src/transformers/models/retribert/configuration_retribert.py @@ -14,8 +14,8 @@ # limitations under the License. """ RetriBERT model configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_retribert.py b/src/transformers/models/retribert/modeling_retribert.py similarity index 98% rename from src/transformers/modeling_retribert.py rename to src/transformers/models/retribert/modeling_retribert.py index 7801f34a8de..2e6c23c241a 100644 --- a/src/transformers/modeling_retribert.py +++ b/src/transformers/models/retribert/modeling_retribert.py @@ -23,11 +23,11 @@ import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint +from ...file_utils import add_start_docstrings +from ...modeling_utils import PreTrainedModel +from ...utils import logging +from ..bert.modeling_bert import BertModel from .configuration_retribert import RetriBertConfig -from .file_utils import add_start_docstrings -from .modeling_bert import BertModel -from .modeling_utils import PreTrainedModel -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_retribert.py b/src/transformers/models/retribert/tokenization_retribert.py similarity index 95% rename from src/transformers/tokenization_retribert.py rename to src/transformers/models/retribert/tokenization_retribert.py index 8627d639bd7..32966a05119 100644 --- a/src/transformers/tokenization_retribert.py +++ b/src/transformers/models/retribert/tokenization_retribert.py @@ -14,8 +14,8 @@ # limitations under the License.
"""Tokenization classes for RetriBERT.""" -from .tokenization_bert import BertTokenizer -from .utils import logging +from ...utils import logging +from ..bert.tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_retribert_fast.py b/src/transformers/models/retribert/tokenization_retribert_fast.py similarity index 95% rename from src/transformers/tokenization_retribert_fast.py rename to src/transformers/models/retribert/tokenization_retribert_fast.py index c137caa8931..f8ff3ad0c9c 100644 --- a/src/transformers/tokenization_retribert_fast.py +++ b/src/transformers/models/retribert/tokenization_retribert_fast.py @@ -14,9 +14,9 @@ # limitations under the License. """Tokenization classes for RetriBERT.""" -from .tokenization_bert_fast import BertTokenizerFast +from ...utils import logging +from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_retribert import RetriBertTokenizer -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/roberta/__init__.py b/src/transformers/models/roberta/__init__.py new file mode 100644 index 00000000000..fa9f253cd0c --- /dev/null +++ b/src/transformers/models/roberta/__init__.py @@ -0,0 +1,39 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig +from .tokenization_roberta import RobertaTokenizer + + +if is_tokenizers_available(): + from .tokenization_roberta_fast import RobertaTokenizerFast + +if is_torch_available(): + from .modeling_roberta import ( + ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, + RobertaForCausalLM, + RobertaForMaskedLM, + RobertaForMultipleChoice, + RobertaForQuestionAnswering, + RobertaForSequenceClassification, + RobertaForTokenClassification, + RobertaModel, + ) + +if is_tf_available(): + from .modeling_tf_roberta import ( + TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, + TFRobertaForMaskedLM, + TFRobertaForMultipleChoice, + TFRobertaForQuestionAnswering, + TFRobertaForSequenceClassification, + TFRobertaForTokenClassification, + TFRobertaMainLayer, + TFRobertaModel, + TFRobertaPreTrainedModel, + ) + +if is_flax_available(): + from .modeling_flax_roberta import FlaxRobertaModel diff --git a/src/transformers/configuration_roberta.py b/src/transformers/models/roberta/configuration_roberta.py similarity index 97% rename from src/transformers/configuration_roberta.py rename to src/transformers/models/roberta/configuration_roberta.py index 5e214700ed9..14598a305f7 100644 --- a/src/transformers/configuration_roberta.py +++ b/src/transformers/models/roberta/configuration_roberta.py @@ -15,8 +15,8 @@ # limitations under the License.
""" RoBERTa configuration """ -from .configuration_bert import BertConfig -from .utils import logging +from ...utils import logging +from ..bert.configuration_bert import BertConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py similarity index 96% rename from src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py rename to src/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py index dbb73db2820..67a14e8dd12 100644 --- a/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py @@ -24,8 +24,18 @@ from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version -from transformers.modeling_bert import BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput -from transformers.modeling_roberta import RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification +from transformers.models.bert.modeling_bert import ( + BertIntermediate, + BertLayer, + BertOutput, + BertSelfAttention, + BertSelfOutput, +) +from transformers.models.roberta.modeling_roberta import ( + RobertaConfig, + RobertaForMaskedLM, + RobertaForSequenceClassification, +) from transformers.utils import logging diff --git a/src/transformers/modeling_flax_roberta.py b/src/transformers/models/roberta/modeling_flax_roberta.py similarity index 93% rename from src/transformers/modeling_flax_roberta.py rename to src/transformers/models/roberta/modeling_flax_roberta.py index 8ae15bdc516..1e2a76c6b69 100644 --- a/src/transformers/modeling_flax_roberta.py +++ b/src/transformers/models/roberta/modeling_flax_roberta.py @@ -20,10 +20,10 @@ import flax.linen as nn import jax import jax.numpy as jnp +from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward +from ...modeling_flax_utils import FlaxPreTrainedModel, gelu +from ...utils import logging from .configuration_roberta import RobertaConfig -from .file_utils import add_start_docstrings, add_start_docstrings_to_model_forward -from .modeling_flax_utils import FlaxPreTrainedModel, gelu -from .utils import logging logger = logging.get_logger(__name__) @@ -89,7 +89,7 @@ ROBERTA_INPUTS_DOCSTRING = r""" """ -# Copied from transformers.modeling_flax_bert.FlaxBertLayerNorm with Bert->Roberta +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayerNorm with Bert->Roberta class FlaxRobertaLayerNorm(nn.Module): """ Layer normalization (https://arxiv.org/abs/1607.06450). Operates on the last axis of the input data.
@@ -130,7 +130,7 @@ class FlaxRobertaLayerNorm(nn.Module): return y -# Copied from transformers.modeling_flax_bert.FlaxBertEmbedding with Bert->Roberta +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbedding with Bert->Roberta class FlaxRobertaEmbedding(nn.Module): """ Specify a new class for doing the embedding stuff as Flax's one use 'embedding' for the parameter name and PyTorch @@ -147,7 +147,7 @@ class FlaxRobertaEmbedding(nn.Module): return jnp.take(embedding, inputs, axis=0) -# Copied from transformers.modeling_flax_bert.FlaxBertEmbeddings with Bert->Roberta +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbeddings with Bert->Roberta class FlaxRobertaEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" @@ -179,7 +179,7 @@ class FlaxRobertaEmbeddings(nn.Module): return layer_norm -# Copied from transformers.modeling_flax_bert.FlaxBertAttention with Bert->Roberta +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertAttention with Bert->Roberta class FlaxRobertaAttention(nn.Module): num_heads: int head_size: int @@ -194,7 +194,7 @@ class FlaxRobertaAttention(nn.Module): return layer_norm -# Copied from transformers.modeling_flax_bert.FlaxBertIntermediate with Bert->Roberta +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertIntermediate with Bert->Roberta class FlaxRobertaIntermediate(nn.Module): output_size: int @@ -205,7 +205,7 @@ class FlaxRobertaIntermediate(nn.Module): return gelu(dense) -# Copied from transformers.modeling_flax_bert.FlaxBertOutput with Bert->Roberta +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOutput with Bert->Roberta class FlaxRobertaOutput(nn.Module): @nn.compact def __call__(self, intermediate_output, attention_output): @@ -230,7 +230,7 @@ class FlaxRobertaLayer(nn.Module): return output -# Copied from transformers.modeling_flax_bert.FlaxBertLayerCollection with Bert->Roberta +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayerCollection with Bert->Roberta class FlaxRobertaLayerCollection(nn.Module): """ Stores N RobertaLayer(s) @@ -255,7 +255,7 @@ class FlaxRobertaLayerCollection(nn.Module): return input_i -# Copied from transformers.modeling_flax_bert.FlaxBertEncoder with Bert->Roberta +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEncoder with Bert->Roberta class FlaxRobertaEncoder(nn.Module): num_layers: int num_heads: int @@ -270,7 +270,7 @@ class FlaxRobertaEncoder(nn.Module): return layer -# Copied from transformers.modeling_flax_bert.FlaxBertPooler with Bert->Roberta +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPooler with Bert->Roberta class FlaxRobertaPooler(nn.Module): @nn.compact def __call__(self, hidden_state): @@ -279,7 +279,7 @@ class FlaxRobertaPooler(nn.Module): return jax.lax.tanh(out) -# Copied from transformers.modeling_flax_bert.FlaxBertModule with Bert->Roberta +# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertModule with Bert->Roberta class FlaxRobertaModule(nn.Module): vocab_size: int hidden_size: int diff --git a/src/transformers/modeling_roberta.py b/src/transformers/models/roberta/modeling_roberta.py similarity index 97% rename from src/transformers/modeling_roberta.py rename to src/transformers/models/roberta/modeling_roberta.py index dab19ed7d9e..ae9d6dd5b67 100644 --- a/src/transformers/modeling_roberta.py +++ b/src/transformers/models/roberta/modeling_roberta.py @@ -22,15 +22,14 @@ import torch import torch.nn as 
nn from torch.nn import CrossEntropyLoss, MSELoss -from .activations import ACT2FN, gelu -from .configuration_roberta import RobertaConfig -from .file_utils import ( +from ...activations import ACT2FN, gelu +from ...file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_outputs import ( +from ...modeling_outputs import ( BaseModelOutputWithCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, @@ -40,13 +39,14 @@ from .modeling_outputs import ( SequenceClassifierOutput, TokenClassifierOutput, ) -from .modeling_utils import ( +from ...modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) -from .utils import logging +from ...utils import logging +from .configuration_roberta import RobertaConfig logger = logging.get_logger(__name__) @@ -70,7 +70,7 @@ class RobertaEmbeddings(nn.Module): Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. """ - # Copied from transformers.modeling_bert.BertEmbeddings.__init__ + # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) @@ -99,7 +99,7 @@ class RobertaEmbeddings(nn.Module): else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) - # Copied from transformers.modeling_bert.BertEmbeddings.forward + # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward if input_ids is not None: input_shape = input_ids.size() else: @@ -141,7 +141,7 @@ class RobertaEmbeddings(nn.Module): return position_ids.unsqueeze(0).expand(input_shape) -# Copied from transformers.modeling_bert.BertSelfAttention with Bert->Roberta +# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta class RobertaSelfAttention(nn.Module): def __init__(self, config): super().__init__() @@ -220,7 +220,7 @@ class RobertaSelfAttention(nn.Module): return outputs -# Copied from transformers.modeling_bert.BertSelfOutput +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput class RobertaSelfOutput(nn.Module): def __init__(self, config): super().__init__() @@ -235,7 +235,7 @@ class RobertaSelfOutput(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertAttention with Bert->Roberta +# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta class RobertaAttention(nn.Module): def __init__(self, config): super().__init__() @@ -283,7 +283,7 @@ class RobertaAttention(nn.Module): return outputs -# Copied from transformers.modeling_bert.BertIntermediate +# Copied from transformers.models.bert.modeling_bert.BertIntermediate class RobertaIntermediate(nn.Module): def __init__(self, config): super().__init__() @@ -299,7 +299,7 @@ class RobertaIntermediate(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertOutput +# Copied from transformers.models.bert.modeling_bert.BertOutput class RobertaOutput(nn.Module): def __init__(self, config): super().__init__() @@ -314,7 +314,7 @@ class RobertaOutput(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertLayer with Bert->Roberta +# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta class RobertaLayer(nn.Module): def __init__(self, config): 
super().__init__() @@ -374,7 +374,7 @@ class RobertaLayer(nn.Module): return layer_output -# Copied from transformers.modeling_bert.BertEncoder with Bert->Roberta +# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta class RobertaEncoder(nn.Module): def __init__(self, config): super().__init__() @@ -449,7 +449,7 @@ class RobertaEncoder(nn.Module): ) -# Copied from transformers.modeling_bert.BertPooler +# Copied from transformers.models.bert.modeling_bert.BertPooler class RobertaPooler(nn.Module): def __init__(self, config): super().__init__() @@ -474,7 +474,7 @@ class RobertaPreTrainedModel(PreTrainedModel): config_class = RobertaConfig base_model_prefix = "roberta" - # Copied from transformers.modeling_bert.BertPreTrainedModel._init_weights + # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): @@ -579,7 +579,7 @@ class RobertaModel(RobertaPreTrainedModel): authorized_missing_keys = [r"position_ids"] - # Copied from transformers.modeling_bert.BertModel.__init__ with Bert->Roberta + # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config @@ -612,7 +612,7 @@ class RobertaModel(RobertaPreTrainedModel): output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) - # Copied from transformers.modeling_bert.BertModel.forward + # Copied from transformers.models.bert.modeling_bert.BertModel.forward def forward( self, input_ids=None, diff --git a/src/transformers/modeling_tf_roberta.py b/src/transformers/models/roberta/modeling_tf_roberta.py similarity index 97% rename from src/transformers/modeling_tf_roberta.py rename to src/transformers/models/roberta/modeling_tf_roberta.py index d36f935c335..2da67c9bd63 100644 --- a/src/transformers/modeling_tf_roberta.py +++ b/src/transformers/models/roberta/modeling_tf_roberta.py @@ -18,15 +18,14 @@ import tensorflow as tf -from .activations_tf import get_tf_activation -from .configuration_roberta import RobertaConfig -from .file_utils import ( +from ...activations_tf import get_tf_activation +from ...file_utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, ) -from .modeling_tf_outputs import ( +from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPooling, TFMaskedLMOutput, @@ -35,7 +34,7 @@ from .modeling_tf_outputs import ( TFSequenceClassifierOutput, TFTokenClassifierOutput, ) -from .modeling_tf_utils import ( +from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFMultipleChoiceLoss, TFPreTrainedModel, @@ -46,8 +45,9 @@ from .modeling_tf_utils import ( keras_serializable, shape_list, ) -from .tokenization_utils_base import BatchEncoding -from .utils import logging +from ...tokenization_utils_base import BatchEncoding +from ...utils import logging +from .configuration_roberta import RobertaConfig logger = logging.get_logger(__name__) @@ -223,7 +223,7 @@ class TFRobertaEmbeddings(tf.keras.layers.Layer): return tf.reshape(logits, [batch_size, length, self.vocab_size]) -# Copied from transformers.modeling_tf_bert.TFBertPooler +# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler class TFRobertaPooler(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ 
-244,7 +244,7 @@ class TFRobertaPooler(tf.keras.layers.Layer): return pooled_output -# Copied from transformers.modeling_tf_bert.TFBertSelfAttention +# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention class TFRobertaSelfAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -316,7 +316,7 @@ class TFRobertaSelfAttention(tf.keras.layers.Layer): return outputs -# Copied from transformers.modeling_tf_bert.TFBertSelfOutput +# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput class TFRobertaSelfOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -335,7 +335,7 @@ class TFRobertaSelfOutput(tf.keras.layers.Layer): return hidden_states -# Copied from transformers.modeling_tf_bert.TFBertAttention with Bert->Roberta +# Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->Roberta class TFRobertaAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -356,7 +356,7 @@ class TFRobertaAttention(tf.keras.layers.Layer): return outputs -# Copied from transformers.modeling_tf_bert.TFBertIntermediate +# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate class TFRobertaIntermediate(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -377,7 +377,7 @@ class TFRobertaIntermediate(tf.keras.layers.Layer): return hidden_states -# Copied from transformers.modeling_tf_bert.TFBertOutput +# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput class TFRobertaOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -396,7 +396,7 @@ class TFRobertaOutput(tf.keras.layers.Layer): return hidden_states -# Copied from transformers.modeling_tf_bert.TFBertLayer with Bert->Roberta +# Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->Roberta class TFRobertaLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -417,7 +417,7 @@ class TFRobertaLayer(tf.keras.layers.Layer): return outputs -# Copied from transformers.modeling_tf_bert.TFBertEncoder with Bert->Roberta +# Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->Roberta class TFRobertaEncoder(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -478,16 +478,16 @@ class TFRobertaMainLayer(tf.keras.layers.Layer): # The embeddings must be the last declaration in order to follow the weights order self.embeddings = TFRobertaEmbeddings(config, name="embeddings") - # Copied from transformers.modeling_tf_bert.TFBertMainLayer.get_input_embeddings + # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.get_input_embeddings def get_input_embeddings(self): return self.embeddings - # Copied from transformers.modeling_tf_bert.TFBertMainLayer.set_input_embeddings + # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.set_input_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value self.embeddings.vocab_size = value.shape[0] - # Copied from transformers.modeling_tf_bert.TFBertMainLayer._prune_heads + # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base @@ -495,7 +495,7 @@ class TFRobertaMainLayer(tf.keras.layers.Layer): """ raise NotImplementedError - # Copied from transformers.modeling_tf_bert.TFBertMainLayer.call + # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.call def call( self, inputs, diff --git a/src/transformers/tokenization_roberta.py b/src/transformers/models/roberta/tokenization_roberta.py similarity index 98% rename from src/transformers/tokenization_roberta.py rename to src/transformers/models/roberta/tokenization_roberta.py index 9d7731baef0..91475defbe7 100644 --- a/src/transformers/tokenization_roberta.py +++ b/src/transformers/models/roberta/tokenization_roberta.py @@ -17,9 +17,9 @@ import warnings from typing import List, Optional -from .tokenization_gpt2 import GPT2Tokenizer -from .tokenization_utils import AddedToken -from .utils import logging +from ...tokenization_utils import AddedToken +from ...utils import logging +from ..gpt2.tokenization_gpt2 import GPT2Tokenizer logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_roberta_fast.py b/src/transformers/models/roberta/tokenization_roberta_fast.py similarity index 98% rename from src/transformers/tokenization_roberta_fast.py rename to src/transformers/models/roberta/tokenization_roberta_fast.py index d8f6fb16a5c..056aba6a466 100644 --- a/src/transformers/tokenization_roberta_fast.py +++ b/src/transformers/models/roberta/tokenization_roberta_fast.py @@ -16,10 +16,10 @@ from typing import List, Optional -from .tokenization_gpt2_fast import GPT2TokenizerFast +from ...tokenization_utils_base import AddedToken +from ...utils import logging +from ..gpt2.tokenization_gpt2_fast import GPT2TokenizerFast from .tokenization_roberta import RobertaTokenizer -from .tokenization_utils_base import AddedToken -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/squeezebert/__init__.py b/src/transformers/models/squeezebert/__init__.py new file mode 100644 index 00000000000..63eb3203e1d --- /dev/null +++ b/src/transformers/models/squeezebert/__init__.py @@ -0,0 +1,24 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
+ +from ...file_utils import is_tokenizers_available, is_torch_available +from .configuration_squeezebert import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig +from .tokenization_squeezebert import SqueezeBertTokenizer + + +if is_tokenizers_available(): + from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast + +if is_torch_available(): + from .modeling_squeezebert import ( + SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + SqueezeBertForMaskedLM, + SqueezeBertForMultipleChoice, + SqueezeBertForQuestionAnswering, + SqueezeBertForSequenceClassification, + SqueezeBertForTokenClassification, + SqueezeBertModel, + SqueezeBertModule, + SqueezeBertPreTrainedModel, + ) diff --git a/src/transformers/configuration_squeezebert.py b/src/transformers/models/squeezebert/configuration_squeezebert.py similarity index 98% rename from src/transformers/configuration_squeezebert.py rename to src/transformers/models/squeezebert/configuration_squeezebert.py index 82ce12eb3d6..c3ed53e5dc5 100644 --- a/src/transformers/configuration_squeezebert.py +++ b/src/transformers/models/squeezebert/configuration_squeezebert.py @@ -14,8 +14,8 @@ # limitations under the License. """ SqueezeBERT model configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_squeezebert.py b/src/transformers/models/squeezebert/modeling_squeezebert.py similarity index 99% rename from src/transformers/modeling_squeezebert.py rename to src/transformers/models/squeezebert/modeling_squeezebert.py index 54c5cb7b02f..ba61c3e70f7 100644 --- a/src/transformers/modeling_squeezebert.py +++ b/src/transformers/models/squeezebert/modeling_squeezebert.py @@ -21,10 +21,9 @@ import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss -from .activations import ACT2FN -from .configuration_squeezebert import SqueezeBertConfig -from .file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward -from .modeling_outputs import ( +from ...activations import ACT2FN +from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward +from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, @@ -33,8 +32,9 @@ from .modeling_outputs import ( SequenceClassifierOutput, TokenClassifierOutput, ) -from .modeling_utils import PreTrainedModel -from .utils import logging +from ...modeling_utils import PreTrainedModel +from ...utils import logging +from .configuration_squeezebert import SqueezeBertConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_squeezebert.py b/src/transformers/models/squeezebert/tokenization_squeezebert.py similarity index 96% rename from src/transformers/tokenization_squeezebert.py rename to src/transformers/models/squeezebert/tokenization_squeezebert.py index 8629597b8e9..d73bb732d64 100644 --- a/src/transformers/tokenization_squeezebert.py +++ b/src/transformers/models/squeezebert/tokenization_squeezebert.py @@ -14,8 +14,8 @@ # limitations under the License. 
"""Tokenization classes for SqueezeBERT.""" -from .tokenization_bert import BertTokenizer -from .utils import logging +from ...utils import logging +from ..bert.tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_squeezebert_fast.py b/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py similarity index 97% rename from src/transformers/tokenization_squeezebert_fast.py rename to src/transformers/models/squeezebert/tokenization_squeezebert_fast.py index faa84b99a7d..d6de6e63f8a 100644 --- a/src/transformers/tokenization_squeezebert_fast.py +++ b/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py @@ -14,9 +14,9 @@ # limitations under the License. """Tokenization classes for SqueezeBERT.""" -from .tokenization_bert_fast import BertTokenizerFast +from ...utils import logging +from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_squeezebert import SqueezeBertTokenizer -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/t5/__init__.py b/src/transformers/models/t5/__init__.py new file mode 100644 index 00000000000..49c8a877b2f --- /dev/null +++ b/src/transformers/models/t5/__init__.py @@ -0,0 +1,30 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +from ...file_utils import is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config + + +if is_sentencepiece_available(): + from .tokenization_t5 import T5Tokenizer + +if is_tokenizers_available(): + from .tokenization_t5_fast import T5TokenizerFast + +if is_torch_available(): + from .modeling_t5 import ( + T5_PRETRAINED_MODEL_ARCHIVE_LIST, + T5ForConditionalGeneration, + T5Model, + T5PreTrainedModel, + load_tf_weights_in_t5, + ) + +if is_tf_available(): + from .modeling_tf_t5 import ( + TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST, + TFT5ForConditionalGeneration, + TFT5Model, + TFT5PreTrainedModel, + ) diff --git a/src/transformers/configuration_t5.py b/src/transformers/models/t5/configuration_t5.py similarity index 98% rename from src/transformers/configuration_t5.py rename to src/transformers/models/t5/configuration_t5.py index 7c9e3f38d79..b1a045cb18d 100644 --- a/src/transformers/configuration_t5.py +++ b/src/transformers/models/t5/configuration_t5.py @@ -14,8 +14,8 @@ # limitations under the License. 
""" T5 model configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_t5_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/t5/convert_t5_original_tf_checkpoint_to_pytorch.py similarity index 100% rename from src/transformers/convert_t5_original_tf_checkpoint_to_pytorch.py rename to src/transformers/models/t5/convert_t5_original_tf_checkpoint_to_pytorch.py diff --git a/src/transformers/modeling_t5.py b/src/transformers/models/t5/modeling_t5.py similarity index 99% rename from src/transformers/modeling_t5.py rename to src/transformers/models/t5/modeling_t5.py index 21f185e2464..b2fee2503e3 100644 --- a/src/transformers/modeling_t5.py +++ b/src/transformers/models/t5/modeling_t5.py @@ -25,22 +25,22 @@ import torch.nn.functional as F from torch import nn from torch.nn import CrossEntropyLoss -from .configuration_t5 import T5Config -from .file_utils import ( +from ...file_utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_outputs import ( +from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) -from .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer -from .utils import logging +from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import logging +from .configuration_t5 import T5Config logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_tf_t5.py b/src/transformers/models/t5/modeling_tf_t5.py similarity index 99% rename from src/transformers/modeling_tf_t5.py rename to src/transformers/models/t5/modeling_tf_t5.py index 0b01002c8cc..74e703cb9de 100644 --- a/src/transformers/modeling_tf_t5.py +++ b/src/transformers/models/t5/modeling_tf_t5.py @@ -26,16 +26,15 @@ import tensorflow as tf from transformers.modeling_tf_utils import TFWrappedEmbeddings -from .configuration_t5 import T5Config -from .file_utils import ( +from ...file_utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_tf_outputs import TFSeq2SeqLMOutput, TFSeq2SeqModelOutput -from .modeling_tf_utils import ( +from ...modeling_tf_outputs import TFSeq2SeqLMOutput, TFSeq2SeqModelOutput +from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFPreTrainedModel, TFSharedEmbeddings, @@ -43,8 +42,9 @@ from .modeling_tf_utils import ( keras_serializable, shape_list, ) -from .tokenization_utils import BatchEncoding -from .utils import logging +from ...tokenization_utils import BatchEncoding +from ...utils import logging +from .configuration_t5 import T5Config logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_t5.py b/src/transformers/models/t5/tokenization_t5.py similarity index 98% rename from src/transformers/tokenization_t5.py rename to src/transformers/models/t5/tokenization_t5.py index 781791b5bac..34ecd555802 100644 --- a/src/transformers/tokenization_t5.py +++ b/src/transformers/models/t5/tokenization_t5.py @@ -23,10 +23,10 @@ from typing import List, Optional, Tuple import sentencepiece as spm -from .file_utils import add_start_docstrings -from .tokenization_utils import BatchEncoding, 
PreTrainedTokenizer -from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING -from .utils import logging +from ...file_utils import add_start_docstrings +from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer +from ...tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_t5_fast.py b/src/transformers/models/t5/tokenization_t5_fast.py similarity index 97% rename from src/transformers/tokenization_t5_fast.py rename to src/transformers/models/t5/tokenization_t5_fast.py index 0aba4763dfc..53608e64601 100644 --- a/src/transformers/tokenization_t5_fast.py +++ b/src/transformers/models/t5/tokenization_t5_fast.py @@ -19,11 +19,11 @@ import os from shutil import copyfile from typing import List, Optional, Tuple -from .file_utils import add_start_docstrings, is_sentencepiece_available -from .tokenization_utils import BatchEncoding -from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING -from .tokenization_utils_fast import PreTrainedTokenizerFast -from .utils import logging +from ...file_utils import add_start_docstrings, is_sentencepiece_available +from ...tokenization_utils import BatchEncoding +from ...tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging if is_sentencepiece_available(): diff --git a/src/transformers/models/transfo_xl/__init__.py b/src/transformers/models/transfo_xl/__init__.py new file mode 100644 index 00000000000..2dc009b7f6e --- /dev/null +++ b/src/transformers/models/transfo_xl/__init__.py @@ -0,0 +1,28 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
+ +from ...file_utils import is_tf_available, is_torch_available +from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig +from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer + + +if is_torch_available(): + from .modeling_transfo_xl import ( + TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, + AdaptiveEmbedding, + TransfoXLLMHeadModel, + TransfoXLModel, + TransfoXLPreTrainedModel, + load_tf_weights_in_transfo_xl, + ) + +if is_tf_available(): + from .modeling_tf_transfo_xl import ( + TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, + TFAdaptiveEmbedding, + TFTransfoXLLMHeadModel, + TFTransfoXLMainLayer, + TFTransfoXLModel, + TFTransfoXLPreTrainedModel, + ) diff --git a/src/transformers/configuration_transfo_xl.py b/src/transformers/models/transfo_xl/configuration_transfo_xl.py similarity index 99% rename from src/transformers/configuration_transfo_xl.py rename to src/transformers/models/transfo_xl/configuration_transfo_xl.py index 03afa6e1324..3585a97b931 100644 --- a/src/transformers/configuration_transfo_xl.py +++ b/src/transformers/models/transfo_xl/configuration_transfo_xl.py @@ -18,8 +18,8 @@ import warnings -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py similarity index 96% rename from src/transformers/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py rename to src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py index 9135b0d088e..a5d8e194ce9 100755 --- a/src/transformers/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py +++ b/src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py @@ -22,7 +22,7 @@ import sys import torch -import transformers.tokenization_transfo_xl as data_utils +import transformers.models.transfo_xl.tokenization_transfo_xl as data_utils from transformers import ( CONFIG_NAME, WEIGHTS_NAME, @@ -30,7 +30,7 @@ from transformers import ( TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl, ) -from transformers.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES +from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import logging diff --git a/src/transformers/modeling_tf_transfo_xl.py b/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py similarity index 99% rename from src/transformers/modeling_tf_transfo_xl.py rename to src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py index 3883a370c77..dda6204356c 100644 --- a/src/transformers/modeling_tf_transfo_xl.py +++ b/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py @@ -22,17 +22,17 @@ from typing import List, Optional, Tuple import tensorflow as tf -from .configuration_transfo_xl import TransfoXLConfig -from .file_utils import ( +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, ) +from ...modeling_tf_utils import TFPreTrainedModel, get_initializer, keras_serializable, shape_list +from ...tokenization_utils import BatchEncoding +from ...utils import logging +from .configuration_transfo_xl import TransfoXLConfig from .modeling_tf_transfo_xl_utilities import TFAdaptiveSoftmaxMask 
-from .modeling_tf_utils import TFPreTrainedModel, get_initializer, keras_serializable, shape_list -from .tokenization_utils import BatchEncoding -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_tf_transfo_xl_utilities.py b/src/transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py similarity index 99% rename from src/transformers/modeling_tf_transfo_xl_utilities.py rename to src/transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py index b4ed4f7e162..84994f9b442 100644 --- a/src/transformers/modeling_tf_transfo_xl_utilities.py +++ b/src/transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py @@ -20,7 +20,7 @@ import tensorflow as tf -from .modeling_tf_utils import shape_list +from ...modeling_tf_utils import shape_list class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer): diff --git a/src/transformers/modeling_transfo_xl.py b/src/transformers/models/transfo_xl/modeling_transfo_xl.py similarity index 99% rename from src/transformers/modeling_transfo_xl.py rename to src/transformers/models/transfo_xl/modeling_transfo_xl.py index 0f188533e75..8843febfe73 100644 --- a/src/transformers/modeling_transfo_xl.py +++ b/src/transformers/models/transfo_xl/modeling_transfo_xl.py @@ -25,16 +25,16 @@ import torch import torch.nn as nn import torch.nn.functional as F -from .configuration_transfo_xl import TransfoXLConfig -from .file_utils import ( +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, ) +from ...modeling_utils import PreTrainedModel +from ...utils import logging +from .configuration_transfo_xl import TransfoXLConfig from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax -from .modeling_utils import PreTrainedModel -from .utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_transfo_xl_utilities.py b/src/transformers/models/transfo_xl/modeling_transfo_xl_utilities.py similarity index 100% rename from src/transformers/modeling_transfo_xl_utilities.py rename to src/transformers/models/transfo_xl/modeling_transfo_xl_utilities.py diff --git a/src/transformers/tokenization_transfo_xl.py b/src/transformers/models/transfo_xl/tokenization_transfo_xl.py similarity index 99% rename from src/transformers/tokenization_transfo_xl.py rename to src/transformers/models/transfo_xl/tokenization_transfo_xl.py index 9d728a07565..89a6ffdfeb7 100644 --- a/src/transformers/tokenization_transfo_xl.py +++ b/src/transformers/models/transfo_xl/tokenization_transfo_xl.py @@ -29,9 +29,9 @@ import numpy as np import sacremoses as sm -from .file_utils import cached_path, is_torch_available, torch_only_method -from .tokenization_utils import PreTrainedTokenizer -from .utils import logging +from ...file_utils import cached_path, is_torch_available, torch_only_method +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging if is_torch_available(): diff --git a/src/transformers/models/xlm/__init__.py b/src/transformers/models/xlm/__init__.py new file mode 100644 index 00000000000..7dbfb7373dd --- /dev/null +++ b/src/transformers/models/xlm/__init__.py @@ -0,0 +1,34 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
+ +from ...file_utils import is_tf_available, is_torch_available +from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig +from .tokenization_xlm import XLMTokenizer + + +if is_torch_available(): + from .modeling_xlm import ( + XLM_PRETRAINED_MODEL_ARCHIVE_LIST, + XLMForMultipleChoice, + XLMForQuestionAnswering, + XLMForQuestionAnsweringSimple, + XLMForSequenceClassification, + XLMForTokenClassification, + XLMModel, + XLMPreTrainedModel, + XLMWithLMHeadModel, + ) + +if is_tf_available(): + from .modeling_tf_xlm import ( + TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, + TFXLMForMultipleChoice, + TFXLMForQuestionAnsweringSimple, + TFXLMForSequenceClassification, + TFXLMForTokenClassification, + TFXLMMainLayer, + TFXLMModel, + TFXLMPreTrainedModel, + TFXLMWithLMHeadModel, + ) diff --git a/src/transformers/configuration_xlm.py b/src/transformers/models/xlm/configuration_xlm.py similarity index 99% rename from src/transformers/configuration_xlm.py rename to src/transformers/models/xlm/configuration_xlm.py index 5903eac763c..839e4337ff1 100644 --- a/src/transformers/configuration_xlm.py +++ b/src/transformers/models/xlm/configuration_xlm.py @@ -14,8 +14,8 @@ # limitations under the License. """ XLM configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_xlm_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py similarity index 97% rename from src/transformers/convert_xlm_original_pytorch_checkpoint_to_pytorch.py rename to src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py index 9baf1159125..37ee8a25e80 100755 --- a/src/transformers/convert_xlm_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py @@ -22,7 +22,7 @@ import numpy import torch from transformers import CONFIG_NAME, WEIGHTS_NAME -from transformers.tokenization_xlm import VOCAB_FILES_NAMES +from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import logging diff --git a/src/transformers/modeling_tf_xlm.py b/src/transformers/models/xlm/modeling_tf_xlm.py similarity index 99% rename from src/transformers/modeling_tf_xlm.py rename to src/transformers/models/xlm/modeling_tf_xlm.py index d3724986c57..6f4cf368e49 100644 --- a/src/transformers/modeling_tf_xlm.py +++ b/src/transformers/models/xlm/modeling_tf_xlm.py @@ -25,23 +25,22 @@ from typing import Optional, Tuple import numpy as np import tensorflow as tf -from .activations_tf import get_tf_activation -from .configuration_xlm import XLMConfig -from .file_utils import ( +from ...activations_tf import get_tf_activation +from ...file_utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, ) -from .modeling_tf_outputs import ( +from ...modeling_tf_outputs import ( TFBaseModelOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) -from .modeling_tf_utils import ( +from ...modeling_tf_utils import ( TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, @@ -53,8 +52,9 @@ from .modeling_tf_utils import ( keras_serializable, shape_list, ) -from .tokenization_utils import BatchEncoding -from .utils 
import logging +from ...tokenization_utils import BatchEncoding +from ...utils import logging +from .configuration_xlm import XLMConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_xlm.py b/src/transformers/models/xlm/modeling_xlm.py similarity index 99% rename from src/transformers/modeling_xlm.py rename to src/transformers/models/xlm/modeling_xlm.py index 7b423d7e1fa..94e303db14f 100755 --- a/src/transformers/modeling_xlm.py +++ b/src/transformers/models/xlm/modeling_xlm.py @@ -29,16 +29,15 @@ from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from torch.nn import functional as F -from .activations import gelu -from .configuration_xlm import XLMConfig -from .file_utils import ( +from ...activations import gelu +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_outputs import ( +from ...modeling_outputs import ( BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, @@ -46,7 +45,7 @@ from .modeling_outputs import ( SequenceClassifierOutput, TokenClassifierOutput, ) -from .modeling_utils import ( +from ...modeling_utils import ( PreTrainedModel, SequenceSummary, SQuADHead, @@ -54,7 +53,8 @@ from .modeling_utils import ( find_pruneable_heads_and_indices, prune_linear_layer, ) -from .utils import logging +from ...utils import logging +from .configuration_xlm import XLMConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_xlm.py b/src/transformers/models/xlm/tokenization_xlm.py similarity index 99% rename from src/transformers/tokenization_xlm.py rename to src/transformers/models/xlm/tokenization_xlm.py index 577cdc6efa7..1ee4d71cd48 100644 --- a/src/transformers/tokenization_xlm.py +++ b/src/transformers/models/xlm/tokenization_xlm.py @@ -24,8 +24,8 @@ from typing import List, Optional, Tuple import sacremoses as sm -from .tokenization_utils import PreTrainedTokenizer -from .utils import logging +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/xlm_prophetnet/__init__.py b/src/transformers/models/xlm_prophetnet/__init__.py new file mode 100644 index 00000000000..5daafbe433e --- /dev/null +++ b/src/transformers/models/xlm_prophetnet/__init__.py @@ -0,0 +1,20 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
+ +from ...file_utils import is_sentencepiece_available, is_torch_available +from .configuration_xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig + + +if is_sentencepiece_available(): + from .tokenization_xlm_prophetnet import XLMProphetNetTokenizer + +if is_torch_available(): + from .modeling_xlm_prophetnet import ( + XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, + XLMProphetNetDecoder, + XLMProphetNetEncoder, + XLMProphetNetForCausalLM, + XLMProphetNetForConditionalGeneration, + XLMProphetNetModel, + ) diff --git a/src/transformers/configuration_xlm_prophetnet.py b/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py similarity index 92% rename from src/transformers/configuration_xlm_prophetnet.py rename to src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py index 6ebf8899e01..32ea91a9eaf 100644 --- a/src/transformers/configuration_xlm_prophetnet.py +++ b/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py @@ -15,8 +15,8 @@ """ XLM-ProphetNet model configuration """ -from .configuration_prophetnet import ProphetNetConfig -from .utils import logging +from ...utils import logging +from ..prophetnet.configuration_prophetnet import ProphetNetConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_xlm_prophetnet.py b/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py similarity index 98% rename from src/transformers/modeling_xlm_prophetnet.py rename to src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py index 903d3d6e08e..9240cea230b 100644 --- a/src/transformers/modeling_xlm_prophetnet.py +++ b/src/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py @@ -14,15 +14,15 @@ # limitations under the License. """ PyTorch XLM-ProphetNet model.""" -from .configuration_xlm_prophetnet import XLMProphetNetConfig -from .modeling_prophetnet import ( +from ...utils import logging +from ..prophetnet.modeling_prophetnet import ( ProphetNetDecoder, ProphetNetEncoder, ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel, ) -from .utils import logging +from .configuration_xlm_prophetnet import XLMProphetNetConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_xlm_prophetnet.py b/src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py similarity index 99% rename from src/transformers/tokenization_xlm_prophetnet.py rename to src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py index 994461ea78b..c1df1481383 100644 --- a/src/transformers/tokenization_xlm_prophetnet.py +++ b/src/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py @@ -18,8 +18,8 @@ import os from shutil import copyfile from typing import List, Optional, Tuple -from .tokenization_utils import PreTrainedTokenizer -from .utils import logging +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/models/xlm_roberta/__init__.py b/src/transformers/models/xlm_roberta/__init__.py new file mode 100644 index 00000000000..bb1fa7ae771 --- /dev/null +++ b/src/transformers/models/xlm_roberta/__init__.py @@ -0,0 +1,36 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
+ +from ...file_utils import is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig + + +if is_sentencepiece_available(): + from .tokenization_xlm_roberta import XLMRobertaTokenizer + +if is_tokenizers_available(): + from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast + +if is_torch_available(): + from .modeling_xlm_roberta import ( + XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, + XLMRobertaForCausalLM, + XLMRobertaForMaskedLM, + XLMRobertaForMultipleChoice, + XLMRobertaForQuestionAnswering, + XLMRobertaForSequenceClassification, + XLMRobertaForTokenClassification, + XLMRobertaModel, + ) + +if is_tf_available(): + from .modeling_tf_xlm_roberta import ( + TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, + TFXLMRobertaForMaskedLM, + TFXLMRobertaForMultipleChoice, + TFXLMRobertaForQuestionAnswering, + TFXLMRobertaForSequenceClassification, + TFXLMRobertaForTokenClassification, + TFXLMRobertaModel, + ) diff --git a/src/transformers/configuration_xlm_roberta.py b/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py similarity index 95% rename from src/transformers/configuration_xlm_roberta.py rename to src/transformers/models/xlm_roberta/configuration_xlm_roberta.py index 76e93610c01..2ca58306c08 100644 --- a/src/transformers/configuration_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/configuration_xlm_roberta.py @@ -15,8 +15,8 @@ # limitations under the License. """ XLM-RoBERTa configuration """ -from .configuration_roberta import RobertaConfig -from .utils import logging +from ...utils import logging +from ..roberta.configuration_roberta import RobertaConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_tf_xlm_roberta.py b/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py similarity index 98% rename from src/transformers/modeling_tf_xlm_roberta.py rename to src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py index a18433cad8e..01dc6490abe 100644 --- a/src/transformers/modeling_tf_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py @@ -15,9 +15,9 @@ # limitations under the License. """ TF 2.0 XLM-RoBERTa model. """ -from .configuration_xlm_roberta import XLMRobertaConfig -from .file_utils import add_start_docstrings -from .modeling_tf_roberta import ( +from ...file_utils import add_start_docstrings +from ...utils import logging +from ..roberta.modeling_tf_roberta import ( TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, @@ -25,7 +25,7 @@ from .modeling_tf_roberta import ( TFRobertaForTokenClassification, TFRobertaModel, ) -from .utils import logging +from .configuration_xlm_roberta import XLMRobertaConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_xlm_roberta.py b/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py similarity index 98% rename from src/transformers/modeling_xlm_roberta.py rename to src/transformers/models/xlm_roberta/modeling_xlm_roberta.py index 3b71082b77c..edcf151878c 100644 --- a/src/transformers/modeling_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/modeling_xlm_roberta.py @@ -15,9 +15,9 @@ # limitations under the License. """PyTorch XLM-RoBERTa model. 
""" -from .configuration_xlm_roberta import XLMRobertaConfig -from .file_utils import add_start_docstrings -from .modeling_roberta import ( +from ...file_utils import add_start_docstrings +from ...utils import logging +from ..roberta.modeling_roberta import ( RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, @@ -26,7 +26,7 @@ from .modeling_roberta import ( RobertaForTokenClassification, RobertaModel, ) -from .utils import logging +from .configuration_xlm_roberta import XLMRobertaConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_xlm_roberta.py b/src/transformers/models/xlm_roberta/tokenization_xlm_roberta.py similarity index 99% rename from src/transformers/tokenization_xlm_roberta.py rename to src/transformers/models/xlm_roberta/tokenization_xlm_roberta.py index 892abbd826e..708522fe745 100644 --- a/src/transformers/tokenization_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/tokenization_xlm_roberta.py @@ -21,8 +21,8 @@ from typing import List, Optional, Tuple import sentencepiece as spm -from .tokenization_utils import PreTrainedTokenizer -from .utils import logging +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_xlm_roberta_fast.py b/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py similarity index 98% rename from src/transformers/tokenization_xlm_roberta_fast.py rename to src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py index 4c59d2d79f1..8a7b1580774 100644 --- a/src/transformers/tokenization_xlm_roberta_fast.py +++ b/src/transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py @@ -19,9 +19,9 @@ import os from shutil import copyfile from typing import List, Optional, Tuple -from .file_utils import is_sentencepiece_available -from .tokenization_utils_fast import PreTrainedTokenizerFast -from .utils import logging +from ...file_utils import is_sentencepiece_available +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging if is_sentencepiece_available(): diff --git a/src/transformers/models/xlnet/__init__.py b/src/transformers/models/xlnet/__init__.py new file mode 100644 index 00000000000..acb1cd54684 --- /dev/null +++ b/src/transformers/models/xlnet/__init__.py @@ -0,0 +1,40 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. 
+ +from ...file_utils import is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available +from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig + + +if is_sentencepiece_available(): + from .tokenization_xlnet import XLNetTokenizer + +if is_tokenizers_available(): + from .tokenization_xlnet_fast import XLNetTokenizerFast + +if is_torch_available(): + from .modeling_xlnet import ( + XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, + XLNetForMultipleChoice, + XLNetForQuestionAnswering, + XLNetForQuestionAnsweringSimple, + XLNetForSequenceClassification, + XLNetForTokenClassification, + XLNetLMHeadModel, + XLNetModel, + XLNetPreTrainedModel, + load_tf_weights_in_xlnet, + ) + +if is_tf_available(): + from .modeling_tf_xlnet import ( + TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, + TFXLNetForMultipleChoice, + TFXLNetForQuestionAnsweringSimple, + TFXLNetForSequenceClassification, + TFXLNetForTokenClassification, + TFXLNetLMHeadModel, + TFXLNetMainLayer, + TFXLNetModel, + TFXLNetPreTrainedModel, + ) diff --git a/src/transformers/configuration_xlnet.py b/src/transformers/models/xlnet/configuration_xlnet.py similarity index 99% rename from src/transformers/configuration_xlnet.py rename to src/transformers/models/xlnet/configuration_xlnet.py index 150f10e3e3f..db102317903 100644 --- a/src/transformers/configuration_xlnet.py +++ b/src/transformers/models/xlnet/configuration_xlnet.py @@ -15,8 +15,8 @@ # limitations under the License. """ XLNet configuration """ -from .configuration_utils import PretrainedConfig -from .utils import logging +from ...configuration_utils import PretrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/convert_xlnet_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py similarity index 100% rename from src/transformers/convert_xlnet_original_tf_checkpoint_to_pytorch.py rename to src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py diff --git a/src/transformers/modeling_tf_xlnet.py b/src/transformers/models/xlnet/modeling_tf_xlnet.py similarity index 99% rename from src/transformers/modeling_tf_xlnet.py rename to src/transformers/models/xlnet/modeling_tf_xlnet.py index c6ecb7747ec..05fdf8831fc 100644 --- a/src/transformers/modeling_tf_xlnet.py +++ b/src/transformers/models/xlnet/modeling_tf_xlnet.py @@ -23,9 +23,8 @@ from typing import List, Optional, Tuple import tensorflow as tf -from .activations_tf import get_tf_activation -from .configuration_xlnet import XLNetConfig -from .file_utils import ( +from ...activations_tf import get_tf_activation +from ...file_utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, @@ -33,7 +32,7 @@ from .file_utils import ( add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_tf_utils import ( +from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFMultipleChoiceLoss, TFPreTrainedModel, @@ -46,8 +45,9 @@ from .modeling_tf_utils import ( keras_serializable, shape_list, ) -from .tokenization_utils import BatchEncoding -from .utils import logging +from ...tokenization_utils import BatchEncoding +from ...utils import logging +from .configuration_xlnet import XLNetConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/modeling_xlnet.py b/src/transformers/models/xlnet/modeling_xlnet.py similarity index 99% rename from src/transformers/modeling_xlnet.py rename to 
src/transformers/models/xlnet/modeling_xlnet.py index 601b201635d..f526d55373b 100755 --- a/src/transformers/modeling_xlnet.py +++ b/src/transformers/models/xlnet/modeling_xlnet.py @@ -24,16 +24,15 @@ from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from torch.nn import functional as F -from .activations import ACT2FN -from .configuration_xlnet import XLNetConfig -from .file_utils import ( +from ...activations import ACT2FN +from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) -from .modeling_utils import ( +from ...modeling_utils import ( PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, @@ -41,7 +40,8 @@ from .modeling_utils import ( SequenceSummary, apply_chunking_to_forward, ) -from .utils import logging +from ...utils import logging +from .configuration_xlnet import XLNetConfig logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_xlnet.py b/src/transformers/models/xlnet/tokenization_xlnet.py similarity index 99% rename from src/transformers/tokenization_xlnet.py rename to src/transformers/models/xlnet/tokenization_xlnet.py index 4aaad1604ac..82d7122b6fc 100644 --- a/src/transformers/tokenization_xlnet.py +++ b/src/transformers/models/xlnet/tokenization_xlnet.py @@ -22,9 +22,9 @@ from typing import List, Optional, Tuple import sentencepiece as spm -from .file_utils import SPIECE_UNDERLINE -from .tokenization_utils import PreTrainedTokenizer -from .utils import logging +from ...file_utils import SPIECE_UNDERLINE +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging logger = logging.get_logger(__name__) diff --git a/src/transformers/tokenization_xlnet_fast.py b/src/transformers/models/xlnet/tokenization_xlnet_fast.py similarity index 98% rename from src/transformers/tokenization_xlnet_fast.py rename to src/transformers/models/xlnet/tokenization_xlnet_fast.py index 336090124c7..60e1010dae2 100644 --- a/src/transformers/tokenization_xlnet_fast.py +++ b/src/transformers/models/xlnet/tokenization_xlnet_fast.py @@ -19,9 +19,9 @@ import os from shutil import copyfile from typing import List, Optional, Tuple -from .file_utils import is_sentencepiece_available -from .tokenization_utils_fast import PreTrainedTokenizerFast -from .utils import logging +from ...file_utils import is_sentencepiece_available +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging if is_sentencepiece_available(): diff --git a/src/transformers/pipelines.py b/src/transformers/pipelines.py index 19d5dfc2f38..c6b60f902cd 100755 --- a/src/transformers/pipelines.py +++ b/src/transformers/pipelines.py @@ -30,13 +30,13 @@ from uuid import UUID import numpy as np -from .configuration_auto import AutoConfig from .configuration_utils import PretrainedConfig from .data import SquadExample, SquadFeatures, squad_convert_examples_to_features from .file_utils import add_end_docstrings, is_tf_available, is_torch_available from .modelcard import ModelCard -from .tokenization_auto import AutoTokenizer -from .tokenization_bert import BasicTokenizer +from .models.auto.configuration_auto import AutoConfig +from .models.auto.tokenization_auto import AutoTokenizer +from .models.bert.tokenization_bert import BasicTokenizer from .tokenization_utils import PreTrainedTokenizer from .tokenization_utils_base import PaddingStrategy from .utils import logging @@ -45,7 +45,7 @@ from .utils import logging if is_tf_available(): 
import tensorflow as tf - from .modeling_tf_auto import ( + from .models.auto.modeling_tf_auto import ( TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, @@ -63,7 +63,7 @@ if is_tf_available(): if is_torch_available(): import torch - from .modeling_auto import ( + from .models.auto.modeling_auto import ( MODEL_FOR_MASKED_LM_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 05c1a5613a7..9e31d085bae 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -53,8 +53,8 @@ from torch.utils.data.sampler import RandomSampler, SequentialSampler from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator from .file_utils import WEIGHTS_NAME, is_datasets_available, is_in_notebook, is_torch_tpu_available -from .modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from .modeling_utils import PreTrainedModel +from .models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from .optimization import AdamW, get_linear_schedule_with_warmup from .tokenization_utils_base import PreTrainedTokenizerBase from .trainer_callback import ( diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index c0702985a0b..979e906fcf0 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -172,6 +172,28 @@ def top_k_top_p_filtering(*args, **kwargs): requires_pytorch(top_k_top_p_filtering) +class Conv1D: + def __init__(self, *args, **kwargs): + requires_pytorch(self) + + +class PreTrainedModel: + def __init__(self, *args, **kwargs): + requires_pytorch(self) + + @classmethod + def from_pretrained(self, *args, **kwargs): + requires_pytorch(self) + + +def apply_chunking_to_forward(*args, **kwargs): + requires_pytorch(apply_chunking_to_forward) + + +def prune_layer(*args, **kwargs): + requires_pytorch(prune_layer) + + ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None @@ -1749,28 +1771,6 @@ def load_tf_weights_in_transfo_xl(*args, **kwargs): requires_pytorch(load_tf_weights_in_transfo_xl) -class Conv1D: - def __init__(self, *args, **kwargs): - requires_pytorch(self) - - -class PreTrainedModel: - def __init__(self, *args, **kwargs): - requires_pytorch(self) - - @classmethod - def from_pretrained(self, *args, **kwargs): - requires_pytorch(self) - - -def apply_chunking_to_forward(*args, **kwargs): - requires_pytorch(apply_chunking_to_forward) - - -def prune_layer(*args, **kwargs): - requires_pytorch(prune_layer) - - XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index 8eb912a1a26..33b58b90a47 100644 --- a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -16,6 +16,29 @@ def tf_top_k_top_p_filtering(*args, **kwargs): requires_tf(tf_top_k_top_p_filtering) +class TFPreTrainedModel: + def __init__(self, *args, **kwargs): + requires_tf(self) + + @classmethod + def from_pretrained(self, *args, **kwargs): + requires_tf(self) + + +class TFSequenceSummary: + def __init__(self, *args, **kwargs): + requires_tf(self) + + +class TFSharedEmbeddings: + def __init__(self, *args, **kwargs): + requires_tf(self) + + +def shape_list(*args, **kwargs): + requires_tf(shape_list) + + TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None @@ -1141,29 +1164,6 @@ class 
TFTransfoXLPreTrainedModel: requires_tf(self) -class TFPreTrainedModel: - def __init__(self, *args, **kwargs): - requires_tf(self) - - @classmethod - def from_pretrained(self, *args, **kwargs): - requires_tf(self) - - -class TFSequenceSummary: - def __init__(self, *args, **kwargs): - requires_tf(self) - - -class TFSharedEmbeddings: - def __init__(self, *args, **kwargs): - requires_tf(self) - - -def shape_list(*args, **kwargs): - requires_tf(shape_list) - - TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/src/transformers/utils/dummy_tokenizers_objects.py b/src/transformers/utils/dummy_tokenizers_objects.py index a9ae88b371e..7a5fef4e7cd 100644 --- a/src/transformers/utils/dummy_tokenizers_objects.py +++ b/src/transformers/utils/dummy_tokenizers_objects.py @@ -218,15 +218,6 @@ class T5TokenizerFast: requires_tokenizers(self) -class PreTrainedTokenizerFast: - def __init__(self, *args, **kwargs): - requires_tokenizers(self) - - @classmethod - def from_pretrained(self, *args, **kwargs): - requires_tokenizers(self) - - class XLMRobertaTokenizerFast: def __init__(self, *args, **kwargs): requires_tokenizers(self) @@ -245,6 +236,15 @@ class XLNetTokenizerFast: requires_tokenizers(self) +class PreTrainedTokenizerFast: + def __init__(self, *args, **kwargs): + requires_tokenizers(self) + + @classmethod + def from_pretrained(self, *args, **kwargs): + requires_tokenizers(self) + + SLOW_TO_FAST_CONVERTERS = None diff --git a/templates/adding_a_new_model/README.md b/templates/adding_a_new_model/README.md index 93c5950e356..05b2739e9d3 100644 --- a/templates/adding_a_new_model/README.md +++ b/templates/adding_a_new_model/README.md @@ -66,10 +66,10 @@ Choose from 1, 2 [1]: Once the command has finished, you should have a total of 7 new files spread across the repository: ``` docs/source/model_doc/.rst -src/transformers/configuration_.py -src/transformers/modeling_.py -src/transformers/modeling_tf_.py -src/transformers/tokenization_.py +src/transformers/models//configuration_.py +src/transformers/models//modeling_.py +src/transformers/models//modeling_tf_.py +src/transformers/models//tokenization_.py tests/test_modeling_.py tests/test_modeling_tf_.py ``` diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py index d32a1279a91..5b20528f72c 100644 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py @@ -61,7 +61,7 @@ TF_{{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST = [ ] -# Copied from transformers.modeling_tf_bert.TFBertEmbeddings with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}Embeddings(tf.keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" @@ -175,7 +175,7 @@ class TF{{cookiecutter.camelcase_modelname}}Embeddings(tf.keras.layers.Layer): return tf.reshape(logits, [batch_size, length, self.vocab_size]) -# Copied from transformers.modeling_tf_bert.TFBertSelfAttention with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from 
transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}SelfAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -247,7 +247,7 @@ class TF{{cookiecutter.camelcase_modelname}}SelfAttention(tf.keras.layers.Layer) return outputs -# Copied from transformers.modeling_tf_bert.TFBertSelfOutput with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}SelfOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -266,7 +266,7 @@ class TF{{cookiecutter.camelcase_modelname}}SelfOutput(tf.keras.layers.Layer): return hidden_states -# Copied from transformers.modeling_tf_bert.TFBertAttention with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}Attention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -287,7 +287,7 @@ class TF{{cookiecutter.camelcase_modelname}}Attention(tf.keras.layers.Layer): return outputs -# Copied from transformers.modeling_tf_bert.TFBertIntermediate with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}Intermediate(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -308,7 +308,7 @@ class TF{{cookiecutter.camelcase_modelname}}Intermediate(tf.keras.layers.Layer): return hidden_states -# Copied from transformers.modeling_tf_bert.TFBertOutput with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}Output(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -327,7 +327,7 @@ class TF{{cookiecutter.camelcase_modelname}}Output(tf.keras.layers.Layer): return hidden_states -# Copied from transformers.modeling_tf_bert.TFBertLayer with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}Layer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -391,7 +391,7 @@ class TF{{cookiecutter.camelcase_modelname}}Encoder(tf.keras.layers.Layer): ) -# Copied from transformers.modeling_tf_bert.TFBertPredictionHead with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHead with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}PredictionHeadTransform(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -415,7 +415,7 @@ class TF{{cookiecutter.camelcase_modelname}}PredictionHeadTransform(tf.keras.lay return hidden_states -# Copied from transformers.modeling_tf_bert.TFBertLMPredictionHead with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with 
Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}LMPredictionHead(tf.keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) @@ -440,7 +440,7 @@ class TF{{cookiecutter.camelcase_modelname}}LMPredictionHead(tf.keras.layers.Lay return hidden_states -# Copied from transformers.modeling_tf_bert.TFBertMLMHead with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}MLMHead(tf.keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) @@ -606,7 +606,7 @@ class TF{{cookiecutter.camelcase_modelname}}MainLayer(tf.keras.layers.Layer): ) -# Copied from transformers.modeling_tf_bert.TFBertPreTrainedModel with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_tf_bert.TFBertPreTrainedModel with Bert->{{cookiecutter.camelcase_modelname}} class TF{{cookiecutter.camelcase_modelname}}PreTrainedModel(TFPreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py index cb65d5246d6..fb8593e61be 100755 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py @@ -140,7 +140,7 @@ def mish(x): return x * torch.tanh(nn.functional.softplus(x)) -# Copied from transformers.modeling_bert.BertEmbeddings with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Embeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" @@ -183,7 +183,7 @@ class {{cookiecutter.camelcase_modelname}}Embeddings(nn.Module): return embeddings -# Copied from transformers.modeling_bert.BertSelfAttention with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}SelfAttention(nn.Module): def __init__(self, config): super().__init__() @@ -262,7 +262,7 @@ class {{cookiecutter.camelcase_modelname}}SelfAttention(nn.Module): return outputs -# Copied from transformers.modeling_bert.BertSelfOutput with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}SelfOutput(nn.Module): def __init__(self, config): super().__init__() @@ -277,7 +277,7 @@ class {{cookiecutter.camelcase_modelname}}SelfOutput(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertAttention with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->{{cookiecutter.camelcase_modelname}} class 
{{cookiecutter.camelcase_modelname}}Attention(nn.Module): def __init__(self, config): super().__init__() @@ -325,7 +325,7 @@ class {{cookiecutter.camelcase_modelname}}Attention(nn.Module): return outputs -# Copied from transformers.modeling_bert.BertIntermediate with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Intermediate(nn.Module): def __init__(self, config): super().__init__() @@ -341,7 +341,7 @@ class {{cookiecutter.camelcase_modelname}}Intermediate(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertOutput with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Output(nn.Module): def __init__(self, config): super().__init__() @@ -356,7 +356,7 @@ class {{cookiecutter.camelcase_modelname}}Output(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertLayer with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Layer(nn.Module): def __init__(self, config): super().__init__() @@ -416,7 +416,7 @@ class {{cookiecutter.camelcase_modelname}}Layer(nn.Module): return layer_output -# Copied from transformers.modeling_bert.BertEncoder with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}Encoder(nn.Module): def __init__(self, config): super().__init__() @@ -481,7 +481,7 @@ class {{cookiecutter.camelcase_modelname}}Encoder(nn.Module): ) -# Copied from transformers.modeling_bert.BertPredictionHead with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_bert.BertPredictionHead with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}PredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() @@ -499,7 +499,7 @@ class {{cookiecutter.camelcase_modelname}}PredictionHeadTransform(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertLMPredictionHead with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}LMPredictionHead(nn.Module): def __init__(self, config): super().__init__() @@ -520,7 +520,7 @@ class {{cookiecutter.camelcase_modelname}}LMPredictionHead(nn.Module): return hidden_states -# Copied from transformers.modeling_bert.BertOnlyMLMHead with Bert->{{cookiecutter.camelcase_modelname}} +# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->{{cookiecutter.camelcase_modelname}} class {{cookiecutter.camelcase_modelname}}OnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py index 9846fce625c..a5fe719ad7f 100644 --- 
a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py @@ -26,7 +26,7 @@ from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor if is_tf_available(): import tensorflow as tf - from transformers.modeling_tf_{{cookiecutter.lowercase_modelname}} import ( + from transformers import ( TF{{cookiecutter.camelcase_modelname}}ForMaskedLM, TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice, TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_{{cookiecutter.lowercase_modelname}}.py index 0859b16b5cc..f01183d9973 100644 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_{{cookiecutter.lowercase_modelname}}.py @@ -34,7 +34,7 @@ if is_torch_available(): {{cookiecutter.camelcase_modelname}}ForTokenClassification, {{cookiecutter.camelcase_modelname}}Model, ) - from transformers.modeling_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST + from transformers.models.{{cookiecutter.lowercase_modelname}}.modeling_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST class {{cookiecutter.camelcase_modelname}}ModelTester: diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/to_replace_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/to_replace_{{cookiecutter.lowercase_modelname}}.py index 1de0b01a068..16ee5916980 100644 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/to_replace_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/to_replace_{{cookiecutter.lowercase_modelname}}.py @@ -74,7 +74,7 @@ from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.u -# To replace in: "src/transformers/modeling_auto.py" if generating PyTorch +# To replace in: "src/transformers/models/auto/modeling_auto.py" if generating PyTorch # Below: "from .configuration_auto import (" # Replace with: {{cookiecutter.camelcase_modelname}}Config, @@ -129,7 +129,7 @@ from .modeling_{{cookiecutter.lowercase_modelname}} import ( # End. 
-# To replace in: "src/transformers/modeling_tf_auto.py" if generating TensorFlow +# To replace in: "src/transformers/models/auto/modeling_tf_auto.py" if generating TensorFlow # Below: "from .configuration_auto import (" # Replace with: {{cookiecutter.camelcase_modelname}}Config, diff --git a/tests/test_configuration_auto.py b/tests/test_configuration_auto.py index e3a66eb85ba..ac9a755a7c3 100644 --- a/tests/test_configuration_auto.py +++ b/tests/test_configuration_auto.py @@ -16,9 +16,9 @@ import os import unittest -from transformers.configuration_auto import CONFIG_MAPPING, AutoConfig -from transformers.configuration_bert import BertConfig -from transformers.configuration_roberta import RobertaConfig +from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig +from transformers.models.bert.configuration_bert import BertConfig +from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKWOWN_IDENTIFIER diff --git a/tests/test_flax_auto.py b/tests/test_flax_auto.py index 322c98b77ad..148cd886366 100644 --- a/tests/test_flax_auto.py +++ b/tests/test_flax_auto.py @@ -6,9 +6,9 @@ from transformers.testing_utils import require_flax, slow if is_flax_available(): import jax - from transformers.modeling_flax_auto import FlaxAutoModel - from transformers.modeling_flax_bert import FlaxBertModel - from transformers.modeling_flax_roberta import FlaxRobertaModel + from transformers.models.auto.modeling_flax_auto import FlaxAutoModel + from transformers.models.bert.modeling_flax_bert import FlaxBertModel + from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax diff --git a/tests/test_logging.py b/tests/test_logging.py index 843d59b9812..c706a798621 100644 --- a/tests/test_logging.py +++ b/tests/test_logging.py @@ -1,7 +1,7 @@ import os import unittest -import transformers.tokenization_bart +import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv @@ -31,7 +31,7 @@ class HfArgumentParserTest(unittest.TestCase): def test_integration(self): level_origin = logging.get_verbosity() - logger = logging.get_logger("transformers.tokenization_bart") + logger = logging.get_logger("transformers.models.bart.tokenization_bart") msg = "Testing 1, 2, 3" # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) @@ -62,7 +62,7 @@ class HfArgumentParserTest(unittest.TestCase): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var - _ = logging.get_logger("transformers.tokenization_bart") + _ = logging.get_logger("transformers.models.bart.tokenization_bart") env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) env_level = logging.log_levels[env_level_str] @@ -85,7 +85,7 @@ class HfArgumentParserTest(unittest.TestCase): logger = logging.logging.getLogger() with CaptureLogger(logger) as cl: # this action activates the env var - logging.get_logger("transformers.tokenization_bart") + logging.get_logger("transformers.models.bart.tokenization_bart") self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out) # no need to restore as nothing was changed diff --git a/tests/test_modeling_albert.py b/tests/test_modeling_albert.py index a53fa069af7..964bc836038 100644 --- a/tests/test_modeling_albert.py +++ b/tests/test_modeling_albert.py @@ -37,7 
+37,7 @@ if is_torch_available(): AlbertForTokenClassification, AlbertModel, ) - from transformers.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST + from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class AlbertModelTester: diff --git a/tests/test_modeling_auto.py b/tests/test_modeling_auto.py index 50340811f13..b7d3ed9fbad 100644 --- a/tests/test_modeling_auto.py +++ b/tests/test_modeling_auto.py @@ -45,7 +45,7 @@ if is_torch_available(): T5Config, T5ForConditionalGeneration, ) - from transformers.modeling_auto import ( + from transformers.models.auto.modeling_auto import ( MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, MODEL_FOR_PRETRAINING_MAPPING, @@ -56,9 +56,9 @@ if is_torch_available(): MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, ) - from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST - from transformers.modeling_gpt2 import GPT2_PRETRAINED_MODEL_ARCHIVE_LIST - from transformers.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST + from transformers.models.bert.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST + from transformers.models.gpt2.modeling_gpt2 import GPT2_PRETRAINED_MODEL_ARCHIVE_LIST + from transformers.models.t5.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST @require_torch diff --git a/tests/test_modeling_bart.py b/tests/test_modeling_bart.py index 3b997b26493..76ebe2d3d95 100644 --- a/tests/test_modeling_bart.py +++ b/tests/test_modeling_bart.py @@ -48,7 +48,7 @@ if is_torch_available(): PegasusConfig, pipeline, ) - from transformers.modeling_bart import ( + from transformers.models.bart.modeling_bart import ( SinusoidalPositionalEmbedding, _prepare_bart_decoder_inputs, invert_mask, diff --git a/tests/test_modeling_bert.py b/tests/test_modeling_bert.py index 7e80465c072..73a8ec9ca4a 100755 --- a/tests/test_modeling_bert.py +++ b/tests/test_modeling_bert.py @@ -40,7 +40,7 @@ if is_torch_available(): BertLMHeadModel, BertModel, ) - from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST + from transformers.models.bert.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST class BertModelTester: diff --git a/tests/test_modeling_deberta.py b/tests/test_modeling_deberta.py index 96b08cdb62c..c0f60ffeb7e 100644 --- a/tests/test_modeling_deberta.py +++ b/tests/test_modeling_deberta.py @@ -34,7 +34,7 @@ if is_torch_available(): DebertaForSequenceClassification, DebertaModel, ) - from transformers.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST + from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST @require_torch diff --git a/tests/test_modeling_dpr.py b/tests/test_modeling_dpr.py index f9a04b1a9f8..2526a0c362b 100644 --- a/tests/test_modeling_dpr.py +++ b/tests/test_modeling_dpr.py @@ -27,7 +27,7 @@ if is_torch_available(): import torch from transformers import BertConfig, DPRConfig, DPRContextEncoder, DPRQuestionEncoder, DPRReader - from transformers.modeling_dpr import ( + from transformers.models.dpr.modeling_dpr import ( DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, diff --git a/tests/test_modeling_electra.py b/tests/test_modeling_electra.py index ee39fd1b1d8..9f2925aa524 100644 --- a/tests/test_modeling_electra.py +++ b/tests/test_modeling_electra.py @@ -37,7 +37,7 @@ if is_torch_available(): ElectraForTokenClassification, ElectraModel, ) - from transformers.modeling_electra import 
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST + from transformers.models.electra.modeling_electra import ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST class ElectraModelTester: diff --git a/tests/test_modeling_flaubert.py b/tests/test_modeling_flaubert.py index d07a8f5138f..c48f25a667a 100644 --- a/tests/test_modeling_flaubert.py +++ b/tests/test_modeling_flaubert.py @@ -36,7 +36,7 @@ if is_torch_available(): FlaubertModel, FlaubertWithLMHeadModel, ) - from transformers.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST + from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class FlaubertModelTester(object): diff --git a/tests/test_modeling_flax_bert.py b/tests/test_modeling_flax_bert.py index f06d2559164..c8c2da1ff16 100644 --- a/tests/test_modeling_flax_bert.py +++ b/tests/test_modeling_flax_bert.py @@ -2,18 +2,17 @@ import unittest from numpy import ndarray -from transformers import TensorType, is_flax_available, is_torch_available +from transformers import BertTokenizerFast, TensorType, is_flax_available, is_torch_available from transformers.testing_utils import require_flax, require_torch -from transformers.tokenization_bert_fast import BertTokenizerFast if is_flax_available(): - from transformers.modeling_flax_bert import FlaxBertModel + from transformers.models.bert.modeling_flax_bert import FlaxBertModel if is_torch_available(): import torch - from transformers.modeling_bert import BertModel + from transformers.models.bert.modeling_bert import BertModel @require_flax diff --git a/tests/test_modeling_flax_roberta.py b/tests/test_modeling_flax_roberta.py index b8b89776b8d..7bfdb54a12c 100644 --- a/tests/test_modeling_flax_roberta.py +++ b/tests/test_modeling_flax_roberta.py @@ -2,18 +2,17 @@ import unittest from numpy import ndarray -from transformers import TensorType, is_flax_available, is_torch_available +from transformers import RobertaTokenizerFast, TensorType, is_flax_available, is_torch_available from transformers.testing_utils import require_flax, require_torch -from transformers.tokenization_roberta_fast import RobertaTokenizerFast if is_flax_available(): - from transformers.modeling_flax_roberta import FlaxRobertaModel + from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel if is_torch_available(): import torch - from transformers.modeling_roberta import RobertaModel + from transformers.models.roberta.modeling_roberta import RobertaModel @require_flax diff --git a/tests/test_modeling_fsmt.py b/tests/test_modeling_fsmt.py index b5e3e8d1e34..d5583a864ff 100644 --- a/tests/test_modeling_fsmt.py +++ b/tests/test_modeling_fsmt.py @@ -32,7 +32,7 @@ if is_torch_available(): import torch from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTModel, FSMTTokenizer - from transformers.modeling_fsmt import ( + from transformers.models.fsmt.modeling_fsmt import ( SinusoidalPositionalEmbedding, _prepare_fsmt_decoder_inputs, invert_mask, diff --git a/tests/test_modeling_lxmert.py b/tests/test_modeling_lxmert.py index 3222c21687c..d4e540bcaa1 100644 --- a/tests/test_modeling_lxmert.py +++ b/tests/test_modeling_lxmert.py @@ -35,7 +35,7 @@ if is_torch_available(): LxmertForQuestionAnswering, LxmertModel, ) - from transformers.modeling_lxmert import LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST + from transformers.models.lxmert.modeling_lxmert import LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST class LxmertModelTester: diff --git a/tests/test_modeling_marian.py b/tests/test_modeling_marian.py index a20c4a20d86..cdbb92f5047 
100644 --- a/tests/test_modeling_marian.py +++ b/tests/test_modeling_marian.py @@ -28,12 +28,12 @@ if is_torch_available(): import torch from transformers import AutoModelWithLMHead, MarianMTModel - from transformers.convert_marian_to_pytorch import ( + from transformers.models.bart.modeling_bart import shift_tokens_right + from transformers.models.marian.convert_marian_to_pytorch import ( ORG_NAME, convert_hf_name_to_opus_name, convert_opus_name_to_hf_name, ) - from transformers.modeling_bart import shift_tokens_right from transformers.pipelines import TranslationPipeline diff --git a/tests/test_modeling_pegasus.py b/tests/test_modeling_pegasus.py index 07f3326d13d..61435270119 100644 --- a/tests/test_modeling_pegasus.py +++ b/tests/test_modeling_pegasus.py @@ -1,8 +1,8 @@ import unittest from transformers import AutoConfig, AutoTokenizer, is_torch_available -from transformers.configuration_pegasus import task_specific_params from transformers.file_utils import cached_property +from transformers.models.pegasus.configuration_pegasus import task_specific_params from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils.logging import ERROR, set_verbosity diff --git a/tests/test_modeling_rag.py b/tests/test_modeling_rag.py index ec8fe1b508a..b2b4f14dbfb 100644 --- a/tests/test_modeling_rag.py +++ b/tests/test_modeling_rag.py @@ -25,6 +25,9 @@ import numpy as np from transformers import BartTokenizer, T5Tokenizer from transformers.file_utils import cached_property, is_datasets_available, is_faiss_available, is_torch_available +from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES +from transformers.models.dpr.tokenization_dpr import DPRQuestionEncoderTokenizer +from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, @@ -33,9 +36,6 @@ from transformers.testing_utils import ( slow, torch_device, ) -from transformers.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES -from transformers.tokenization_dpr import DPRQuestionEncoderTokenizer -from transformers.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from .test_modeling_bart import ModelTester as BartModelTester from .test_modeling_dpr import DPRModelTester @@ -205,7 +205,7 @@ class RagTestMixin: ) dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT) tokenizer = self.bart_tokenizer if config.generator.model_type == "bart" else self.t5_tokenizer - with patch("transformers.retrieval_rag.load_dataset") as mock_load_dataset: + with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: mock_load_dataset.return_value = dataset retriever = RagRetriever( config, diff --git a/tests/test_modeling_roberta.py b/tests/test_modeling_roberta.py index 20b7dfcb6cf..dc32b330fb7 100644 --- a/tests/test_modeling_roberta.py +++ b/tests/test_modeling_roberta.py @@ -37,7 +37,7 @@ if is_torch_available(): RobertaForTokenClassification, RobertaModel, ) - from transformers.modeling_roberta import ( + from transformers.models.roberta.modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaEmbeddings, create_position_ids_from_input_ids, diff --git a/tests/test_modeling_t5.py b/tests/test_modeling_t5.py index 7bf81df9df3..31a73c10010 100644 --- a/tests/test_modeling_t5.py +++ 
b/tests/test_modeling_t5.py @@ -31,7 +31,7 @@ if is_torch_available(): import torch from transformers import T5Config, T5ForConditionalGeneration, T5Model, T5Tokenizer - from transformers.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST + from transformers.models.t5.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST class T5ModelTester: diff --git a/tests/test_modeling_tf_albert.py b/tests/test_modeling_tf_albert.py index 96cfdfb3c9f..ddcb1fa2eb0 100644 --- a/tests/test_modeling_tf_albert.py +++ b/tests/test_modeling_tf_albert.py @@ -26,7 +26,7 @@ from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor if is_tf_available(): import tensorflow as tf - from transformers.modeling_tf_albert import ( + from transformers.models.albert.modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, diff --git a/tests/test_modeling_tf_auto.py b/tests/test_modeling_tf_auto.py index 759e6c83e4a..dec1905c370 100644 --- a/tests/test_modeling_tf_auto.py +++ b/tests/test_modeling_tf_auto.py @@ -43,7 +43,7 @@ if is_tf_available(): TFRobertaForMaskedLM, TFT5ForConditionalGeneration, ) - from transformers.modeling_tf_auto import ( + from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, @@ -54,9 +54,9 @@ if is_tf_available(): TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING, ) - from transformers.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST - from transformers.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST - from transformers.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST + from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST + from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST + from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST @require_tf diff --git a/tests/test_modeling_tf_bart.py b/tests/test_modeling_tf_bart.py index 999692404d2..c8718aa2053 100644 --- a/tests/test_modeling_tf_bart.py +++ b/tests/test_modeling_tf_bart.py @@ -31,7 +31,7 @@ if is_tf_available(): import tensorflow as tf from transformers import TFBartForConditionalGeneration, TFBartModel - from transformers.modeling_tf_bart import TFSinusoidalPositionalEmbedding + from transformers.models.bart.modeling_tf_bart import TFSinusoidalPositionalEmbedding @require_tf diff --git a/tests/test_modeling_tf_bert.py b/tests/test_modeling_tf_bert.py index f6122b09ef4..1b3c50f717e 100644 --- a/tests/test_modeling_tf_bert.py +++ b/tests/test_modeling_tf_bert.py @@ -27,7 +27,7 @@ if is_tf_available(): import tensorflow as tf from transformers import TF_MODEL_FOR_PRETRAINING_MAPPING - from transformers.modeling_tf_bert import ( + from transformers.models.bert.modeling_tf_bert import ( TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, diff --git a/tests/test_modeling_tf_ctrl.py b/tests/test_modeling_tf_ctrl.py index 4cae35634a7..f2ef243861d 100644 --- a/tests/test_modeling_tf_ctrl.py +++ b/tests/test_modeling_tf_ctrl.py @@ -26,7 +26,11 @@ from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor if is_tf_available(): import tensorflow as tf - from transformers.modeling_tf_ctrl import TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLLMHeadModel, TFCTRLModel + from transformers.models.ctrl.modeling_tf_ctrl import ( + TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, + TFCTRLLMHeadModel, + TFCTRLModel, + ) class 
diff --git a/tests/test_modeling_tf_distilbert.py b/tests/test_modeling_tf_distilbert.py
index 73bcd7d00eb..bab94cb380c 100644
--- a/tests/test_modeling_tf_distilbert.py
+++ b/tests/test_modeling_tf_distilbert.py
@@ -26,7 +26,7 @@ from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
 if is_tf_available():
     import tensorflow as tf

-    from transformers.modeling_tf_distilbert import (
+    from transformers.models.distilbert.modeling_tf_distilbert import (
         TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
         TFDistilBertForMaskedLM,
         TFDistilBertForMultipleChoice,
diff --git a/tests/test_modeling_tf_electra.py b/tests/test_modeling_tf_electra.py
index b5d3c933bb3..a353c8b666c 100644
--- a/tests/test_modeling_tf_electra.py
+++ b/tests/test_modeling_tf_electra.py
@@ -26,7 +26,7 @@ from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
 if is_tf_available():
     import tensorflow as tf

-    from transformers.modeling_tf_electra import (
+    from transformers.models.electra.modeling_tf_electra import (
         TFElectraForMaskedLM,
         TFElectraForMultipleChoice,
         TFElectraForPreTraining,
diff --git a/tests/test_modeling_tf_funnel.py b/tests/test_modeling_tf_funnel.py
index 804b3075261..03f8bc05891 100644
--- a/tests/test_modeling_tf_funnel.py
+++ b/tests/test_modeling_tf_funnel.py
@@ -26,7 +26,7 @@ from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
 if is_tf_available():
     import tensorflow as tf

-    from transformers.modeling_tf_funnel import (
+    from transformers import (
         TFFunnelBaseModel,
         TFFunnelForMaskedLM,
         TFFunnelForMultipleChoice,
diff --git a/tests/test_modeling_tf_gpt2.py b/tests/test_modeling_tf_gpt2.py
index b63a843dc98..4bc8b125f01 100644
--- a/tests/test_modeling_tf_gpt2.py
+++ b/tests/test_modeling_tf_gpt2.py
@@ -26,7 +26,7 @@ from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
 if is_tf_available():
     import tensorflow as tf

-    from transformers.modeling_tf_gpt2 import (
+    from transformers.models.gpt2.modeling_tf_gpt2 import (
         TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
         TFGPT2DoubleHeadsModel,
         TFGPT2LMHeadModel,
diff --git a/tests/test_modeling_tf_lxmert.py b/tests/test_modeling_tf_lxmert.py
index 5037208229c..3bf9f16d3a0 100644
--- a/tests/test_modeling_tf_lxmert.py
+++ b/tests/test_modeling_tf_lxmert.py
@@ -25,7 +25,7 @@ from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
 if is_tf_available():
     import tensorflow as tf

-    from transformers.modeling_tf_lxmert import TFLxmertForPreTraining, TFLxmertModel
+    from transformers.models.lxmert.modeling_tf_lxmert import TFLxmertForPreTraining, TFLxmertModel


 class TFLxmertModelTester(object):
diff --git a/tests/test_modeling_tf_mobilebert.py b/tests/test_modeling_tf_mobilebert.py
index 1ea2b663c58..a39ffb316fe 100644
--- a/tests/test_modeling_tf_mobilebert.py
+++ b/tests/test_modeling_tf_mobilebert.py
@@ -26,7 +26,7 @@ from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
 if is_tf_available():
     import tensorflow as tf

-    from transformers.modeling_tf_mobilebert import (
+    from transformers import (
         TFMobileBertForMaskedLM,
         TFMobileBertForMultipleChoice,
         TFMobileBertForNextSentencePrediction,
diff --git a/tests/test_modeling_tf_openai.py b/tests/test_modeling_tf_openai.py
index 1c9dab78c94..f32f9ed385c 100644
--- a/tests/test_modeling_tf_openai.py
+++ b/tests/test_modeling_tf_openai.py
@@ -26,7 +26,7 @@ from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
 if is_tf_available():
     import tensorflow as tf

-    from transformers.modeling_tf_openai import (
+    from transformers.models.openai.modeling_tf_openai import (
         TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
         TFOpenAIGPTDoubleHeadsModel,
         TFOpenAIGPTLMHeadModel,
diff --git a/tests/test_modeling_tf_pytorch.py b/tests/test_modeling_tf_pytorch.py
index f76ea40fd5e..eb8f812e0c5 100644
--- a/tests/test_modeling_tf_pytorch.py
+++ b/tests/test_modeling_tf_pytorch.py
@@ -43,9 +43,9 @@ if is_tf_available():
         TFRobertaForMaskedLM,
         TFT5ForConditionalGeneration,
     )
-    from transformers.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
-    from transformers.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
-    from transformers.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

 if is_torch_available():
     from transformers import (
diff --git a/tests/test_modeling_tf_roberta.py b/tests/test_modeling_tf_roberta.py
index b9614dd4d8a..77be7ee6bc1 100644
--- a/tests/test_modeling_tf_roberta.py
+++ b/tests/test_modeling_tf_roberta.py
@@ -27,7 +27,7 @@ if is_tf_available():
     import numpy
     import tensorflow as tf

-    from transformers.modeling_tf_roberta import (
+    from transformers.models.roberta.modeling_tf_roberta import (
         TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
         TFRobertaForMaskedLM,
         TFRobertaForMultipleChoice,
diff --git a/tests/test_modeling_tf_xlnet.py b/tests/test_modeling_tf_xlnet.py
index da5a66c8bc7..290b97065b0 100644
--- a/tests/test_modeling_tf_xlnet.py
+++ b/tests/test_modeling_tf_xlnet.py
@@ -27,7 +27,7 @@ from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
 if is_tf_available():
     import tensorflow as tf

-    from transformers.modeling_tf_xlnet import (
+    from transformers.models.xlnet.modeling_tf_xlnet import (
         TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
         TFXLNetForMultipleChoice,
         TFXLNetForQuestionAnsweringSimple,
diff --git a/tests/test_modeling_transfo_xl.py b/tests/test_modeling_transfo_xl.py
index 2c9c893623b..75c853fbd48 100644
--- a/tests/test_modeling_transfo_xl.py
+++ b/tests/test_modeling_transfo_xl.py
@@ -28,7 +28,7 @@ if is_torch_available():
     import torch

     from transformers import TransfoXLConfig, TransfoXLLMHeadModel, TransfoXLModel
-    from transformers.modeling_transfo_xl import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.models.transfo_xl.modeling_transfo_xl import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST


 class TransfoXLModelTester:
diff --git a/tests/test_modeling_xlm.py b/tests/test_modeling_xlm.py
index 9fd0de1bc38..34d9a152eb0 100644
--- a/tests/test_modeling_xlm.py
+++ b/tests/test_modeling_xlm.py
@@ -37,7 +37,7 @@ if is_torch_available():
         XLMModel,
         XLMWithLMHeadModel,
     )
-    from transformers.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST


 class XLMModelTester:
diff --git a/tests/test_modeling_xlnet.py b/tests/test_modeling_xlnet.py
index 72497dbd55b..1f8f2337a14 100644
--- a/tests/test_modeling_xlnet.py
+++ b/tests/test_modeling_xlnet.py
@@ -38,7 +38,7 @@ if is_torch_available():
         XLNetLMHeadModel,
         XLNetModel,
     )
-    from transformers.modeling_xlnet import XLNET_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.models.xlnet.modeling_xlnet import XLNET_PRETRAINED_MODEL_ARCHIVE_LIST


 class XLNetModelTester:
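The `*_PRETRAINED_MODEL_ARCHIVE_LIST` constants touched in these hunks likewise now live in the per-model modules; each is a plain list of checkpoint identifiers, and the tests typically load the first entry. A sketch of that usage, assuming PyTorch and network access; the checkpoint name in the comment is the usual first entry but is only illustrative here:

    from transformers import XLNetModel
    from transformers.models.xlnet.modeling_xlnet import XLNET_PRETRAINED_MODEL_ARCHIVE_LIST

    checkpoint = XLNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]  # e.g. "xlnet-base-cased"
    model = XLNetModel.from_pretrained(checkpoint)
    print(checkpoint, type(model).__name__)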
diff --git a/tests/test_retrieval_rag.py b/tests/test_retrieval_rag.py
index a95324535b8..47ad714b54e 100644
--- a/tests/test_retrieval_rag.py
+++ b/tests/test_retrieval_rag.py
@@ -10,10 +10,14 @@ import numpy as np
 from datasets import Dataset

 from transformers import is_faiss_available
-from transformers.configuration_bart import BartConfig
-from transformers.configuration_dpr import DPRConfig
-from transformers.configuration_rag import RagConfig
-from transformers.retrieval_rag import CustomHFIndex, RagRetriever
+from transformers.models.bart.configuration_bart import BartConfig
+from transformers.models.bart.tokenization_bart import BartTokenizer
+from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
+from transformers.models.dpr.configuration_dpr import DPRConfig
+from transformers.models.dpr.tokenization_dpr import DPRQuestionEncoderTokenizer
+from transformers.models.rag.configuration_rag import RagConfig
+from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
+from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
 from transformers.testing_utils import (
     require_datasets,
     require_faiss,
@@ -21,10 +25,6 @@ from transformers.testing_utils import (
     require_tokenizers,
     require_torch,
 )
-from transformers.tokenization_bart import BartTokenizer
-from transformers.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
-from transformers.tokenization_dpr import DPRQuestionEncoderTokenizer
-from transformers.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES


 if is_faiss_available():
@@ -126,7 +126,7 @@ class RagRetrieverTest(TestCase):
             question_encoder=DPRConfig().to_dict(),
             generator=BartConfig().to_dict(),
         )
-        with patch("transformers.retrieval_rag.load_dataset") as mock_load_dataset:
+        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
             mock_load_dataset.return_value = dataset
             retriever = RagRetriever(
                 config,
@@ -213,7 +213,7 @@ class RagRetrieverTest(TestCase):
     def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
         retriever = self.get_dummy_canonical_hf_index_retriever()
         with tempfile.TemporaryDirectory() as tmp_dirname:
-            with patch("transformers.retrieval_rag.load_dataset") as mock_load_dataset:
+            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                 mock_load_dataset.return_value = self.get_dummy_dataset()
                 retriever.save_pretrained(tmp_dirname)
                 retriever = RagRetriever.from_pretrained(tmp_dirname)
diff --git a/tests/test_tokenization_auto.py b/tests/test_tokenization_auto.py
index e06d7800bb1..b090570e84e 100644
--- a/tests/test_tokenization_auto.py
+++ b/tests/test_tokenization_auto.py
@@ -27,8 +27,9 @@ from transformers import (
     RobertaTokenizer,
     RobertaTokenizerFast,
 )
-from transformers.configuration_auto import AutoConfig
-from transformers.configuration_roberta import RobertaConfig
+from transformers.models.auto.configuration_auto import AutoConfig
+from transformers.models.auto.tokenization_auto import TOKENIZER_MAPPING
+from transformers.models.roberta.configuration_roberta import RobertaConfig
 from transformers.testing_utils import (
     DUMMY_DIFF_TOKENIZER_IDENTIFIER,
     DUMMY_UNKWOWN_IDENTIFIER,
@@ -36,7 +37,6 @@ from transformers.testing_utils import (
     require_tokenizers,
     slow,
 )
-from transformers.tokenization_auto import TOKENIZER_MAPPING


 class AutoTokenizerTest(unittest.TestCase):
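The `patch(...)` targets change for the same reason as the imports: `unittest.mock.patch` replaces a name in the module where it is looked up, so once `retrieval_rag` lives under `transformers.models.rag`, the dotted path handed to `patch` must follow it. A condensed sketch of the pattern the RAG retriever tests above use (the config, the two tokenizers and the dummy dataset are assumed to be built elsewhere, as in those tests; running it needs torch, datasets and faiss):

    from unittest.mock import patch

    from transformers import RagRetriever

    def build_dummy_retriever(config, question_encoder_tokenizer, generator_tokenizer, dummy_dataset):
        # Patch load_dataset at its new lookup location so RagRetriever indexes the small
        # in-memory dataset instead of downloading the real wiki_dpr corpus.
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dummy_dataset
            return RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
            )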
diff --git a/tests/test_tokenization_bart.py b/tests/test_tokenization_bart.py
index 3c6c88ef7ae..94fbf63a0b8 100644
--- a/tests/test_tokenization_bart.py
+++ b/tests/test_tokenization_bart.py
@@ -4,8 +4,8 @@ import unittest

 from transformers import BartTokenizer, BartTokenizerFast, BatchEncoding
 from transformers.file_utils import cached_property
+from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
 from transformers.testing_utils import require_tokenizers, require_torch
-from transformers.tokenization_roberta import VOCAB_FILES_NAMES

 from .test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
diff --git a/tests/test_tokenization_bert.py b/tests/test_tokenization_bert.py
index 43bda26df45..efb1aa826df 100644
--- a/tests/test_tokenization_bert.py
+++ b/tests/test_tokenization_bert.py
@@ -18,8 +18,7 @@ import os
 import unittest

 from transformers import BertTokenizerFast
-from transformers.testing_utils import require_tokenizers, slow
-from transformers.tokenization_bert import (
+from transformers.models.bert.tokenization_bert import (
     VOCAB_FILES_NAMES,
     BasicTokenizer,
     BertTokenizer,
@@ -28,6 +27,7 @@ from transformers.tokenization_bert import (
     _is_punctuation,
     _is_whitespace,
 )
+from transformers.testing_utils import require_tokenizers, slow

 from .test_tokenization_common import TokenizerTesterMixin, filter_non_english
diff --git a/tests/test_tokenization_bert_japanese.py b/tests/test_tokenization_bert_japanese.py
index 092237f1abd..55ae6f41c47 100644
--- a/tests/test_tokenization_bert_japanese.py
+++ b/tests/test_tokenization_bert_japanese.py
@@ -18,14 +18,14 @@ import os
 import pickle
 import unittest

-from transformers.testing_utils import custom_tokenizers
-from transformers.tokenization_bert_japanese import (
+from transformers.models.bert_japanese.tokenization_bert_japanese import (
     VOCAB_FILES_NAMES,
     BertJapaneseTokenizer,
     CharacterTokenizer,
     MecabTokenizer,
     WordpieceTokenizer,
 )
+from transformers.testing_utils import custom_tokenizers

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_tokenization_bertweet.py b/tests/test_tokenization_bertweet.py
index 7175f201925..66de1ff6af7 100644
--- a/tests/test_tokenization_bertweet.py
+++ b/tests/test_tokenization_bertweet.py
@@ -16,7 +16,7 @@ import os
 import unittest

-from transformers.tokenization_bertweet import VOCAB_FILES_NAMES, BertweetTokenizer
+from transformers.models.bertweet.tokenization_bertweet import VOCAB_FILES_NAMES, BertweetTokenizer

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_tokenization_blenderbot.py b/tests/test_tokenization_blenderbot.py
index fee7f19372f..fffe6f2d981 100644
--- a/tests/test_tokenization_blenderbot.py
+++ b/tests/test_tokenization_blenderbot.py
@@ -20,7 +20,11 @@ import os
 import unittest

 from transformers.file_utils import cached_property
-from transformers.tokenization_blenderbot import VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, BlenderbotTokenizer
+from transformers.models.blenderbot.tokenization_blenderbot import (
+    VOCAB_FILES_NAMES,
+    BlenderbotSmallTokenizer,
+    BlenderbotTokenizer,
+)

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_tokenization_ctrl.py b/tests/test_tokenization_ctrl.py
index 34b2ec9789a..435e1f3bb40 100644
--- a/tests/test_tokenization_ctrl.py
+++ b/tests/test_tokenization_ctrl.py
@@ -17,7 +17,7 @@ import json
 import os
 import unittest

-from transformers.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
+from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_tokenization_deberta.py b/tests/test_tokenization_deberta.py
index 29437d6cc80..6426535a032 100644
--- a/tests/test_tokenization_deberta.py
+++ b/tests/test_tokenization_deberta.py
@@ -18,8 +18,8 @@ import re
 import unittest
 from typing import Tuple

+from transformers.models.deberta.tokenization_deberta import DebertaTokenizer
 from transformers.testing_utils import require_torch
-from transformers.tokenization_deberta import DebertaTokenizer

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_tokenization_fsmt.py b/tests/test_tokenization_fsmt.py
index 790df2247cd..2eb92d2f652 100644
--- a/tests/test_tokenization_fsmt.py
+++ b/tests/test_tokenization_fsmt.py
@@ -19,8 +19,8 @@ import os
 import unittest

 from transformers.file_utils import cached_property
+from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES, FSMTTokenizer
 from transformers.testing_utils import slow
-from transformers.tokenization_fsmt import VOCAB_FILES_NAMES, FSMTTokenizer

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_tokenization_funnel.py b/tests/test_tokenization_funnel.py
index b2c9d6fc2e5..0cb76a7ef07 100644
--- a/tests/test_tokenization_funnel.py
+++ b/tests/test_tokenization_funnel.py
@@ -18,8 +18,8 @@ import os
 import unittest

 from transformers import FunnelTokenizer, FunnelTokenizerFast
+from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
 from transformers.testing_utils import require_tokenizers
-from transformers.tokenization_funnel import VOCAB_FILES_NAMES

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_tokenization_gpt2.py b/tests/test_tokenization_gpt2.py
index cb479f2e34f..5178f4f6a8a 100644
--- a/tests/test_tokenization_gpt2.py
+++ b/tests/test_tokenization_gpt2.py
@@ -19,8 +19,8 @@ import os
 import unittest

 from transformers import GPT2Tokenizer, GPT2TokenizerFast
+from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
 from transformers.testing_utils import require_tokenizers
-from transformers.tokenization_gpt2 import VOCAB_FILES_NAMES

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_tokenization_herbert.py b/tests/test_tokenization_herbert.py
index 7565241af7a..e8569406bf9 100644
--- a/tests/test_tokenization_herbert.py
+++ b/tests/test_tokenization_herbert.py
@@ -19,8 +19,8 @@ import os
 import unittest

 from transformers import HerbertTokenizer, HerbertTokenizerFast
+from transformers.models.herbert.tokenization_herbert import VOCAB_FILES_NAMES
 from transformers.testing_utils import get_tests_dir, require_tokenizers, slow
-from transformers.tokenization_herbert import VOCAB_FILES_NAMES

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_tokenization_layoutlm.py b/tests/test_tokenization_layoutlm.py
index 654d857ceb9..7e119bd27d2 100644
--- a/tests/test_tokenization_layoutlm.py
+++ b/tests/test_tokenization_layoutlm.py
@@ -18,8 +18,8 @@ import os
 import unittest

 from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
+from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
 from transformers.testing_utils import require_tokenizers
-from transformers.tokenization_layoutlm import VOCAB_FILES_NAMES

 from .test_tokenization_common import TokenizerTesterMixin
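The tokenizer tests in this block all rely on the same fixture pattern: write a tiny vocabulary to disk under the filename the tokenizer expects (taken from `VOCAB_FILES_NAMES`), then instantiate the tokenizer from that file. The constant now comes from the per-model package; a rough standalone sketch of the pattern, using the BERT WordPiece vocab format:

    import os
    import tempfile

    from transformers import BertTokenizer
    from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES

    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "hello", "world"]
    tmpdir = tempfile.mkdtemp()
    vocab_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["vocab_file"])  # "vocab.txt"
    with open(vocab_file, "w", encoding="utf-8") as f:
        f.write("\n".join(vocab_tokens))

    tokenizer = BertTokenizer(vocab_file)
    print(tokenizer.tokenize("hello world"))  # ['hello', 'world']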
diff --git a/tests/test_tokenization_lxmert.py b/tests/test_tokenization_lxmert.py
index a4677bcb5fe..716386016ef 100644
--- a/tests/test_tokenization_lxmert.py
+++ b/tests/test_tokenization_lxmert.py
@@ -18,8 +18,8 @@ import os
 import unittest

 from transformers import LxmertTokenizer, LxmertTokenizerFast
+from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
 from transformers.testing_utils import require_tokenizers
-from transformers.tokenization_bert import VOCAB_FILES_NAMES

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_tokenization_marian.py b/tests/test_tokenization_marian.py
index 5759c91b9e1..7c50184f5c5 100644
--- a/tests/test_tokenization_marian.py
+++ b/tests/test_tokenization_marian.py
@@ -25,7 +25,7 @@ from transformers.testing_utils import _sentencepiece_available, _torch_availabl


 if _sentencepiece_available:
-    from transformers.tokenization_marian import save_json, vocab_files_names
+    from transformers.models.marian.tokenization_marian import save_json, vocab_files_names

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_tokenization_mbart.py b/tests/test_tokenization_mbart.py
index 171d08880f5..dd8d6e3f4fb 100644
--- a/tests/test_tokenization_mbart.py
+++ b/tests/test_tokenization_mbart.py
@@ -24,7 +24,7 @@ if _sentencepiece_available:


 if is_torch_available():
-    from transformers.modeling_bart import shift_tokens_right
+    from transformers.models.bart.modeling_bart import shift_tokens_right

 EN_CODE = 250004
 RO_CODE = 250020
diff --git a/tests/test_tokenization_openai.py b/tests/test_tokenization_openai.py
index 97f674a428a..ad6fbb0715f 100644
--- a/tests/test_tokenization_openai.py
+++ b/tests/test_tokenization_openai.py
@@ -19,8 +19,8 @@ import os
 import unittest

 from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
+from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
 from transformers.testing_utils import require_tokenizers
-from transformers.tokenization_openai import VOCAB_FILES_NAMES

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_tokenization_phobert.py b/tests/test_tokenization_phobert.py
index 95625f7a2d8..3466a34b59b 100644
--- a/tests/test_tokenization_phobert.py
+++ b/tests/test_tokenization_phobert.py
@@ -16,7 +16,7 @@ import os
 import unittest

-from transformers.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
+from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_tokenization_prophetnet.py b/tests/test_tokenization_prophetnet.py
index 34f32c66ca6..918612329ff 100644
--- a/tests/test_tokenization_prophetnet.py
+++ b/tests/test_tokenization_prophetnet.py
@@ -18,15 +18,15 @@ import os
 import unittest

 from transformers import BatchEncoding
-from transformers.testing_utils import require_torch, slow
-from transformers.tokenization_bert import (
+from transformers.models.bert.tokenization_bert import (
     BasicTokenizer,
     WordpieceTokenizer,
     _is_control,
     _is_punctuation,
     _is_whitespace,
 )
-from transformers.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
+from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
+from transformers.testing_utils import require_torch, slow

 from .test_tokenization_common import TokenizerTesterMixin
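ProphetNet's tokenizer test reuses BERT's low-level pieces (`BasicTokenizer`, `WordpieceTokenizer` and the `_is_*` character checks), which now also have to be imported from `transformers.models.bert.tokenization_bert`. A small illustration of two of those helpers; the expected outputs in the comments are my reading of their behaviour, not taken from the patch:

    from transformers.models.bert.tokenization_bert import BasicTokenizer, _is_punctuation

    basic = BasicTokenizer(do_lower_case=True)
    print(basic.tokenize("Hello, WORLD!"))             # ['hello', ',', 'world', '!']
    print(_is_punctuation(","), _is_punctuation("a"))  # True False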
diff --git a/tests/test_tokenization_rag.py b/tests/test_tokenization_rag.py
index 63bdb541e61..fa995f41693 100644
--- a/tests/test_tokenization_rag.py
+++ b/tests/test_tokenization_rag.py
@@ -5,17 +5,17 @@ import tempfile
 from unittest import TestCase

 from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
-from transformers.configuration_bart import BartConfig
-from transformers.configuration_dpr import DPRConfig
 from transformers.file_utils import is_datasets_available, is_faiss_available, is_torch_available
+from transformers.models.bart.configuration_bart import BartConfig
+from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
+from transformers.models.dpr.configuration_dpr import DPRConfig
+from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
 from transformers.testing_utils import require_datasets, require_faiss, require_tokenizers, require_torch, slow
-from transformers.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
-from transformers.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES


 if is_torch_available() and is_datasets_available() and is_faiss_available():
-    from transformers.configuration_rag import RagConfig
-    from transformers.tokenization_rag import RagTokenizer
+    from transformers.models.rag.configuration_rag import RagConfig
+    from transformers.models.rag.tokenization_rag import RagTokenizer


 @require_faiss
diff --git a/tests/test_tokenization_roberta.py b/tests/test_tokenization_roberta.py
index 30d5c41782d..eadc2b42d54 100644
--- a/tests/test_tokenization_roberta.py
+++ b/tests/test_tokenization_roberta.py
@@ -19,8 +19,8 @@ import os
 import unittest

 from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
+from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
 from transformers.testing_utils import require_tokenizers, slow
-from transformers.tokenization_roberta import VOCAB_FILES_NAMES

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_tokenization_transfo_xl.py b/tests/test_tokenization_transfo_xl.py
index 7e513277421..557cc67c64c 100644
--- a/tests/test_tokenization_transfo_xl.py
+++ b/tests/test_tokenization_transfo_xl.py
@@ -17,7 +17,7 @@ import os
 import unittest

-from transformers.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
+from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_tokenization_utils.py b/tests/test_tokenization_utils.py
index 3bc09d2f0fa..05c6d19c32f 100644
--- a/tests/test_tokenization_utils.py
+++ b/tests/test_tokenization_utils.py
@@ -19,8 +19,8 @@ from typing import Callable, Optional
 import numpy as np

 from transformers import BatchEncoding, BertTokenizer, BertTokenizerFast, PreTrainedTokenizer, TensorType, TokenSpan
+from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
 from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
-from transformers.tokenization_gpt2 import GPT2Tokenizer


 class TokenizerUtilsTest(unittest.TestCase):
diff --git a/tests/test_tokenization_xlm.py b/tests/test_tokenization_xlm.py
index 4bd40635f34..b164ded0533 100644
--- a/tests/test_tokenization_xlm.py
+++ b/tests/test_tokenization_xlm.py
@@ -18,8 +18,8 @@ import json
 import os
 import unittest

+from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
 from transformers.testing_utils import slow
-from transformers.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_tokenization_xlm_prophetnet.py b/tests/test_tokenization_xlm_prophetnet.py
index 7dfdee6b5f8..dd426547ac8 100644
--- a/tests/test_tokenization_xlm_prophetnet.py
+++ b/tests/test_tokenization_xlm_prophetnet.py
@@ -18,8 +18,8 @@ import os
 import unittest

 from transformers.file_utils import cached_property
+from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
 from transformers.testing_utils import require_sentencepiece, slow
-from transformers.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer

 from .test_tokenization_common import TokenizerTesterMixin
diff --git a/tests/test_utils_check_copies.py b/tests/test_utils_check_copies.py
index 24d05f7c4fe..715807fe3fd 100644
--- a/tests/test_utils_check_copies.py
+++ b/tests/test_utils_check_copies.py
@@ -37,10 +37,11 @@ REFERENCE_CODE = """    def __init__(self, config):
 class CopyCheckTester(unittest.TestCase):
     def setUp(self):
         self.transformer_dir = tempfile.mkdtemp()
+        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
         check_copies.TRANSFORMER_PATH = self.transformer_dir
         shutil.copy(
-            os.path.join(git_repo_path, "src/transformers/modeling_bert.py"),
-            os.path.join(self.transformer_dir, "modeling_bert.py"),
+            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
+            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
         )

     def tearDown(self):
@@ -62,27 +63,27 @@ class CopyCheckTester(unittest.TestCase):
             self.assertTrue(f.read(), expected)

     def test_find_code_in_transformers(self):
-        code = check_copies.find_code_in_transformers("modeling_bert.BertLMPredictionHead")
+        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
         self.assertEqual(code, REFERENCE_CODE)

     def test_is_copy_consistent(self):
         # Base copy consistency
         self.check_copy_consistency(
-            "# Copied from transformers.modeling_bert.BertLMPredictionHead",
+            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
             "BertLMPredictionHead",
             REFERENCE_CODE + "\n",
         )

         # With no empty line at the end
         self.check_copy_consistency(
-            "# Copied from transformers.modeling_bert.BertLMPredictionHead",
+            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
             "BertLMPredictionHead",
             REFERENCE_CODE,
         )

         # Copy consistency with rename
         self.check_copy_consistency(
-            "# Copied from transformers.modeling_bert.BertLMPredictionHead with Bert->TestModel",
+            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
             "TestModelLMPredictionHead",
             re.sub("Bert", "TestModel", REFERENCE_CODE),
         )
@@ -90,14 +91,14 @@ class CopyCheckTester(unittest.TestCase):
         # Copy consistency with a really long name
         long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReasonIReallyDontUnderstand"
         self.check_copy_consistency(
-            f"# Copied from transformers.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
+            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
             f"{long_class_name}LMPredictionHead",
             re.sub("Bert", long_class_name, REFERENCE_CODE),
         )

         # Copy consistency with overwrite
         self.check_copy_consistency(
-            "# Copied from transformers.modeling_bert.BertLMPredictionHead with Bert->TestModel",
+            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
             "TestModelLMPredictionHead",
             REFERENCE_CODE,
             overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
diff --git a/utils/check_repo.py b/utils/check_repo.py
index f79a9c2642b..fd4316cd41d 100644
--- a/utils/check_repo.py
+++ b/utils/check_repo.py
@@ -308,12 +308,12 @@ def check_all_models_are_documented():


 def get_all_auto_configured_models():
     """ Return the list of all models in at least one auto class."""
     result = set()  # To avoid duplicates we concatenate all model classes in a set.
-    for attr_name in dir(transformers.modeling_auto):
+    for attr_name in dir(transformers.models.auto.modeling_auto):
         if attr_name.startswith("MODEL_") and attr_name.endswith("MAPPING"):
-            result = result | set(getattr(transformers.modeling_auto, attr_name).values())
-    for attr_name in dir(transformers.modeling_tf_auto):
+            result = result | set(getattr(transformers.models.auto.modeling_auto, attr_name).values())
+    for attr_name in dir(transformers.models.auto.modeling_tf_auto):
         if attr_name.startswith("TF_MODEL_") and attr_name.endswith("MAPPING"):
-            result = result | set(getattr(transformers.modeling_tf_auto, attr_name).values())
+            result = result | set(getattr(transformers.models.auto.modeling_tf_auto, attr_name).values())
     return [cls.__name__ for cls in result]