From 7293fdc5b9cc809c2aa2ceb84f903ad47e5c06f0 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 24 Nov 2023 11:48:02 +0100 Subject: [PATCH] Deprecate `TransfoXL` (#27607) * fix * fix * trigger * Apply suggestions from code review Co-authored-by: Lysandre Debut * tic * revert * revert --------- Co-authored-by: ydshieh Co-authored-by: Lysandre Debut --- docs/source/en/model_doc/transfo-xl.md | 31 +- .../source/es/converting_tensorflow_models.md | 14 - .../source/it/converting_tensorflow_models.md | 15 - .../source/pt/converting_tensorflow_models.md | 14 - src/transformers/__init__.py | 104 ++-- src/transformers/commands/convert.py | 21 +- src/transformers/models/__init__.py | 1 - .../models/auto/configuration_auto.py | 3 + .../{ => deprecated}/transfo_xl/__init__.py | 2 +- .../transfo_xl/configuration_transfo_xl.py | 10 +- ...fo_xl_original_tf_checkpoint_to_pytorch.py | 4 +- .../transfo_xl/modeling_tf_transfo_xl.py | 6 +- .../modeling_tf_transfo_xl_utilities.py | 2 +- .../transfo_xl/modeling_transfo_xl.py | 4 +- .../modeling_transfo_xl_utilities.py | 0 .../transfo_xl/tokenization_transfo_xl.py | 4 +- src/transformers/utils/dummy_pt_objects.py | 84 +-- src/transformers/utils/dummy_tf_objects.py | 90 +-- tests/generation/test_utils.py | 6 +- tests/models/transfo_xl/__init__.py | 0 .../transfo_xl/test_modeling_tf_transfo_xl.py | 282 --------- .../transfo_xl/test_modeling_transfo_xl.py | 533 ------------------ .../test_tokenization_transfo_xl.py | 156 ----- tests/test_modeling_common.py | 1 - tests/test_modeling_tf_common.py | 1 - utils/check_config_attributes.py | 1 - utils/check_docstrings.py | 1 - utils/check_repo.py | 2 - utils/not_doctested.txt | 12 +- 29 files changed, 194 insertions(+), 1210 deletions(-) rename src/transformers/models/{ => deprecated}/transfo_xl/__init__.py (96%) rename src/transformers/models/{ => deprecated}/transfo_xl/configuration_transfo_xl.py (97%) rename src/transformers/models/{ => deprecated}/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py (95%) mode change 100755 => 100644 rename src/transformers/models/{ => deprecated}/transfo_xl/modeling_tf_transfo_xl.py (99%) rename src/transformers/models/{ => deprecated}/transfo_xl/modeling_tf_transfo_xl_utilities.py (99%) rename src/transformers/models/{ => deprecated}/transfo_xl/modeling_transfo_xl.py (99%) rename src/transformers/models/{ => deprecated}/transfo_xl/modeling_transfo_xl_utilities.py (100%) rename src/transformers/models/{ => deprecated}/transfo_xl/tokenization_transfo_xl.py (99%) delete mode 100644 tests/models/transfo_xl/__init__.py delete mode 100644 tests/models/transfo_xl/test_modeling_tf_transfo_xl.py delete mode 100644 tests/models/transfo_xl/test_modeling_transfo_xl.py delete mode 100644 tests/models/transfo_xl/test_tokenization_transfo_xl.py diff --git a/docs/source/en/model_doc/transfo-xl.md b/docs/source/en/model_doc/transfo-xl.md index d75e3a37b99..05afc76f111 100644 --- a/docs/source/en/model_doc/transfo-xl.md +++ b/docs/source/en/model_doc/transfo-xl.md @@ -16,6 +16,29 @@ rendered properly in your Markdown viewer. # Transformer XL + + +This model is in maintenance mode only, so we won't accept any new PRs changing its code. This model was deprecated due to security issues linked to `pickle.load`. + +We recommend switching to more recent models for improved security. 
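+
+As a rough illustration of why `pickle.load` is a problem (a hypothetical payload, not code from this repository): unpickling runs arbitrary code chosen by whoever produced the file, so loading an untrusted checkpoint is equivalent to executing the uploader's code.
+
+```python
+import pickle
+
+
+class Payload:
+    # `__reduce__` tells pickle how to rebuild an object; a malicious file
+    # can abuse it to invoke any callable at load time.
+    def __reduce__(self):
+        import os
+
+        return (os.system, ("echo arbitrary code executed at unpickling time",))
+
+
+pickle.loads(pickle.dumps(Payload()))  # runs the command -- never unpickle untrusted data
+```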
+
+In case you would still like to use `TransfoXL` in your experiments, we recommend using the [Hub checkpoint](https://huggingface.co/transfo-xl-wt103) with a specific revision to ensure you are downloading safe files from the Hub:
+
+```python
+from transformers import TransfoXLTokenizer, TransfoXLLMHeadModel
+
+checkpoint = 'transfo-xl-wt103'
+revision = '40a186da79458c9f9de846edfaea79c412137f97'
+
+tokenizer = TransfoXLTokenizer.from_pretrained(checkpoint, revision=revision)
+model = TransfoXLLMHeadModel.from_pretrained(checkpoint, revision=revision)
+```
+
+If you run into any issues running this model, please reinstall the last version that supported this model: v4.35.0.
+You can do so by running the following command: `pip install -U transformers==4.35.0`.
+
+
+
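+
+Note that this deprecation only moves the implementation under `transformers.models.deprecated`; the public classes are still exported from the top level. A short sketch of what does and does not change (module paths taken from the renames in this patch):
+
+```python
+# Top-level imports keep working exactly as before:
+from transformers import TransfoXLConfig, TransfoXLLMHeadModel, TransfoXLTokenizer
+
+# Direct module imports, however, must now use the `deprecated` path:
+from transformers.models.deprecated.transfo_xl.configuration_transfo_xl import TransfoXLConfig
+```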
Models @@ -79,13 +102,13 @@ TransformerXL does **not** work with *torch.nn.DataParallel* due to a bug in PyT ## TransfoXL specific outputs -[[autodoc]] models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput +[[autodoc]] models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput -[[autodoc]] models.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput +[[autodoc]] models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput -[[autodoc]] models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput +[[autodoc]] models.deprecated.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput -[[autodoc]] models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput +[[autodoc]] models.deprecated.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput diff --git a/docs/source/es/converting_tensorflow_models.md b/docs/source/es/converting_tensorflow_models.md index c7e22bddac7..8e5b1ad1e28 100644 --- a/docs/source/es/converting_tensorflow_models.md +++ b/docs/source/es/converting_tensorflow_models.md @@ -96,20 +96,6 @@ transformers-cli convert --model_type gpt2 \ [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK] ``` -## Transformer-XL - -Aquí hay un ejemplo del proceso para convertir un modelo Transformer-XL pre-entrenado (más información [aquí](https://github.com/kimiyoung/transformer-xl/tree/master/tf#obtain-and-evaluate-pretrained-sota-models)): - -```bash -export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint - -transformers-cli convert --model_type transfo_xl \ - --tf_checkpoint $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \ - --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \ - [--config TRANSFO_XL_CONFIG] \ - [--finetuning_task_name TRANSFO_XL_FINETUNED_TASK] -``` - ## XLNet Aquí hay un ejemplo del proceso para convertir un modelo XLNet pre-entrenado: diff --git a/docs/source/it/converting_tensorflow_models.md b/docs/source/it/converting_tensorflow_models.md index 04398636359..f6326daa735 100644 --- a/docs/source/it/converting_tensorflow_models.md +++ b/docs/source/it/converting_tensorflow_models.md @@ -104,21 +104,6 @@ transformers-cli convert --model_type gpt2 \ [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK] ``` -## Transformer-XL - - -Ecco un esempio del processo di conversione di un modello Transformer-XL pre-allenato -(vedi [qui](https://github.com/kimiyoung/transformer-xl/tree/master/tf#obtain-and-evaluate-pretrained-sota-models)): - -```bash -export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint -transformers-cli convert --model_type transfo_xl \ - --tf_checkpoint $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \ - --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \ - [--config TRANSFO_XL_CONFIG] \ - [--finetuning_task_name TRANSFO_XL_FINETUNED_TASK] -``` - ## XLNet Ecco un esempio del processo di conversione di un modello XLNet pre-allenato: diff --git a/docs/source/pt/converting_tensorflow_models.md b/docs/source/pt/converting_tensorflow_models.md index ac1271d2764..97767b2ad42 100644 --- a/docs/source/pt/converting_tensorflow_models.md +++ b/docs/source/pt/converting_tensorflow_models.md @@ -109,20 +109,6 @@ transformers-cli convert --model_type gpt2 \ [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK] ``` -## Transformer-XL - -Aqui está um exemplo do processo de conversão para um modelo Transformer-XL pré-treinado (consulte [aqui](https://github.com/kimiyoung/transformer-xl/tree/master/tf#obtain-and-evaluate-pretrained-modelos-sota)) - -```bash -export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint - 
-transformers-cli convert --model_type transfo_xl \ - --tf_checkpoint $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \ - --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \ - [--config TRANSFO_XL_CONFIG] \ - [--finetuning_task_name TRANSFO_XL_FINETUNED_TASK] -``` - ## XLNet Aqui está um exemplo do processo de conversão para um modelo XLNet pré-treinado: diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index e09752f5f39..3c3b70fd724 100644 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -307,6 +307,12 @@ _import_structure = { "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrajectoryTransformerConfig", ], + "models.deprecated.transfo_xl": [ + "TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", + "TransfoXLConfig", + "TransfoXLCorpus", + "TransfoXLTokenizer", + ], "models.deprecated.van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"], "models.deta": ["DETA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetaConfig"], "models.detr": ["DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetrConfig"], @@ -580,12 +586,6 @@ _import_structure = { ], "models.timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"], "models.timm_backbone": ["TimmBackboneConfig"], - "models.transfo_xl": [ - "TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", - "TransfoXLConfig", - "TransfoXLCorpus", - "TransfoXLTokenizer", - ], "models.trocr": [ "TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig", @@ -1661,6 +1661,17 @@ else: "TrajectoryTransformerPreTrainedModel", ] ) + _import_structure["models.deprecated.transfo_xl"].extend( + [ + "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", + "AdaptiveEmbedding", + "TransfoXLForSequenceClassification", + "TransfoXLLMHeadModel", + "TransfoXLModel", + "TransfoXLPreTrainedModel", + "load_tf_weights_in_transfo_xl", + ] + ) _import_structure["models.deprecated.van"].extend( [ "VAN_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -2919,17 +2930,6 @@ else: ] ) _import_structure["models.timm_backbone"].extend(["TimmBackbone"]) - _import_structure["models.transfo_xl"].extend( - [ - "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", - "AdaptiveEmbedding", - "TransfoXLForSequenceClassification", - "TransfoXLLMHeadModel", - "TransfoXLModel", - "TransfoXLPreTrainedModel", - "load_tf_weights_in_transfo_xl", - ] - ) _import_structure["models.trocr"].extend( ["TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel"] ) @@ -3525,6 +3525,17 @@ else: "TFDeiTPreTrainedModel", ] ) + _import_structure["models.deprecated.transfo_xl"].extend( + [ + "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFAdaptiveEmbedding", + "TFTransfoXLForSequenceClassification", + "TFTransfoXLLMHeadModel", + "TFTransfoXLMainLayer", + "TFTransfoXLModel", + "TFTransfoXLPreTrainedModel", + ] + ) _import_structure["models.distilbert"].extend( [ "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -3889,17 +3900,6 @@ else: "TFTapasPreTrainedModel", ] ) - _import_structure["models.transfo_xl"].extend( - [ - "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", - "TFAdaptiveEmbedding", - "TFTransfoXLForSequenceClassification", - "TFTransfoXLLMHeadModel", - "TFTransfoXLMainLayer", - "TFTransfoXLModel", - "TFTransfoXLPreTrainedModel", - ] - ) _import_structure["models.vision_encoder_decoder"].extend(["TFVisionEncoderDecoderModel"]) _import_structure["models.vision_text_dual_encoder"].extend(["TFVisionTextDualEncoderModel"]) _import_structure["models.vit"].extend( @@ -4552,6 +4552,12 @@ if TYPE_CHECKING: TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) + from 
.models.deprecated.transfo_xl import ( + TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, + TransfoXLConfig, + TransfoXLCorpus, + TransfoXLTokenizer, + ) from .models.deprecated.van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig from .models.deta import DETA_PRETRAINED_CONFIG_ARCHIVE_MAP, DetaConfig from .models.detr import DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DetrConfig @@ -4812,12 +4818,6 @@ if TYPE_CHECKING: ) from .models.timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig from .models.timm_backbone import TimmBackboneConfig - from .models.transfo_xl import ( - TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, - TransfoXLConfig, - TransfoXLCorpus, - TransfoXLTokenizer, - ) from .models.trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig, TrOCRProcessor from .models.tvlt import TVLT_PRETRAINED_CONFIG_ARCHIVE_MAP, TvltConfig, TvltFeatureExtractor, TvltProcessor from .models.tvp import ( @@ -5746,6 +5746,15 @@ if TYPE_CHECKING: TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, ) + from .models.deprecated.transfo_xl import ( + TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, + AdaptiveEmbedding, + TransfoXLForSequenceClassification, + TransfoXLLMHeadModel, + TransfoXLModel, + TransfoXLPreTrainedModel, + load_tf_weights_in_transfo_xl, + ) from .models.deprecated.van import ( VAN_PRETRAINED_MODEL_ARCHIVE_LIST, VanForImageClassification, @@ -6774,15 +6783,6 @@ if TYPE_CHECKING: TimesformerPreTrainedModel, ) from .models.timm_backbone import TimmBackbone - from .models.transfo_xl import ( - TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, - AdaptiveEmbedding, - TransfoXLForSequenceClassification, - TransfoXLLMHeadModel, - TransfoXLModel, - TransfoXLPreTrainedModel, - load_tf_weights_in_transfo_xl, - ) from .models.trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel from .models.tvlt import ( TVLT_PRETRAINED_MODEL_ARCHIVE_LIST, @@ -7269,6 +7269,15 @@ if TYPE_CHECKING: TFDeiTModel, TFDeiTPreTrainedModel, ) + from .models.deprecated.transfo_xl import ( + TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, + TFAdaptiveEmbedding, + TFTransfoXLForSequenceClassification, + TFTransfoXLLMHeadModel, + TFTransfoXLMainLayer, + TFTransfoXLModel, + TFTransfoXLPreTrainedModel, + ) from .models.distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, @@ -7554,15 +7563,6 @@ if TYPE_CHECKING: TFTapasModel, TFTapasPreTrainedModel, ) - from .models.transfo_xl import ( - TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, - TFAdaptiveEmbedding, - TFTransfoXLForSequenceClassification, - TFTransfoXLLMHeadModel, - TFTransfoXLMainLayer, - TFTransfoXLModel, - TFTransfoXLPreTrainedModel, - ) from .models.vision_encoder_decoder import TFVisionEncoderDecoderModel from .models.vision_text_dual_encoder import TFVisionTextDualEncoderModel from .models.vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel diff --git a/src/transformers/commands/convert.py b/src/transformers/commands/convert.py index b46e14f5a67..77df8ea1106 100644 --- a/src/transformers/commands/convert.py +++ b/src/transformers/commands/convert.py @@ -123,23 +123,6 @@ class ConvertCommand(BaseTransformersCLICommand): ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) - elif self._model_type == "transfo_xl": - try: - from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( - convert_transfo_xl_checkpoint_to_pytorch, - ) - except ImportError: - raise 
ImportError(IMPORT_ERROR_MESSAGE) - - if "ckpt" in self._tf_checkpoint.lower(): - TF_CHECKPOINT = self._tf_checkpoint - TF_DATASET_FILE = "" - else: - TF_DATASET_FILE = self._tf_checkpoint - TF_CHECKPOINT = "" - convert_transfo_xl_checkpoint_to_pytorch( - TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE - ) elif self._model_type == "gpt2": try: from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import ( @@ -179,6 +162,4 @@ class ConvertCommand(BaseTransformersCLICommand): convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output) else: - raise ValueError( - "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" - ) + raise ValueError("--model_type should be selected in the list [bert, gpt, gpt2, t5, xlnet, xlm, lxmert]") diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 997ee82b432..317402650e5 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -204,7 +204,6 @@ from . import ( time_series_transformer, timesformer, timm_backbone, - transfo_xl, trocr, tvlt, tvp, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index 78a33270e7a..663d9515999 100755 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -706,6 +706,8 @@ MODEL_NAMES_MAPPING = OrderedDict( ] ) +# This is tied to the processing `-` -> `_` in `model_type_to_module_name`. For example, instead of putting +# `transfo-xl` (as in `CONFIG_MAPPING_NAMES`), we should use `transfo_xl`. DEPRECATED_MODELS = [ "bort", "mctct", @@ -714,6 +716,7 @@ DEPRECATED_MODELS = [ "retribert", "tapex", "trajectory_transformer", + "transfo_xl", "van", ] diff --git a/src/transformers/models/transfo_xl/__init__.py b/src/transformers/models/deprecated/transfo_xl/__init__.py similarity index 96% rename from src/transformers/models/transfo_xl/__init__.py rename to src/transformers/models/deprecated/transfo_xl/__init__.py index ce4215b0217..f3674e19665 100644 --- a/src/transformers/models/transfo_xl/__init__.py +++ b/src/transformers/models/deprecated/transfo_xl/__init__.py @@ -14,7 +14,7 @@ from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available +from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _import_structure = { diff --git a/src/transformers/models/transfo_xl/configuration_transfo_xl.py b/src/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py similarity index 97% rename from src/transformers/models/transfo_xl/configuration_transfo_xl.py rename to src/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py index 8550e718028..842c1643a00 100644 --- a/src/transformers/models/transfo_xl/configuration_transfo_xl.py +++ b/src/transformers/models/deprecated/transfo_xl/configuration_transfo_xl.py @@ -15,8 +15,8 @@ # limitations under the License. """ Transformer XL configuration""" -from ...configuration_utils import PretrainedConfig -from ...utils import logging +from ....configuration_utils import PretrainedConfig +from ....utils import logging logger = logging.get_logger(__name__) @@ -74,7 +74,7 @@ class TransfoXLConfig(PretrainedConfig): Whether or not to use adaptive softmax. 
dropout (`float`, *optional*, defaults to 0.1):
             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
-        dropatt (`float`, *optional*, defaults to 0):
+        dropatt (`float`, *optional*, defaults to 0.0):
             The dropout ratio for the attention probabilities.
         untie_r (`boolean`, *optional*, defaults to `True`):
             Whether or not to untie relative position biases.
         proj_share_all_but_first (`boolean`, *optional*, defaults to `True`):
             True to share all but first projs, False not to share.
         proj_init_std (`float`, *optional*, defaults to 0.01):
             Parameters initialized by N(0, init_std)
         init_std (`float`, *optional*, defaults to 0.02):
             Parameters initialized by N(0, init_std)
-        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
+        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
             The epsilon to use in the layer normalization layers
+        eos_token_id (`int`, *optional*, defaults to 0):
+            End of stream token id.
 
     Examples:
 
diff --git a/src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py b/src/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
old mode 100755
new mode 100644
similarity index 95%
rename from src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
rename to src/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
index 646c8a2342f..d2693ac333b
--- a/src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
+++ b/src/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py
@@ -23,8 +23,8 @@ import sys
 import torch
 
 from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
-from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
-from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
+from transformers.models.deprecated.transfo_xl import tokenization_transfo_xl as data_utils
+from transformers.models.deprecated.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
 from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
 
 
diff --git a/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py b/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py
similarity index 99%
rename from src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py
rename to src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py
index 88005b7e060..45a4ea56fd7 100644
--- a/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py
+++ b/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py
@@ -25,7 +25,7 @@ from typing import List, Optional, Tuple, Union
 import numpy as np
 import tensorflow as tf
 
-from ...modeling_tf_utils import (
+from ....modeling_tf_utils import (
     TFModelInputType,
     TFPreTrainedModel,
     TFSequenceClassificationLoss,
@@ -33,8 +33,8 @@ from ...modeling_tf_utils import (
     keras_serializable,
     unpack_inputs,
 )
-from ...tf_utils import shape_list, stable_softmax
-from ...utils import (
+from ....tf_utils import shape_list, stable_softmax
+from ....utils import (
     ModelOutput,
     add_code_sample_docstrings,
     add_start_docstrings,
diff --git a/src/transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py b/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py
similarity index 99%
rename from src/transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py
rename to 
src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py index dcfa84d0f94..c6a380842e4 100644 --- a/src/transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py +++ b/src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py @@ -20,7 +20,7 @@ import tensorflow as tf -from ...tf_utils import shape_list +from ....tf_utils import shape_list class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer): diff --git a/src/transformers/models/transfo_xl/modeling_transfo_xl.py b/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py similarity index 99% rename from src/transformers/models/transfo_xl/modeling_transfo_xl.py rename to src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py index 1e944c335ae..2d343bd7157 100644 --- a/src/transformers/models/transfo_xl/modeling_transfo_xl.py +++ b/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py @@ -25,8 +25,8 @@ import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss -from ...modeling_utils import PreTrainedModel -from ...utils import ( +from ....modeling_utils import PreTrainedModel +from ....utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, diff --git a/src/transformers/models/transfo_xl/modeling_transfo_xl_utilities.py b/src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py similarity index 100% rename from src/transformers/models/transfo_xl/modeling_transfo_xl_utilities.py rename to src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py diff --git a/src/transformers/models/transfo_xl/tokenization_transfo_xl.py b/src/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py similarity index 99% rename from src/transformers/models/transfo_xl/tokenization_transfo_xl.py rename to src/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py index eaa5ecee4ba..7f1ab6cd13c 100644 --- a/src/transformers/models/transfo_xl/tokenization_transfo_xl.py +++ b/src/transformers/models/deprecated/transfo_xl/tokenization_transfo_xl.py @@ -27,8 +27,8 @@ from typing import List, Optional, Tuple import numpy as np -from ...tokenization_utils import PreTrainedTokenizer -from ...utils import ( +from ....tokenization_utils import PreTrainedTokenizer +from ....utils import ( cached_file, is_sacremoses_available, is_torch_available, diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 278a97592c7..80f61489382 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -2676,6 +2676,48 @@ class TrajectoryTransformerPreTrainedModel(metaclass=DummyObject): requires_backends(self, ["torch"]) +TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class AdaptiveEmbedding(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TransfoXLForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TransfoXLLMHeadModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TransfoXLModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TransfoXLPreTrainedModel(metaclass=DummyObject): + _backends = 
["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +def load_tf_weights_in_transfo_xl(*args, **kwargs): + requires_backends(load_tf_weights_in_transfo_xl, ["torch"]) + + VAN_PRETRAINED_MODEL_ARCHIVE_LIST = None @@ -7739,48 +7781,6 @@ class TimmBackbone(metaclass=DummyObject): requires_backends(self, ["torch"]) -TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None - - -class AdaptiveEmbedding(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class TransfoXLForSequenceClassification(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class TransfoXLLMHeadModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class TransfoXLModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -class TransfoXLPreTrainedModel(metaclass=DummyObject): - _backends = ["torch"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["torch"]) - - -def load_tf_weights_in_transfo_xl(*args, **kwargs): - requires_backends(load_tf_weights_in_transfo_xl, ["torch"]) - - TROCR_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index 5bc238f5427..2099c18bcd7 100644 --- a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -1075,6 +1075,51 @@ class TFDeiTPreTrainedModel(metaclass=DummyObject): requires_backends(self, ["tf"]) +TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFAdaptiveEmbedding(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLLMHeadModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLMainLayer(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFTransfoXLPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None @@ -2613,51 +2658,6 @@ class TFTapasPreTrainedModel(metaclass=DummyObject): requires_backends(self, ["tf"]) -TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None - - -class TFAdaptiveEmbedding(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - -class TFTransfoXLForSequenceClassification(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - -class TFTransfoXLLMHeadModel(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - -class TFTransfoXLMainLayer(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - -class 
TFTransfoXLModel(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - -class TFTransfoXLPreTrainedModel(metaclass=DummyObject): - _backends = ["tf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["tf"]) - - class TFVisionEncoderDecoderModel(metaclass=DummyObject): _backends = ["tf"] diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index 729c7f87340..f4050c582b8 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -104,11 +104,7 @@ class GenerationTesterMixin: if isinstance(config.eos_token_id, int): config.eos_token_id = [config.eos_token_id] config.pad_token_id = config.eos_token_id[0] - # TransfoXL has no attention mask - if "transfoxl" in config.__class__.__name__.lower(): - attention_mask = None - else: - attention_mask = torch.ones_like(input_ids, dtype=torch.long)[:batch_size, :sequence_length] + attention_mask = torch.ones_like(input_ids, dtype=torch.long)[:batch_size, :sequence_length] return config, input_ids, attention_mask, max_length diff --git a/tests/models/transfo_xl/__init__.py b/tests/models/transfo_xl/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/models/transfo_xl/test_modeling_tf_transfo_xl.py b/tests/models/transfo_xl/test_modeling_tf_transfo_xl.py deleted file mode 100644 index fdbff90b24b..00000000000 --- a/tests/models/transfo_xl/test_modeling_tf_transfo_xl.py +++ /dev/null @@ -1,282 +0,0 @@ -# coding=utf-8 -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from __future__ import annotations - -import random -import unittest - -from transformers import TransfoXLConfig, is_tf_available -from transformers.testing_utils import require_tf, slow - -from ...test_configuration_common import ConfigTester -from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor -from ...test_pipeline_mixin import PipelineTesterMixin - - -if is_tf_available(): - import tensorflow as tf - - from transformers import ( - TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, - TFTransfoXLForSequenceClassification, - TFTransfoXLLMHeadModel, - TFTransfoXLModel, - ) - - -class TFTransfoXLModelTester: - def __init__( - self, - parent, - ): - self.parent = parent - self.batch_size = 13 - self.seq_length = 7 - self.mem_len = 30 - self.key_length = self.seq_length + self.mem_len - self.clamp_len = 15 - self.is_training = True - self.use_labels = True - self.vocab_size = 99 - self.cutoffs = [10, 50, 80] - self.hidden_size = 32 - self.d_embed = 32 - self.num_attention_heads = 4 - self.d_head = 8 - self.d_inner = 128 - self.div_val = 2 - self.num_hidden_layers = 2 - self.scope = None - self.seed = 1 - self.eos_token_id = 0 - self.num_labels = 3 - self.pad_token_id = self.vocab_size - 1 - self.init_range = 0.01 - - def prepare_config_and_inputs(self): - input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) - input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) - - lm_labels = None - if self.use_labels: - lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) - - config = TransfoXLConfig( - vocab_size=self.vocab_size, - mem_len=self.mem_len, - clamp_len=self.clamp_len, - cutoffs=self.cutoffs, - d_model=self.hidden_size, - d_embed=self.d_embed, - n_head=self.num_attention_heads, - d_head=self.d_head, - d_inner=self.d_inner, - div_val=self.div_val, - n_layer=self.num_hidden_layers, - eos_token_id=self.eos_token_id, - pad_token_id=self.vocab_size - 1, - init_range=self.init_range, - num_labels=self.num_labels, - ) - - return (config, input_ids_1, input_ids_2, lm_labels) - - def set_seed(self): - random.seed(self.seed) - tf.random.set_seed(self.seed) - - def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels): - model = TFTransfoXLModel(config) - - hidden_states_1, mems_1 = model(input_ids_1).to_tuple() - - inputs = {"input_ids": input_ids_2, "mems": mems_1} - - hidden_states_2, mems_2 = model(inputs).to_tuple() - - self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size)) - self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size)) - self.parent.assertListEqual( - [mem.shape for mem in mems_1], - [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, - ) - self.parent.assertListEqual( - [mem.shape for mem in mems_2], - [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, - ) - - def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels): - model = TFTransfoXLLMHeadModel(config) - - lm_logits_1, mems_1 = model(input_ids_1).to_tuple() - - inputs = {"input_ids": input_ids_1, "labels": lm_labels} - _, mems_1 = model(inputs).to_tuple() - - lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple() - - inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels} - - _, mems_2 = model(inputs).to_tuple() - - self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size)) - 
self.parent.assertListEqual( - [mem.shape for mem in mems_1], - [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, - ) - - self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size)) - self.parent.assertListEqual( - [mem.shape for mem in mems_2], - [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, - ) - - def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels): - model = TFTransfoXLForSequenceClassification(config) - result = model(input_ids_1) - self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) - - def prepare_config_and_inputs_for_common(self): - config_and_inputs = self.prepare_config_and_inputs() - (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs - inputs_dict = {"input_ids": input_ids_1} - return config, inputs_dict - - -@require_tf -class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): - all_model_classes = ( - (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () - ) - all_generative_model_classes = () if is_tf_available() else () - pipeline_model_mapping = ( - { - "feature-extraction": TFTransfoXLModel, - "text-classification": TFTransfoXLForSequenceClassification, - "text-generation": TFTransfoXLLMHeadModel, - "zero-shot": TFTransfoXLForSequenceClassification, - } - if is_tf_available() - else {} - ) - # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented - test_resize_embeddings = False - test_head_masking = False - test_onnx = False - test_mismatched_shapes = False - - # TODO: Fix the failed tests - def is_pipeline_test_to_skip( - self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name - ): - if pipeline_test_casse_name == "TextGenerationPipelineTests": - # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. - # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple - # tokenizer. 
- return True - - return False - - def setUp(self): - self.model_tester = TFTransfoXLModelTester(self) - self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37) - - def test_config(self): - self.config_tester.run_common_tests() - - def test_transfo_xl_model(self): - self.model_tester.set_seed() - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs) - - def test_transfo_xl_lm_head(self): - self.model_tester.set_seed() - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs) - - def test_transfo_xl_sequence_classification_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs) - - def test_model_common_attributes(self): - config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification] - - for model_class in self.all_model_classes: - model = model_class(config) - assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer) - if model_class in list_other_models_with_output_ebd: - x = model.get_output_embeddings() - assert isinstance(x, tf.keras.layers.Layer) - name = model.get_bias() - assert name is None - else: - x = model.get_output_embeddings() - assert x is None - name = model.get_bias() - assert name is None - - def test_xla_mode(self): - # TODO JP: Make TransfoXL XLA compliant - pass - - @slow - def test_model_from_pretrained(self): - for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: - model = TFTransfoXLModel.from_pretrained(model_name) - self.assertIsNotNone(model) - - @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.") - def test_dataset_conversion(self): - pass - - -@require_tf -class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase): - @unittest.skip("Skip test until #12651 is resolved.") - @slow - def test_lm_generate_transfo_xl_wt103(self): - model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103") - input_ids = tf.convert_to_tensor([[33, 1297, 2, 1, 1009, 4, 1109, 11739, 4762, 358, 5, 25, 245, 22, 1706, 17, 20098, 5, 3215, 21, 37, 1110, 3, 13, 1041, 4, 24, 603, 490, 2, 71477, 20098, 104447, 2, 20961, 1, 2604, 4, 1, 329, 3, 6224, 831, 16002, 2, 8, 603, 78967, 29546, 23, 803, 20, 25, 416, 5, 8, 232, 4, 277, 6, 1855, 4601, 3, 29546, 54, 8, 3609, 5, 57211, 49, 4, 1, 277, 18, 8, 1755, 15691, 3, 341, 25, 416, 693, 42573, 71, 17, 401, 94, 31, 17919, 2, 29546, 7873, 18, 1, 435, 23, 11011, 755, 5, 5167, 3, 7983, 98, 84, 2, 29546, 3267, 8, 3609, 4, 1, 4865, 1075, 2, 6087, 71, 6, 346, 8, 5854, 3, 29546, 824, 1400, 1868, 2, 19, 160, 2, 311, 8, 5496, 2, 20920, 17, 25, 15097, 3, 24, 24, 0]]) # fmt: skip - # In 1991 , the remains of Russian Tsar Nicholas II and his family - # ( except for Alexei and Maria ) are discovered . - # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the - # remainder of the story . 1883 Western Siberia , - # a young Grigori Rasputin is asked by his father and a group of men to perform magic . - # Rasputin has a vision and denounces one of the men as a horse thief . Although his - # father initially slaps him for making such an accusation , Rasputin watches as the - # man is chased outside and beaten . 
Twenty years later , Rasputin sees a vision of - # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , - # with people , even a bishop , begging for his blessing . - - expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,142,1298,188,2,29546,113,8,3654,4,1,1109,7136,833,3,13,1645,4,29546,11,104,7,1,1109,532,7129,2,10,83507,2,1162,1123,2,6,7245,10,2,5,11,104,7,1,1109,532,7129,2,10,24,24,10,22,10,13,770,5863,4,7245,10] # fmt: skip - # In 1991, the remains of Russian Tsar Nicholas II and his family ( except for - # Alexei and Maria ) are discovered. The voice of young son, Tsarevich Alexei - # Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young - # Grigori Rasputin is asked by his father and a group of men to perform magic. - # Rasputin has a vision and denounces one of the men as a horse thief. Although - # his father initially slaps him for making such an accusation, Rasputin watches - # as the man is chased outside and beaten. Twenty years later, Rasputin sees a - # vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly - # becomes famous, with people, even a bishop, begging for his blessing. In the - # early 20th century, Rasputin became a symbol of the Russian Orthodox Church. - # The image of Rasputin was used in the Russian national anthem, " Nearer, My God, - # to Heaven ", and was used in the Russian national anthem, " " ( " The Great Spirit - # of Heaven " - - output_ids = model.generate(input_ids, max_length=200, do_sample=False) - self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids) diff --git a/tests/models/transfo_xl/test_modeling_transfo_xl.py b/tests/models/transfo_xl/test_modeling_transfo_xl.py deleted file mode 100644 index 9534b13c852..00000000000 --- a/tests/models/transfo_xl/test_modeling_transfo_xl.py +++ /dev/null @@ -1,533 +0,0 @@ -# coding=utf-8 -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy -import random -import unittest - -from transformers import TransfoXLConfig, is_torch_available -from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device - -from ...generation.test_utils import GenerationTesterMixin -from ...test_configuration_common import ConfigTester -from ...test_modeling_common import ModelTesterMixin, ids_tensor -from ...test_pipeline_mixin import PipelineTesterMixin - - -if is_torch_available(): - import torch - from torch import nn - - from transformers import TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel - from transformers.models.transfo_xl.modeling_transfo_xl import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST - - -class TransfoXLModelTester: - def __init__( - self, - parent, - batch_size=14, - seq_length=7, - mem_len=30, - clamp_len=15, - is_training=False, - use_labels=True, - vocab_size=99, - cutoffs=[10, 50, 80], - hidden_size=32, - d_embed=32, - num_attention_heads=4, - d_head=8, - d_inner=128, - div_val=2, - num_hidden_layers=2, - scope=None, - seed=1, - eos_token_id=0, - num_labels=3, - ): - self.parent = parent - self.batch_size = batch_size - self.seq_length = seq_length - self.mem_len = mem_len - self.key_length = self.seq_length + self.mem_len - self.clamp_len = clamp_len - self.is_training = is_training - self.use_labels = use_labels - self.vocab_size = vocab_size - self.cutoffs = cutoffs - self.hidden_size = hidden_size - self.d_embed = d_embed - self.num_attention_heads = num_attention_heads - self.d_head = d_head - self.d_inner = d_inner - self.div_val = div_val - self.num_hidden_layers = num_hidden_layers - self.scope = scope - self.seed = seed - self.eos_token_id = eos_token_id - self.num_labels = num_labels - self.pad_token_id = self.vocab_size - 1 - - def prepare_config_and_inputs(self): - input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) - input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) - - lm_labels = None - if self.use_labels: - lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) - - config = self.get_config() - - return (config, input_ids_1, input_ids_2, lm_labels) - - def get_config(self): - return TransfoXLConfig( - vocab_size=self.vocab_size, - mem_len=self.mem_len, - clamp_len=self.clamp_len, - cutoffs=self.cutoffs, - d_model=self.hidden_size, - d_embed=self.d_embed, - n_head=self.num_attention_heads, - d_head=self.d_head, - d_inner=self.d_inner, - div_val=self.div_val, - n_layer=self.num_hidden_layers, - eos_token_id=self.eos_token_id, - pad_token_id=self.pad_token_id, - ) - - def set_seed(self): - random.seed(self.seed) - torch.manual_seed(self.seed) - - def create_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels): - model = TransfoXLModel(config) - model.to(torch_device) - model.eval() - - outputs1 = model(input_ids_1) - outputs2 = model(input_ids_2, outputs1["mems"]) - outputs = { - "hidden_states_1": outputs1["last_hidden_state"], - "mems_1": outputs1["mems"], - "hidden_states_2": outputs2["last_hidden_state"], - "mems_2": outputs2["mems"], - } - return outputs - - def check_transfo_xl_model_output(self, result): - self.parent.assertEqual(result["hidden_states_1"].shape, (self.batch_size, self.seq_length, self.hidden_size)) - self.parent.assertEqual(result["hidden_states_2"].shape, (self.batch_size, self.seq_length, self.hidden_size)) - self.parent.assertListEqual( - [mem.shape for mem in result["mems_1"]], - [(self.mem_len, self.batch_size, 
self.hidden_size)] * self.num_hidden_layers, - ) - self.parent.assertListEqual( - [mem.shape for mem in result["mems_2"]], - [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, - ) - - def create_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels): - model = TransfoXLLMHeadModel(config) - model.to(torch_device) - model.eval() - - lm_logits_1 = model(input_ids_1)["prediction_scores"] - outputs1 = model(input_ids_1, labels=lm_labels) - lm_logits_2 = model(input_ids_2, mems=outputs1["mems"])["prediction_scores"] - outputs2 = model(input_ids_2, labels=lm_labels, mems=outputs1["mems"]) - - outputs = { - "loss_1": outputs1["loss"], - "losses_1": outputs1["losses"], - "mems_1": outputs1["mems"], - "lm_logits_1": lm_logits_1, - "loss_2": outputs2["loss"], - "losses_2": outputs2["losses"], - "mems_2": outputs2["mems"], - "lm_logits_2": lm_logits_2, - } - return outputs - - def check_transfo_xl_lm_head_output(self, result): - self.parent.assertEqual(result["loss_1"].shape, ()) - self.parent.assertEqual(result["losses_1"].shape, (self.batch_size, self.seq_length - 1)) - self.parent.assertEqual(result["lm_logits_1"].shape, (self.batch_size, self.seq_length, self.vocab_size)) - self.parent.assertListEqual( - [mem.shape for mem in result["mems_1"]], - [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, - ) - - self.parent.assertEqual(result["loss_2"].shape, ()) - self.parent.assertEqual(result["losses_2"].shape, (self.batch_size, self.seq_length - 1)) - self.parent.assertEqual(result["lm_logits_2"].shape, (self.batch_size, self.seq_length, self.vocab_size)) - self.parent.assertListEqual( - [mem.shape for mem in result["mems_2"]], - [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, - ) - - def create_transfo_xl_lm_head_trainer_compatible_tuple(self, config, input_ids_1, input_ids_2, lm_labels): - config.trainer_compatible = True - model = TransfoXLLMHeadModel(config) - model.to(torch_device) - model.eval() - - lm_logits_1 = model(input_ids_1, return_dict=False)[0] - outputs1 = model(input_ids_1, labels=lm_labels, return_dict=False) - loss_1, _, losses_1, mems_1 = outputs1[:4] - lm_logits_2 = model(input_ids_2, mems=mems_1, return_dict=False)[0] - outputs2 = model(input_ids_2, labels=lm_labels, mems=mems_1, return_dict=False) - loss_2, _, losses_2, mems_2 = outputs2[:4] - - outputs = { - "losses_1": losses_1, - "mems_1": mems_1, - "lm_logits_1": lm_logits_1, - "loss_1": loss_1, - "losses_2": losses_2, - "mems_2": mems_2, - "lm_logits_2": lm_logits_2, - "loss_2": loss_2, - } - - config.trainer_compatible = None - return outputs - - def create_transfo_xl_lm_head_trainer_incompatible_tuple(self, config, input_ids_1, input_ids_2, lm_labels): - config.trainer_compatible = False - model = TransfoXLLMHeadModel(config) - model.to(torch_device) - model.eval() - - lm_logits_1 = model(input_ids_1, return_dict=False)[0] - outputs1 = model(input_ids_1, labels=lm_labels, return_dict=False) - losses_1, _, mems_1 = outputs1[:3] - loss_1 = outputs1[-1] - lm_logits_2 = model(input_ids_2, mems=mems_1, return_dict=False)[0] - outputs2 = model(input_ids_2, labels=lm_labels, mems=mems_1) - losses_2, _, mems_2 = outputs2[:3] - loss_2 = outputs2[-1] - - outputs = { - "losses_1": losses_1, - "mems_1": mems_1, - "lm_logits_1": lm_logits_1, - "loss_1": loss_1, - "losses_2": losses_2, - "mems_2": mems_2, - "lm_logits_2": lm_logits_2, - "loss_2": loss_2, - } - - config.trainer_compatible = None - return outputs - - def 
create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels): - config.num_labels = self.num_labels - model = TransfoXLForSequenceClassification(config) - model.to(torch_device) - model.eval() - result = model(input_ids_1) - self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) - - def prepare_config_and_inputs_for_common(self): - config_and_inputs = self.prepare_config_and_inputs() - (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs - inputs_dict = {"input_ids": input_ids_1} - return config, inputs_dict - - -@require_torch -class TransfoXLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): - all_model_classes = ( - (TransfoXLModel, TransfoXLLMHeadModel, TransfoXLForSequenceClassification) if is_torch_available() else () - ) - all_generative_model_classes = (TransfoXLLMHeadModel,) if is_torch_available() else () - pipeline_model_mapping = ( - { - "feature-extraction": TransfoXLModel, - "text-classification": TransfoXLForSequenceClassification, - "text-generation": TransfoXLLMHeadModel, - "zero-shot": TransfoXLForSequenceClassification, - } - if is_torch_available() - else {} - ) - test_pruning = False - test_resize_embeddings = True - test_mismatched_shapes = False - - # TODO: Fix the failed tests - def is_pipeline_test_to_skip( - self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name - ): - if pipeline_test_casse_name == "TextGenerationPipelineTests": - # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. - # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple - # tokenizer. - return True - - return False - - def check_cutoffs_and_n_token( - self, copied_cutoffs, layer, model_embed, model, model_class, resized_value, vocab_size - ): - # Check that the cutoffs were modified accordingly - for i in range(len(copied_cutoffs)): - if i < layer: - self.assertEqual(model_embed.cutoffs[i], copied_cutoffs[i]) - if model_class == TransfoXLLMHeadModel: - self.assertEqual(model.crit.cutoffs[i], copied_cutoffs[i]) - if i < len(model.config.cutoffs): - self.assertEqual(model.config.cutoffs[i], copied_cutoffs[i]) - else: - self.assertEqual(model_embed.cutoffs[i], copied_cutoffs[i] + resized_value) - if model_class == TransfoXLLMHeadModel: - self.assertEqual(model.crit.cutoffs[i], copied_cutoffs[i] + resized_value) - if i < len(model.config.cutoffs): - self.assertEqual(model.config.cutoffs[i], copied_cutoffs[i] + resized_value) - - self.assertEqual(model_embed.n_token, vocab_size + resized_value) - if model_class == TransfoXLLMHeadModel: - self.assertEqual(model.crit.n_token, vocab_size + resized_value) - - def setUp(self): - self.model_tester = TransfoXLModelTester(self) - self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37) - - def test_config(self): - self.config_tester.run_common_tests() - - def test_transfo_xl_model(self): - self.model_tester.set_seed() - config_and_inputs = self.model_tester.prepare_config_and_inputs() - output_result = self.model_tester.create_transfo_xl_model(*config_and_inputs) - self.model_tester.check_transfo_xl_model_output(output_result) - - def test_transfo_xl_lm_head(self): - self.model_tester.set_seed() - config_and_inputs = self.model_tester.prepare_config_and_inputs() - - output_result = self.model_tester.create_transfo_xl_lm_head(*config_and_inputs) - 
self.model_tester.check_transfo_xl_lm_head_output(output_result) - - output_result = self.model_tester.create_transfo_xl_lm_head_trainer_compatible_tuple(*config_and_inputs) - self.model_tester.check_transfo_xl_lm_head_output(output_result) - - output_result = self.model_tester.create_transfo_xl_lm_head_trainer_incompatible_tuple(*config_and_inputs) - self.model_tester.check_transfo_xl_lm_head_output(output_result) - - def test_transfo_xl_sequence_classification_model(self): - config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs) - - def test_retain_grad_hidden_states_attentions(self): - # xlnet cannot keep gradients in attentions or hidden states - return - - @require_torch_multi_gpu - def test_multi_gpu_data_parallel_forward(self): - # Opt-out of this test. - pass - - @slow - def test_model_from_pretrained(self): - for model_name in TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: - model = TransfoXLModel.from_pretrained(model_name) - self.assertIsNotNone(model) - - def test_resize_tokens_embeddings(self): - (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common() - if not self.test_resize_embeddings: - return - - for model_class in self.all_model_classes: - config = copy.deepcopy(original_config) - model = model_class(config) - model.to(torch_device) - - if self.model_tester.is_training is False: - model.eval() - - model_vocab_size = config.vocab_size - # Retrieve the embeddings and clone theme - model_embed = model.resize_token_embeddings(model_vocab_size) - cloned_embeddings = [emb.weight.clone() for emb in model_embed.emb_layers] - # Retrieve the cutoffs and copy them - copied_cutoffs = copy.copy(model_embed.cutoffs) - - test_layers = list(range(config.div_val)) - for layer in test_layers: - # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size - model_embed = model.resize_token_embeddings(model_vocab_size + 10, layer) - self.assertEqual(model.config.vocab_size, model_vocab_size + 10) - # Check that it actually resizes the embeddings matrix - self.assertEqual(model_embed.emb_layers[layer].weight.shape[0], cloned_embeddings[layer].shape[0] + 10) - # Check that the cutoffs were modified accordingly - self.check_cutoffs_and_n_token( - copied_cutoffs, layer, model_embed, model, model_class, 10, model_vocab_size - ) - - # Check that the model can still do a forward pass successfully (every parameter should be resized) - model(**inputs_dict) - - # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size - model_embed = model.resize_token_embeddings(model_vocab_size - 5, layer) - self.assertEqual(model.config.vocab_size, model_vocab_size - 5) - # Check that it actually resizes the embeddings matrix - self.assertEqual(model_embed.emb_layers[layer].weight.shape[0], cloned_embeddings[layer].shape[0] - 5) - # Check that the cutoffs were modified accordingly - self.check_cutoffs_and_n_token( - copied_cutoffs, layer, model_embed, model, model_class, -5, model_vocab_size - ) - - # Check that the model can still do a forward pass successfully (every parameter should be resized) - # Input ids should be clamped to the maximum size of the vocabulary - inputs_dict["input_ids"].clamp_(max=model_vocab_size - 5 - 1) - model(**inputs_dict) - - # Check that adding and removing tokens has not modified the first part of the embedding matrix. 
-
-    def test_resize_embeddings_untied(self):
-        # transfo-xl requires special resize for lm-head
-        return
-
-    def _check_attentions_for_generate(
-        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
-    ):
-        self.assertIsInstance(attentions, tuple)
-        self.assertListEqual(
-            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
-        )
-        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
-
-        for idx, iter_attentions in enumerate(attentions):
-            tgt_len = min_length if idx == 0 else (min_length - 2)
-            src_len = (min_length + config.mem_len) if idx == 0 else (min_length + config.mem_len - 2)
-
-            expected_shape = (
-                batch_size * num_beam_groups,
-                config.num_attention_heads,
-                tgt_len,
-                src_len,
-            )
-
-            # check attn size
-            self.assertListEqual(
-                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
-            )
-
-    def _check_hidden_states_for_generate(
-        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
-    ):
-        self.assertIsInstance(hidden_states, tuple)
-        self.assertListEqual(
-            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
-            [True] * len(hidden_states),
-        )
-        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
-
-        for idx, iter_hidden_states in enumerate(hidden_states):
-            seq_len = min_length if idx == 0 else min_length - 2
-            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
-            # check hidden size
-            self.assertListEqual(
-                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
-                [expected_shape] * len(iter_hidden_states),
-            )
-
-    # overwrite from test_modeling_common
-    def _mock_init_weights(self, module):
-        if hasattr(module, "weight") and module.weight is not None:
-            module.weight.data.fill_(3)
-        if hasattr(module, "cluster_weight") and module.cluster_weight is not None:
-            module.cluster_weight.data.fill_(3)
-        if hasattr(module, "bias") and module.bias is not None:
-            module.bias.data.fill_(3)
-        if hasattr(module, "cluster_bias") and module.cluster_bias is not None:
-            module.cluster_bias.data.fill_(3)
-
-        if hasattr(module, "emb_projs"):
-            for i in range(len(module.emb_projs)):
-                if module.emb_projs[i] is not None:
-                    nn.init.constant_(module.emb_projs[i], 0.0003)
-        if hasattr(module, "out_projs"):
-            for i in range(len(module.out_projs)):
-                if module.out_projs[i] is not None:
-                    nn.init.constant_(module.out_projs[i], 0.0003)
-
-        for param in ["r_emb", "r_w_bias", "r_r_bias", "r_bias"]:
-            if hasattr(module, param) and getattr(module, param) is not None:
-                weight = getattr(module, param)
-                weight.data.fill_(3)
-
-    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
-    def test_left_padding_compatibility(self):
-        pass
-
-    @unittest.skip("This test is currently broken because of safetensors.")
-    def test_tf_from_pt_safetensors(self):
-        pass
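The `_check_attentions_for_generate` override in the class above encodes the model's signature property: each attention matrix attends over the current segment plus `mem_len` cached hidden states, so the key length is the query length plus the memory length rather than the query length alone. A quick sketch of that shape arithmetic with made-up numbers:

```python
# Shape arithmetic mirrored from the override above; the numbers are
# illustrative, not taken from a real generation run.
batch_size, num_heads, mem_len = 2, 4, 30
min_length = 5  # prompt length at the first generation step

for idx in range(3):
    # After the first step the tester feeds a shortened input, hence "- 2".
    tgt_len = min_length if idx == 0 else min_length - 2
    src_len = tgt_len + mem_len  # queries attend over segment + memory
    print(idx, (batch_size, num_heads, tgt_len, src_len))
```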
-
-
-@require_torch
-class TransfoXLModelLanguageGenerationTest(unittest.TestCase):
-    @slow
-    def test_lm_generate_transfo_xl_wt103(self):
-        model = TransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
-        model.to(torch_device)
-
-        input_ids = torch.tensor([[33, 1297, 2, 1, 1009, 4, 1109, 11739, 4762, 358, 5, 25, 245, 22, 1706, 17, 20098, 5, 3215, 21, 37, 1110, 3, 13, 1041, 4, 24, 603, 490, 2, 71477, 20098, 104447, 2, 20961, 1, 2604, 4, 1, 329, 3, 6224, 831, 16002, 2, 8, 603, 78967, 29546, 23, 803, 20, 25, 416, 5, 8, 232, 4, 277, 6, 1855, 4601, 3, 29546, 54, 8, 3609, 5, 57211, 49, 4, 1, 277, 18, 8, 1755, 15691, 3, 341, 25, 416, 693, 42573, 71, 17, 401, 94, 31, 17919, 2, 29546, 7873, 18, 1, 435, 23, 11011, 755, 5, 5167, 3, 7983, 98, 84, 2, 29546, 3267, 8, 3609, 4, 1, 4865, 1075, 2, 6087, 71, 6, 346, 8, 5854, 3, 29546, 824, 1400, 1868, 2, 19, 160, 2, 311, 8, 5496, 2, 20920, 17, 25, 15097, 3, 24, 24, 0]], dtype=torch.long, device=torch_device)  # fmt: skip
-        # In 1991 , the remains of Russian Tsar Nicholas II and his family
-        # ( except for Alexei and Maria ) are discovered .
-        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
-        # remainder of the story . 1883 Western Siberia ,
-        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
-        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
-        # father initially slaps him for making such an accusation , Rasputin watches as the
-        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
-        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
-        # with people , even a bishop , begging for his blessing .
-
-        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,142,1298,188,2,29546,113,8,3654,4,1,1109,7136,833,3,13,1645,4,29546,11,104,7,1,1109,532,7129,2,10,83507,2,1162,1123,2,6,7245,10,2,5,11,104,7,1,1109,532,7129,2,10,24,24,10,22,10,13,770,5863,4,7245,10]  # fmt: skip
-        # In 1991, the remains of Russian Tsar Nicholas II and his family ( except for
-        # Alexei and Maria ) are discovered. The voice of young son, Tsarevich Alexei
-        # Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young
-        # Grigori Rasputin is asked by his father and a group of men to perform magic.
-        # Rasputin has a vision and denounces one of the men as a horse thief. Although
-        # his father initially slaps him for making such an accusation, Rasputin watches
-        # as the man is chased outside and beaten. Twenty years later, Rasputin sees a
-        # vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly
-        # becomes famous, with people, even a bishop, begging for his blessing. In the
-        # early 20th century, Rasputin became a symbol of the Russian Orthodox Church.
-        # The image of Rasputin was used in the Russian national anthem, " Nearer, My God,
-        # to Heaven ", and was used in the Russian national anthem, " " ( " The Great Spirit
-        # of Heaven "
-
-        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
-        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
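The integration test pins exact token ids because greedy decoding (`do_sample=False`) is deterministic for a fixed checkpoint. A hedged sketch of how such an expectation list could be regenerated; this assumes the checkpoint is still downloadable, and in practice it should be combined with the explicit `revision=` pinning recommended in the updated model doc:

```python
# Sketch for reproducing pinned generation ids; assumes "transfo-xl-wt103"
# is still reachable and should be combined with an explicit `revision=`.
from transformers import TransfoXLLMHeadModel, TransfoXLTokenizer

tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl-wt103")
model = TransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")

prompt = "In 1991 , the remains of Russian Tsar Nicholas II and his family"
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
output_ids = model.generate(input_ids, max_length=200, do_sample=False)
print(output_ids[0].tolist())           # candidate expected_output_ids
print(tokenizer.decode(output_ids[0]))  # human-readable sanity check
```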
diff --git a/tests/models/transfo_xl/test_tokenization_transfo_xl.py b/tests/models/transfo_xl/test_tokenization_transfo_xl.py
deleted file mode 100644
index d8835a164c6..00000000000
--- a/tests/models/transfo_xl/test_tokenization_transfo_xl.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import pickle
-import unittest
-from collections import Counter, OrderedDict
-
-from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
-
-from ...test_tokenization_common import TokenizerTesterMixin
-
-
-class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
-    tokenizer_class = TransfoXLTokenizer
-    test_rust_tokenizer = False
-    test_seq2seq = False
-
-    def setUp(self):
-        super().setUp()
-
-        vocab_tokens = [
-            "<unk>",
-            "[CLS]",
-            "[SEP]",
-            "want",
-            "unwanted",
-            "wa",
-            "un",
-            "running",
-            ",",
-            "low",
-            "l",
-        ]
-        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
-        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
-            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
-
-        saved_dict = {
-            "eos_idx": 0,
-            "min_freq": 0,
-            "vocab_file": None,
-            "counter": Counter(["welcome home"]),
-            "sym2idx": OrderedDict([("<unk>", 0), ("welcome", 1), ("home", 2)]),
-            "delimiter": None,
-            "idx2sym": ["<unk>", "welcome", "home"],
-            "max_size": None,
-            "lower_case": False,
-            "special": ["<unk>"],
-        }
-        self.pretrained_vocab_file = os.path.join(
-            self.tmpdirname, "mock_folder", VOCAB_FILES_NAMES["pretrained_vocab_file"]
-        )
-        os.makedirs(os.path.dirname(self.pretrained_vocab_file), exist_ok=True)
-        with open(self.pretrained_vocab_file, "wb") as f:
-            pickle.dump(saved_dict, f)
-
-    def get_tokenizer(self, **kwargs):
-        kwargs["lower_case"] = True
-        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
-
-    def get_input_output_texts(self, tokenizer):
-        input_text = "<unk> UNwanted , running"
-        output_text = "<unk> unwanted, running"
-        return input_text, output_text
-
-    def test_full_tokenizer(self):
-        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
-
-        tokens = tokenizer.tokenize("<unk> UNwanted , running")
-        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
-
-        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
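The `setUp` fixture above serializes the pretrained vocabulary with `pickle.dump`, which is the exact mechanism behind the deprecation: loading such a vocabulary calls `pickle.load` on a downloaded file, and unpickling can execute arbitrary code. A stripped-down sketch of the round trip the fixture relies on, using an illustrative local path:

```python
# Stripped-down sketch of the fixture's vocabulary round trip; the file
# path is illustrative. Never unpickle files from untrusted sources.
import pickle
from collections import Counter, OrderedDict

saved_dict = {
    "sym2idx": OrderedDict([("<unk>", 0), ("welcome", 1), ("home", 2)]),
    "idx2sym": ["<unk>", "welcome", "home"],
    "counter": Counter(["welcome home"]),
}
with open("pretrained_vocab.pkl", "wb") as f:
    pickle.dump(saved_dict, f)

# The unsafe half: pickle.load() will run whatever the file encodes,
# which is why downloading such vocab files led to the deprecation.
with open("pretrained_vocab.pkl", "rb") as f:
    restored = pickle.load(f)
assert restored["idx2sym"] == ["<unk>", "welcome", "home"]
```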
"), ["hello", "!", "how", "are", "you", "?"] - ) - - def test_full_tokenizer_no_lower(self): - tokenizer = TransfoXLTokenizer(lower_case=False) - - self.assertListEqual( - tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"] - ) - - def test_full_tokenizer_moses_numbers(self): - tokenizer = TransfoXLTokenizer(lower_case=False) - text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?" - tokens_out = [ - "Hello", - "(", - "bracket", - ")", - "and", - "side", - "@-@", - "scrolled", - "[", - "and", - "]", - "Henry", - "'s", - "$", - "5", - "@,@", - "000", - "with", - "3", - "@.@", - "34", - "m", - ".", - "What", - "'s", - "up", - "!", - "?", - ] - - self.assertListEqual(tokenizer.tokenize(text_in), tokens_out) - - self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in) - - def test_move_added_token(self): - tokenizer = self.get_tokenizer() - original_len = len(tokenizer) - - tokenizer.add_tokens(["new1", "new2"]) - tokenizer.move_added_token("new1", 1) - - # Check that moved token is not copied (duplicate) - self.assertEqual(len(tokenizer), original_len + 2) - # Check that token is moved to specified id - self.assertEqual(tokenizer.encode("new1"), [1]) - self.assertEqual(tokenizer.decode([1]), "new1") - - def test_from_pretrained_vocab_file(self): - tokenizer = TransfoXLTokenizer.from_pretrained(os.path.join(self.tmpdirname, "mock_folder")) - sentence = "welcome home" - self.assertEqual(tokenizer.decode(tokenizer.encode(sentence)), sentence) diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index c69b5ed77fe..06f05ec86c0 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -1927,7 +1927,6 @@ class ModelTesterMixin: "FunnelForPreTraining", "ElectraForPreTraining", "XLMWithLMHeadModel", - "TransfoXLLMHeadModel", ]: for k in key_differences: if k in ["loss", "losses"]: diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index d7cd62b41a0..7ac744263cc 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -465,7 +465,6 @@ class TFModelTesterMixin: "TFFunnelForPreTraining", "TFElectraForPreTraining", "TFXLMWithLMHeadModel", - "TFTransfoXLLMHeadModel", ]: for k in key_differences: if k in ["loss", "losses"]: diff --git a/utils/check_config_attributes.py b/utils/check_config_attributes.py index 7a116d5af31..a0060603351 100644 --- a/utils/check_config_attributes.py +++ b/utils/check_config_attributes.py @@ -127,7 +127,6 @@ SPECIAL_CASES_TO_ALLOW.update( "SwitchTransformersConfig": True, "TableTransformerConfig": True, "TapasConfig": True, - "TransfoXLConfig": True, "UniSpeechConfig": True, "UniSpeechSatConfig": True, "WavLMConfig": True, diff --git a/utils/check_docstrings.py b/utils/check_docstrings.py index cddd04ac651..f5f01a0d83f 100644 --- a/utils/check_docstrings.py +++ b/utils/check_docstrings.py @@ -738,7 +738,6 @@ OBJECTS_TO_IGNORE = [ "TrainerState", "TrainingArguments", "TrajectoryTransformerConfig", - "TransfoXLConfig", "TranslationPipeline", "TvltImageProcessor", "UMT5Config", diff --git a/utils/check_repo.py b/utils/check_repo.py index cac78bfe80c..fd5b4c255ab 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -397,13 +397,11 @@ def get_model_modules() -> List[str]: "modeling_flax_speech_encoder_decoder", "modeling_flax_vision_encoder_decoder", "modeling_timm_backbone", - "modeling_transfo_xl_utilities", "modeling_tf_auto", "modeling_tf_encoder_decoder", 
"modeling_tf_outputs", "modeling_tf_pytorch_utils", "modeling_tf_utils", - "modeling_tf_transfo_xl_utilities", "modeling_tf_vision_encoder_decoder", "modeling_vision_encoder_decoder", ] diff --git a/utils/not_doctested.txt b/utils/not_doctested.txt index 07775fe823a..070d0453a45 100644 --- a/utils/not_doctested.txt +++ b/utils/not_doctested.txt @@ -496,6 +496,11 @@ src/transformers/models/deprecated/tapex/tokenization_tapex.py src/transformers/models/deprecated/trajectory_transformer/configuration_trajectory_transformer.py src/transformers/models/deprecated/trajectory_transformer/convert_trajectory_transformer_original_pytorch_checkpoint_to_pytorch.py src/transformers/models/deprecated/trajectory_transformer/modeling_trajectory_transformer.py +src/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py +src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl.py +src/transformers/models/deprecated/transfo_xl/modeling_tf_transfo_xl_utilities.py +src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py +src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl_utilities.py src/transformers/models/deprecated/van/configuration_van.py src/transformers/models/deprecated/van/convert_van_to_pytorch.py src/transformers/models/deprecated/van/modeling_van.py @@ -818,11 +823,6 @@ src/transformers/models/tapas/modeling_tf_tapas.py src/transformers/models/timesformer/convert_timesformer_to_pytorch.py src/transformers/models/timm_backbone/configuration_timm_backbone.py src/transformers/models/timm_backbone/modeling_timm_backbone.py -src/transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py -src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py -src/transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py -src/transformers/models/transfo_xl/modeling_transfo_xl.py -src/transformers/models/transfo_xl/modeling_transfo_xl_utilities.py src/transformers/models/trocr/convert_trocr_unilm_to_pytorch.py src/transformers/models/tvlt/configuration_tvlt.py src/transformers/models/tvlt/modeling_tvlt.py @@ -990,4 +990,4 @@ src/transformers/utils/peft_utils.py src/transformers/utils/quantization_config.py src/transformers/utils/sentencepiece_model_pb2.py src/transformers/utils/sentencepiece_model_pb2_new.py -src/transformers/utils/versions.py \ No newline at end of file +src/transformers/utils/versions.py