diff --git a/.circleci/config.yml b/.circleci/config.yml index 637d1374929..9a81eea902e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -70,6 +70,27 @@ jobs: - run: sudo pip install pytest codecov pytest-cov - run: python -m pytest -sv ./transformers/tests/ --cov - run: codecov + build_py3_custom_tokenizers: + working_directory: ~/transformers + docker: + - image: circleci/python:3.5 + steps: + - checkout + - run: sudo pip install --progress-bar off . + - run: sudo pip install pytest + - run: sudo pip install mecab-python3 + - run: RUN_CUSTOM_TOKENIZERS=1 python -m pytest -sv ./transformers/tests/tokenization_bert_japanese_test.py + build_py2_custom_tokenizers: + working_directory: ~/transformers + docker: + - image: circleci/python:2.7 + steps: + - checkout + - run: sudo pip install --progress-bar off . + - run: sudo pip install pytest + - run: sudo apt-get -y install libmecab-dev mecab mecab-ipadic-utf8 swig + - run: sudo pip install mecab-python + - run: RUN_CUSTOM_TOKENIZERS=1 python -m pytest -sv ./transformers/tests/tokenization_bert_japanese_test.py deploy_doc: working_directory: ~/transformers docker: @@ -81,7 +102,17 @@ jobs: - checkout - run: sudo pip install --progress-bar off -r docs/requirements.txt - run: sudo pip install --progress-bar off -r requirements.txt - - run: cd docs && make clean && make html && scp -r -oStrictHostKeyChecking=no _build/html/* $doc:$dir + - run: ./.circleci/deploy.sh + repository_consistency: + working_directory: ~/transformers + docker: + - image: circleci/python:3.5 + resource_class: small + parallelism: 1 + steps: + - checkout + - run: sudo pip install requests + - run: python ./utils/link_tester.py workflow_filters: &workflow_filters filters: branches: @@ -91,9 +122,12 @@ workflows: version: 2 build_and_test: jobs: + - repository_consistency + - build_py3_custom_tokenizers + - build_py2_custom_tokenizers - build_py3_torch_and_tf - build_py3_torch - build_py3_tf - build_py2_torch - build_py2_tf - - deploy_doc: *workflow_filters \ No newline at end of file + - deploy_doc: *workflow_filters diff --git a/.circleci/deploy.sh b/.circleci/deploy.sh new file mode 100755 index 00000000000..a32581baef4 --- /dev/null +++ b/.circleci/deploy.sh @@ -0,0 +1,26 @@ +cd docs + +function deploy_doc(){ + echo "Creating doc at commit $1 and pushing to folder $2" + git checkout $1 + if [ ! 
-z "$2" ] + then + if [ -d "$dir/$2" ]; then + echo "Directory" $2 "already exists" + else + echo "Pushing version" $2 + make clean && make html && scp -r -oStrictHostKeyChecking=no _build/html $doc:$dir/$2 + fi + else + echo "Pushing master" + make clean && make html && scp -r -oStrictHostKeyChecking=no _build/html/* $doc:$dir + fi +} + +deploy_doc "master" +deploy_doc "b33a385" v1.0.0 +deploy_doc "fe02e45" v1.1.0 +deploy_doc "89fd345" v1.2.0 +deploy_doc "fc9faa8" v2.0.0 +deploy_doc "3ddce1d" v2.1.1 +deploy_doc "3616209" v2.2.0 diff --git a/.github/ISSUE_TEMPLATE/--new-model-addition.md b/.github/ISSUE_TEMPLATE/--new-model-addition.md index 96fd85269dc..456151d2a99 100644 --- a/.github/ISSUE_TEMPLATE/--new-model-addition.md +++ b/.github/ISSUE_TEMPLATE/--new-model-addition.md @@ -17,6 +17,7 @@ assignees: '' * [ ] the model implementation is available: (give details) * [ ] the model weights are available: (give details) +* [ ] who are the authors: (mention them) ## Additional context diff --git a/.gitignore b/.gitignore index ec60c045c47..c7896667070 100644 --- a/.gitignore +++ b/.gitignore @@ -137,4 +137,5 @@ examples/runs serialization_dir # emacs -*.*~ \ No newline at end of file +*.*~ +debug.env diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 817ba56aaf3..8228dd59d8e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -62,6 +62,8 @@ Awesome! Please provide the following information: If you are willing to contribute the model yourself, let us know so we can best guide you. +We have added a **detailed guide and templates** to guide you in the process of adding a new model. You can find them in the [`templates`](./templates) folder. + ### Do you want a new feature (that is not a model)? A world-class feature request addresses the following points: @@ -81,6 +83,8 @@ A world-class feature request addresses the following points: If your issue is well written we're already 80% of the way there by the time you post it. +We have added **templates** to guide you in the process of adding a new example script for training or testing the models in the library. You can find them in the [`templates`](./templates) folder. + ## Start contributing! (Pull Requests) Before writing code, we strongly advise you to search through the exising PRs or @@ -102,7 +106,7 @@ Follow these steps to start contributing: ```bash $ git clone git@github.com:/transformers.git $ cd transformers - $ git remote add upstream git@github.com:huggingface/transformers.git + $ git remote add upstream https://github.com/huggingface/transformers.git ``` 3. 
Create a new branch to hold your development changes: diff --git a/README.md b/README.md index da0de4ae810..f3aa8a95ee2 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ State-of-the-art NLP for everyone Lower compute costs, smaller carbon footprint - Researchers can share trained models instead of always retraining - Practitioners can reduce compute time and production costs -- 8 architectures with over 30 pretrained models, some in more than 100 languages +- 10 architectures with over 30 pretrained models, some in more than 100 languages Choose the right framework for every part of a model's lifetime - Train state-of-the-art models in 3 lines of code @@ -58,7 +58,7 @@ Choose the right framework for every part of a model's lifetime | [Quick tour: Fine-tuning/usage scripts](#quick-tour-of-the-fine-tuningusage-scripts) | Using provided scripts: GLUE, SQuAD and Text generation | | [Migrating from pytorch-transformers to transformers](#Migrating-from-pytorch-transformers-to-transformers) | Migrating your code from pytorch-transformers to transformers | | [Migrating from pytorch-pretrained-bert to pytorch-transformers](#Migrating-from-pytorch-pretrained-bert-to-transformers) | Migrating your code from pytorch-pretrained-bert to transformers | -| [Documentation](https://huggingface.co/transformers/) | Full API documentation and more | +| [Documentation][(v2.2.0/v2.2.1)](https://huggingface.co/transformers/v2.2.0) [(v2.1.1)](https://huggingface.co/transformers/v2.1.1) [(v2.0.0)](https://huggingface.co/transformers/v2.0.0) [(v1.2.0)](https://huggingface.co/transformers/v1.2.0) [(v1.1.0)](https://huggingface.co/transformers/v1.1.0) [(v1.0.0)](https://huggingface.co/transformers/v1.0.0) [(master)](https://huggingface.co/transformers) | Full API documentation and more | ## Installation @@ -86,21 +86,41 @@ When TensorFlow 2.0 and/or PyTorch has been installed, you can install from sour pip install [--editable] . ``` +### Run the examples + +Examples are included in the repository but are not shipped with the library. +Therefore, in order to run the latest versions of the examples you also need to install from source. To do so, create a new virtual environment and follow these steps: + +```bash +git clone https://github.com/huggingface/transformers +cd transformers +pip install [--editable] . +``` + ### Tests A series of tests are included for the library and the example scripts. Library tests can be found in the [tests folder](https://github.com/huggingface/transformers/tree/master/transformers/tests) and examples tests in the [examples folder](https://github.com/huggingface/transformers/tree/master/examples). -These tests can be run using `pytest` (install pytest if needed with `pip install pytest`). +These tests can be run using `unittest` or `pytest` (install pytest if needed with `pip install pytest`). Depending on which framework is installed (TensorFlow 2.0 and/or PyTorch), the irrelevant tests will be skipped. Ensure that both frameworks are installed if you want to execute all tests. You can run the tests from the root of the cloned repository with the commands: +```bash +python -m unittest discover -s transformers/tests -p "*test.py" -t . +python -m unittest discover -s examples -p "*test.py" -t examples +``` + +or + ```bash python -m pytest -sv ./transformers/tests/ python -m pytest -sv ./examples/ ``` +By default, slow tests are skipped. Set the `RUN_SLOW` environment variable to `yes` to run them. + ### Do you want to run a Transformer model on a mobile device? 
 You should check out our [`swift-coreml-transformers`](https://github.com/huggingface/swift-coreml-transformers) repo.
@@ -111,7 +131,7 @@ At some point in the future, you'll be able to seamlessly move from pre-training

 ## Model architectures

-🤗 Transformers currently provides 8 NLU/NLG architectures:
+🤗 Transformers currently provides 10 NLU/NLG architectures:

 1. **[BERT](https://github.com/google-research/bert)** (from Google) released with the paper [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.
 2. **[GPT](https://github.com/openai/finetune-transformer-lm)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
@@ -120,8 +140,11 @@ At some point in the future, you'll be able to seamlessly move from pre-training
 5. **[XLNet](https://github.com/zihangdai/xlnet/)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
 6. **[XLM](https://github.com/facebookresearch/XLM/)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
 7. **[RoBERTa](https://github.com/pytorch/fairseq/tree/master/examples/roberta)** (from Facebook), released together with the paper a [Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
-8. **[DistilBERT](https://github.com/huggingface/transformers/tree/master/examples/distillation)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/master/examples/distillation).
+8. **[DistilBERT](https://github.com/huggingface/transformers/tree/master/examples/distillation)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/master/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/master/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/master/examples/distillation) and a German version of DistilBERT.
 9. **[CTRL](https://github.com/salesforce/ctrl/)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
+10. **[CamemBERT](https://camembert-model.fr)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
+11. **[ALBERT](https://github.com/google-research/ALBERT)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.
+12. Want to contribute a new model? We have added a **detailed guide and templates** to guide you in the process of adding a new model. You can find them in the [`templates`](./templates) folder of the repository. Be sure to check the [contributing guidelines](./CONTRIBUTING.md) and contact the maintainers or open an issue to collect feedback before starting your PR.

 These implementations have been tested on several datasets (see the example scripts) and should match the performances of the original implementations (e.g. ~93 F1 on SQuAD for BERT Whole-Word-Masking, ~88 F1 on RocStories for OpenAI GPT, ~18.3 perplexity on WikiText 103 for Transformer-XL, ~0.916 Pearson R coefficient on STS-B for XLNet). You can find more details on the performances in the Examples section of the [documentation](https://huggingface.co/transformers/examples.html).

@@ -170,8 +193,7 @@ for model_class, tokenizer_class, pretrained_weights in MODELS:

 # Each architecture is provided with several classes for fine-tuning on down-stream tasks, e.g.
 BERT_MODEL_CLASSES = [BertModel, BertForPreTraining, BertForMaskedLM, BertForNextSentencePrediction,
-                      BertForSequenceClassification, BertForMultipleChoice, BertForTokenClassification,
-                      BertForQuestionAnswering]
+                      BertForSequenceClassification, BertForTokenClassification, BertForQuestionAnswering]

 # All the classes for an architecture can be initiated from pretrained weights for this architecture
 # Note that additional weights added for fine-tuning are only initialized
@@ -252,6 +274,11 @@ print("sentence_2 is", "a paraphrase" if pred_2 else "not a paraphrase", "of sen
 ## Quick tour of the fine-tuning/usage scripts

+**Important**
+Before running the fine-tuning scripts, please read the
+[instructions](#run-the-examples) on how to
+set up your environment to run the examples.
+ The library comprises several example scripts with SOTA performances for NLU and NLG tasks: - `run_glue.py`: an example fine-tuning Bert, XLNet and XLM on nine different GLUE tasks (*sequence-level classification*) @@ -413,7 +440,7 @@ and from the Salesforce CTRL model: python ./examples/run_generation.py \ --model_type=ctrl \ --length=20 \ - --model_name_or_path=gpt2 \ + --model_name_or_path=ctrl \ --temperature=0 \ --repetition_penalty=1.2 \ ``` @@ -520,12 +547,12 @@ Here is a conversion examples from `BertAdam` with a linear warmup and decay sch # Parameters: lr = 1e-3 max_grad_norm = 1.0 -num_total_steps = 1000 +num_training_steps = 1000 num_warmup_steps = 100 -warmup_proportion = float(num_warmup_steps) / float(num_total_steps) # 0.1 +warmup_proportion = float(num_warmup_steps) / float(num_training_steps) # 0.1 ### Previously BertAdam optimizer was instantiated like this: -optimizer = BertAdam(model.parameters(), lr=lr, schedule='warmup_linear', warmup=warmup_proportion, t_total=num_total_steps) +optimizer = BertAdam(model.parameters(), lr=lr, schedule='warmup_linear', warmup=warmup_proportion, t_total=num_training_steps) ### and used like this: for batch in train_data: loss = model(batch) @@ -534,9 +561,10 @@ for batch in train_data: ### In Transformers, optimizer and schedules are splitted and instantiated like this: optimizer = AdamW(model.parameters(), lr=lr, correct_bias=False) # To reproduce BertAdam specific behavior set correct_bias=False -scheduler = WarmupLinearSchedule(optimizer, warmup_steps=num_warmup_steps, t_total=num_total_steps) # PyTorch scheduler +scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps) # PyTorch scheduler ### and used like this: for batch in train_data: + model.train() loss = model(batch) loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm) # Gradient clipping is not in AdamW anymore (so you can use amp without issue) @@ -549,12 +577,11 @@ for batch in train_data: We now have a paper you can cite for the 🤗 Transformers library: ``` -@misc{wolf2019transformers, - title={Transformers: State-of-the-art Natural Language Processing}, - author={Thomas Wolf and Lysandre Debut and Victor Sanh and Julien Chaumond and Clement Delangue and Anthony Moi and Pierric Cistac and Tim Rault and Rémi Louf and Morgan Funtowicz and Jamie Brew}, - year={2019}, - eprint={1910.03771}, - archivePrefix={arXiv}, - primaryClass={cs.CL} +@article{Wolf2019HuggingFacesTS, + title={HuggingFace's Transformers: State-of-the-art Natural Language Processing}, + author={Thomas Wolf and Lysandre Debut and Victor Sanh and Julien Chaumond and Clement Delangue and Anthony Moi and Pierric Cistac and Tim Rault and R'emi Louf and Morgan Funtowicz and Jamie Brew}, + journal={ArXiv}, + year={2019}, + volume={abs/1910.03771} } ``` diff --git a/deploy_multi_version_doc.sh b/deploy_multi_version_doc.sh new file mode 100644 index 00000000000..bd567213eb3 --- /dev/null +++ b/deploy_multi_version_doc.sh @@ -0,0 +1,22 @@ +cd docs + +function deploy_doc(){ + echo "Creating doc at commit $1 and pushing to folder $2" + git checkout $1 + if [ ! 
-z "$2" ] + then + echo "Pushing version" $2 + make clean && make html && scp -r -oStrictHostKeyChecking=no _build/html $doc:$dir/$2 + else + echo "Pushing master" + make clean && make html && scp -r -oStrictHostKeyChecking=no _build/html/* $doc:$dir + fi +} + +deploy_doc "master" +deploy_doc "b33a385" v1.0.0 +deploy_doc "fe02e45" v1.1.0 +deploy_doc "89fd345" v1.2.0 +deploy_doc "fc9faa8" v2.0.0 +deploy_doc "3ddce1d" v2.1.1 +deploy_doc "f2f3294" v2.2.0 \ No newline at end of file diff --git a/docs/source/_static/js/custom.js b/docs/source/_static/js/custom.js index 2c7836fd204..ec804b3704a 100644 --- a/docs/source/_static/js/custom.js +++ b/docs/source/_static/js/custom.js @@ -1,5 +1,5 @@ function addIcon() { - const huggingFaceLogo = "https://huggingface.co/assets/transformers-docs/huggingface_logo.svg"; + const huggingFaceLogo = "https://huggingface.co/landing/assets/transformers-docs/huggingface_logo.svg"; const image = document.createElement("img"); image.setAttribute("src", huggingFaceLogo); @@ -24,10 +24,10 @@ function addCustomFooter() { social.classList.add("footer__Social"); const imageDetails = [ - { link: "https://huggingface.co", imageLink: "https://huggingface.co/assets/transformers-docs/website.svg" }, - { link: "https://twitter.com/huggingface", imageLink: "https://huggingface.co/assets/transformers-docs/twitter.svg" }, - { link: "https://github.com/huggingface", imageLink: "https://huggingface.co/assets/transformers-docs/github.svg" }, - { link: "https://www.linkedin.com/company/huggingface/", imageLink: "https://huggingface.co/assets/transformers-docs/linkedin.svg" } + { link: "https://huggingface.co", imageLink: "https://huggingface.co/landing/assets/transformers-docs/website.svg" }, + { link: "https://twitter.com/huggingface", imageLink: "https://huggingface.co/landing/assets/transformers-docs/twitter.svg" }, + { link: "https://github.com/huggingface", imageLink: "https://huggingface.co/landing/assets/transformers-docs/github.svg" }, + { link: "https://www.linkedin.com/company/huggingface/", imageLink: "https://huggingface.co/landing/assets/transformers-docs/linkedin.svg" } ]; imageDetails.forEach(imageLinks => { diff --git a/docs/source/conf.py b/docs/source/conf.py index 00c020ab39c..2f8505ab3a7 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -26,7 +26,7 @@ author = u'huggingface' # The short X.Y version version = u'' # The full version, including alpha/beta/rc tags -release = u'2.1.1' +release = u'2.2.1' # -- General configuration --------------------------------------------------- diff --git a/docs/source/index.rst b/docs/source/index.rst index 4cd1f48ba83..84012fc6cfe 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -47,6 +47,9 @@ The library currently contains PyTorch and Tensorflow implementations, pre-train 6. `XLM `_ (from Facebook) released together with the paper `Cross-lingual Language Model Pretraining `_ by Guillaume Lample and Alexis Conneau. 7. `RoBERTa `_ (from Facebook), released together with the paper a `Robustly Optimized BERT Pretraining Approach `_ by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov. 8. `DistilBERT `_ (from HuggingFace) released together with the paper `DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter `_ by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into `DistilGPT2 `_. +9. 
`CTRL `_ (from Salesforce), released together with the paper `CTRL: A Conditional Transformer Language Model for Controllable Generation `_ by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher. +10. `CamemBERT `_ (from FAIR, Inria, Sorbonne Université) released together with the paper `CamemBERT: a Tasty French Language Model `_ by Louis Martin, Benjamin Muller, Pedro Javier Ortiz Suarez, Yoann Dupont, Laurent Romary, Eric Villemonte de la Clergerie, Djame Seddah, and Benoît Sagot. +11. `ALBERT `_ (from Google Research), released together with the paper a `ALBERT: A Lite BERT for Self-supervised Learning of Language Representations `_ by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut. .. toctree:: :maxdepth: 2 @@ -89,3 +92,5 @@ The library currently contains PyTorch and Tensorflow implementations, pre-train model_doc/roberta model_doc/distilbert model_doc/ctrl + model_doc/camembert + model_doc/albert diff --git a/docs/source/installation.md b/docs/source/installation.md index 11beb1ab3ae..6263f7604dd 100644 --- a/docs/source/installation.md +++ b/docs/source/installation.md @@ -24,15 +24,24 @@ pip install [--editable] . An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the [tests folder](https://github.com/huggingface/transformers/tree/master/transformers/tests) and examples tests in the [examples folder](https://github.com/huggingface/transformers/tree/master/examples). -Tests can be run using `pytest` (install pytest if needed with `pip install pytest`). +Tests can be run using `unittest` or `pytest` (install pytest if needed with `pip install pytest`). Run all the tests from the root of the cloned repository with the commands: +```bash +python -m unittest discover -s transformers/tests -p "*test.py" -t . +python -m unittest discover -s examples -p "*test.py" -t examples +``` + +or + ``` bash python -m pytest -sv ./transformers/tests/ python -m pytest -sv ./examples/ ``` +By default, slow tests are skipped. Set the `RUN_SLOW` environment variable to `yes` to run them. + ## OpenAI GPT original tokenization workflow If you want to reproduce the original tokenization process of the `OpenAI GPT` paper, you will need to install `ftfy` (use version 4.4.3 if you are using Python 2) and `SpaCy`: diff --git a/docs/source/main_classes/optimizer_schedules.rst b/docs/source/main_classes/optimizer_schedules.rst index ff0c9e6929c..22ed1b28fb0 100644 --- a/docs/source/main_classes/optimizer_schedules.rst +++ b/docs/source/main_classes/optimizer_schedules.rst @@ -5,6 +5,7 @@ The ``.optimization`` module provides: - an optimizer with weight decay fixed that can be used to fine-tuned models, and - several schedules in the form of schedule objects that inherit from ``_LRSchedule``: +- a gradient accumulation class to accumulate the gradients of multiple batches ``AdamW`` ~~~~~~~~~~~~~~~~ @@ -12,25 +13,32 @@ The ``.optimization`` module provides: .. autoclass:: transformers.AdamW :members: +``AdamWeightDecay`` +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: transformers.AdamWeightDecay + :members: + +.. autofunction:: transformers.create_optimizer + :members: + Schedules ---------------------------------------------------- Learning Rate Schedules ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. autoclass:: transformers.ConstantLRSchedule - :members: +.. autofunction:: transformers.get_constant_schedule -.. 
autoclass:: transformers.WarmupConstantSchedule - :members: +.. autofunction:: transformers.get_constant_schedule_with_warmup .. image:: /imgs/warmup_constant_schedule.png :target: /imgs/warmup_constant_schedule.png :alt: -.. autoclass:: transformers.WarmupCosineSchedule +.. autofunction:: transformers.get_cosine_schedule_with_warmup :members: .. image:: /imgs/warmup_cosine_schedule.png @@ -38,8 +46,7 @@ Learning Rate Schedules :alt: -.. autoclass:: transformers.WarmupCosineWithHardRestartsSchedule - :members: +.. autofunction:: transformers.get_cosine_with_hard_restarts_schedule_with_warmup .. image:: /imgs/warmup_cosine_hard_restarts_schedule.png :target: /imgs/warmup_cosine_hard_restarts_schedule.png @@ -47,9 +54,22 @@ Learning Rate Schedules -.. autoclass:: transformers.WarmupLinearSchedule - :members: +.. autofunction:: transformers.get_linear_schedule_with_warmup .. image:: /imgs/warmup_linear_schedule.png :target: /imgs/warmup_linear_schedule.png :alt: + +``Warmup`` +~~~~~~~~~~~~~~~~ + +.. autoclass:: transformers.Warmup + :members: + +Gradient Strategies +---------------------------------------------------- + +``GradientAccumulator`` +~~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: transformers.GradientAccumulator diff --git a/docs/source/main_classes/processors.rst b/docs/source/main_classes/processors.rst index a85c1269564..46839ce67e6 100644 --- a/docs/source/main_classes/processors.rst +++ b/docs/source/main_classes/processors.rst @@ -54,5 +54,100 @@ Additionally, the following method can be used to load values from a data file Example usage ^^^^^^^^^^^^^^^^^^^^^^^^^ +An example using these processors is given in the `run_glue.py `__ script. + + +XNLI +~~~~~~~~~~~~~~~~~~~~~ + +`The Cross-Lingual NLI Corpus (XNLI) `__ is a benchmark that evaluates +the quality of cross-lingual text representations. +XNLI is crowd-sourced dataset based on `MultiNLI `: pairs of text are labeled with textual entailment +annotations for 15 different languages (including both high-ressource language such as English and low-ressource languages such as Swahili). + +It was released together with the paper +`XNLI: Evaluating Cross-lingual Sentence Representations `__ + +This library hosts the processor to load the XNLI data: + - :class:`~transformers.data.processors.utils.XnliProcessor` + +Please note that since the gold labels are available on the test set, evaluation is performed on the test set. + An example using these processors is given in the -`run_glue.py `__ script. \ No newline at end of file +`run_xnli.py `__ script. + + +SQuAD +~~~~~~~~~~~~~~~~~~~~~ + +`The Stanford Question Answering Dataset (SQuAD) `__ is a benchmark that evaluates +the performance of models on question answering. Two versions are available, v1.1 and v2.0. The first version (v1.1) was released together with the paper +`SQuAD: 100,000+ Questions for Machine Comprehension of Text `__. The second version (v2.0) was released alongside +the paper `Know What You Don't Know: Unanswerable Questions for SQuAD `__. + +This library hosts a processor for each of the two versions: + +Processors +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Those processors are: + - :class:`~transformers.data.processors.utils.SquadV1Processor` + - :class:`~transformers.data.processors.utils.SquadV2Processor` + +They both inherit from the abstract class :class:`~transformers.data.processors.utils.SquadProcessor` + +.. 
autoclass:: transformers.data.processors.squad.SquadProcessor + :members: + +Additionally, the following method can be used to convert SQuAD examples into :class:`~transformers.data.processors.utils.SquadFeatures` +that can be used as model inputs. + +.. automethod:: transformers.data.processors.squad.squad_convert_examples_to_features + +These processors as well as the aforementionned method can be used with files containing the data as well as with the `tensorflow_datasets` package. +Examples are given below. + + +Example usage +^^^^^^^^^^^^^^^^^^^^^^^^^ +Here is an example using the processors as well as the conversion method using data files: + +Example:: + + # Loading a V2 processor + processor = SquadV2Processor() + examples = processor.get_dev_examples(squad_v2_data_dir) + + # Loading a V1 processor + processor = SquadV1Processor() + examples = processor.get_dev_examples(squad_v1_data_dir) + + features = squad_convert_examples_to_features( + examples=examples, + tokenizer=tokenizer, + max_seq_length=max_seq_length, + doc_stride=args.doc_stride, + max_query_length=max_query_length, + is_training=not evaluate, + ) + +Using `tensorflow_datasets` is as easy as using a data file: + +Example:: + + # tensorflow_datasets only handle Squad V1. + tfds_examples = tfds.load("squad") + examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) + + features = squad_convert_examples_to_features( + examples=examples, + tokenizer=tokenizer, + max_seq_length=max_seq_length, + doc_stride=args.doc_stride, + max_query_length=max_query_length, + is_training=not evaluate, + ) + + +Another example using these processors is given in the +`run_squad.py `__ script. diff --git a/docs/source/migration.md b/docs/source/migration.md index 553a79c82b0..f50d1dff0a8 100644 --- a/docs/source/migration.md +++ b/docs/source/migration.md @@ -84,12 +84,12 @@ Here is a conversion examples from `BertAdam` with a linear warmup and decay sch # Parameters: lr = 1e-3 max_grad_norm = 1.0 -num_total_steps = 1000 +num_training_steps = 1000 num_warmup_steps = 100 -warmup_proportion = float(num_warmup_steps) / float(num_total_steps) # 0.1 +warmup_proportion = float(num_warmup_steps) / float(num_training_steps) # 0.1 ### Previously BertAdam optimizer was instantiated like this: -optimizer = BertAdam(model.parameters(), lr=lr, schedule='warmup_linear', warmup=warmup_proportion, t_total=num_total_steps) +optimizer = BertAdam(model.parameters(), lr=lr, schedule='warmup_linear', warmup=warmup_proportion, num_training_steps=num_training_steps) ### and used like this: for batch in train_data: loss = model(batch) @@ -98,12 +98,12 @@ for batch in train_data: ### In Transformers, optimizer and schedules are splitted and instantiated like this: optimizer = AdamW(model.parameters(), lr=lr, correct_bias=False) # To reproduce BertAdam specific behavior set correct_bias=False -scheduler = WarmupLinearSchedule(optimizer, warmup_steps=num_warmup_steps, t_total=num_total_steps) # PyTorch scheduler +scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps) # PyTorch scheduler ### and used like this: for batch in train_data: loss = model(batch) loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm) # Gradient clipping is not in AdamW anymore (so you can use amp without issue) - scheduler.step() optimizer.step() + scheduler.step() ``` diff --git a/docs/source/model_doc/albert.rst b/docs/source/model_doc/albert.rst new file 
mode 100644
index 00000000000..92970c93284
--- /dev/null
+++ b/docs/source/model_doc/albert.rst
@@ -0,0 +1,64 @@
+ALBERT
+----------------------------------------------------
+
+``AlbertConfig``
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.AlbertConfig
+    :members:
+
+
+``AlbertTokenizer``
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.AlbertTokenizer
+    :members:
+
+
+``AlbertModel``
+~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.AlbertModel
+    :members:
+
+
+``AlbertForMaskedLM``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.AlbertForMaskedLM
+    :members:
+
+
+``AlbertForSequenceClassification``
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.AlbertForSequenceClassification
+    :members:
+
+
+``AlbertForQuestionAnswering``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.AlbertForQuestionAnswering
+    :members:
+
+
+``TFAlbertModel``
+~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.TFAlbertModel
+    :members:
+
+
+``TFAlbertForMaskedLM``
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.TFAlbertForMaskedLM
+    :members:
+
+
+``TFAlbertForSequenceClassification``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.TFAlbertForSequenceClassification
+    :members:
diff --git a/docs/source/model_doc/camembert.rst b/docs/source/model_doc/camembert.rst
new file mode 100644
index 00000000000..82ca9de9456
--- /dev/null
+++ b/docs/source/model_doc/camembert.rst
@@ -0,0 +1,50 @@
+CamemBERT
+----------------------------------------------------
+
+``CamembertConfig``
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.CamembertConfig
+    :members:
+
+
+``CamembertTokenizer``
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.CamembertTokenizer
+    :members:
+
+
+``CamembertModel``
+~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.CamembertModel
+    :members:
+
+
+``CamembertForMaskedLM``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.CamembertForMaskedLM
+    :members:
+
+
+``CamembertForSequenceClassification``
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.CamembertForSequenceClassification
+    :members:
+
+
+``CamembertForMultipleChoice``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.CamembertForMultipleChoice
+    :members:
+
+
+``CamembertForTokenClassification``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: transformers.CamembertForTokenClassification
+    :members:
diff --git a/docs/source/model_doc/ctrl.rst b/docs/source/model_doc/ctrl.rst
index 9fd5b4acdbf..36b37b3ee12 100644
--- a/docs/source/model_doc/ctrl.rst
+++ b/docs/source/model_doc/ctrl.rst
@@ -1,6 +1,11 @@
 CTRL
 ----------------------------------------------------

+Note: if you fine-tune a CTRL model using the Salesforce code (https://github.com/salesforce/ctrl),
+you'll be able to convert from TF to our HuggingFace/Transformers format using the
+``convert_tf_to_huggingface_pytorch.py`` script (see `issue #1654 <https://github.com/huggingface/transformers/issues/1654>`_).
+
+
 ``CTRLConfig``
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/docs/source/pretrained_models.rst b/docs/source/pretrained_models.rst
index 1d02cd0dd76..775772e896a 100644
--- a/docs/source/pretrained_models.rst
+++ b/docs/source/pretrained_models.rst
@@ -61,6 +61,24 @@ Here is the full list of the currently provided pretrained models together with
 | | ``bert-base-german-dbmdz-uncased`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. |
 | | | | Trained on uncased German text by DBMDZ |
 | | | (see `details on dbmdz repository `__). 
| +| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``bert-base-japanese`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. | +| | | | Trained on Japanese text. Text is tokenized with MeCab and WordPiece. | +| | | | `MeCab `__ is required for tokenization. | +| | | (see `details on cl-tohoku repository `__). | +| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``bert-base-japanese-whole-word-masking`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. | +| | | | Trained on Japanese text using Whole-Word-Masking. Text is tokenized with MeCab and WordPiece. | +| | | | `MeCab `__ is required for tokenization. | +| | | (see `details on cl-tohoku repository `__). | +| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``bert-base-japanese-char`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. | +| | | | Trained on Japanese text. Text is tokenized into characters. | +| | | (see `details on cl-tohoku repository `__). | +| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``bert-base-japanese-char-whole-word-masking`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. | +| | | | Trained on Japanese text using Whole-Word-Masking. Text is tokenized into characters. | +| | | (see `details on cl-tohoku repository `__). | +-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ | GPT | ``openai-gpt`` | | 12-layer, 768-hidden, 12-heads, 110M parameters. | | | | | OpenAI GPT English model | @@ -73,6 +91,9 @@ Here is the full list of the currently provided pretrained models together with | +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ | | ``gpt2-large`` | | 36-layer, 1280-hidden, 20-heads, 774M parameters. | | | | | OpenAI's Large-sized GPT-2 English model | +| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``gpt2-xl`` | | 48-layer, 1600-hidden, 25-heads, 1558M parameters. | +| | | | OpenAI's XL-sized GPT-2 English model | +-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ | Transformer-XL | ``transfo-xl-wt103`` | | 18-layer, 1024-hidden, 16-heads, 257M parameters. | | | | | English model trained on wikitext-103 | @@ -124,6 +145,14 @@ Here is the full list of the currently provided pretrained models together with | | ``roberta-large-mnli`` | | 24-layer, 1024-hidden, 16-heads, 355M parameters | | | | | ``roberta-large`` fine-tuned on `MNLI `__. 
| | | | (see `details `__) | +| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``roberta-base-openai-detector`` | | 12-layer, 768-hidden, 12-heads, 125M parameters | +| | | | ``roberta-base`` fine-tuned by OpenAI on the outputs of the 1.5B-parameter GPT-2 model. | +| | | (see `details `__) | +| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``roberta-large-openai-detector`` | | 24-layer, 1024-hidden, 16-heads, 355M parameters | +| | | | ``roberta-large`` fine-tuned by OpenAI on the outputs of the 1.5B-parameter GPT-2 model. | +| | | (see `details `__) | +-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ | DistilBERT | ``distilbert-base-uncased`` | | 6-layer, 768-hidden, 12-heads, 66M parameters | | | | | The DistilBERT model distilled from the BERT model `bert-base-uncased` checkpoint | @@ -136,9 +165,58 @@ Here is the full list of the currently provided pretrained models together with | | ``distilgpt2`` | | 6-layer, 768-hidden, 12-heads, 82M parameters | | | | | The DistilGPT2 model distilled from the GPT2 model `gpt2` checkpoint. | | | | (see `details `__) | +| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``distilroberta-base`` | | 6-layer, 768-hidden, 12-heads, 82M parameters | +| | | | The DistilRoBERTa model distilled from the RoBERTa model `roberta-base` checkpoint. | +| | | (see `details `__) | +| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``distilbert-base-german-cased`` | | 6-layer, 768-hidden, 12-heads, 66M parameters | +| | | | The German DistilBERT model distilled from the German DBMDZ BERT model `bert-base-german-dbmdz-cased` checkpoint. | +| | | (see `details `__) | +| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``distilbert-base-multilingual-cased`` | | 6-layer, 768-hidden, 12-heads, 134M parameters | +| | | | The multilingual DistilBERT model distilled from the Multilingual BERT model `bert-base-multilingual-cased` checkpoint. 
| +| | | (see `details `__) | +-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ | CTRL | ``ctrl`` | | 48-layer, 1280-hidden, 16-heads, 1.6B parameters | | | | | Salesforce's Large-sized CTRL English model | +-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| CamemBERT | ``camembert-base`` | | 12-layer, 768-hidden, 12-heads, 110M parameters | +| | | | CamemBERT using the BERT-base architecture | +| | | (see `details `__) | ++-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| ALBERT | ``albert-base-v1`` | | 12 repeating layers, 128 embedding, 768-hidden, 12-heads, 11M parameters | +| | | | ALBERT base model | +| | | (see `details `__) | +| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``albert-large-v1`` | | 24 repeating layers, 128 embedding, 1024-hidden, 16-heads, 17M parameters | +| | | | ALBERT large model | +| | | (see `details `__) | +| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``albert-xlarge-v1`` | | 24 repeating layers, 128 embedding, 2048-hidden, 16-heads, 58M parameters | +| | | | ALBERT xlarge model | +| | | (see `details `__) | +| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``albert-xxlarge-v1`` | | 12 repeating layer, 128 embedding, 4096-hidden, 64-heads, 223M parameters | +| | | | ALBERT xxlarge model | +| | | (see `details `__) | +| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``albert-base-v2`` | | 12 repeating layers, 128 embedding, 768-hidden, 12-heads, 11M parameters | +| | | | ALBERT base model with no dropout, additional training data and longer training | +| | | (see `details `__) | +| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``albert-large-v2`` | | 24 repeating layers, 128 embedding, 1024-hidden, 16-heads, 17M parameters | +| | | | ALBERT large model with no dropout, additional training data and longer training | +| | | (see `details `__) | +| +------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``albert-xlarge-v2`` | | 24 repeating layers, 128 embedding, 2048-hidden, 16-heads, 58M parameters | +| | | | ALBERT xlarge model with no dropout, additional training data and longer training | +| | | (see `details `__) | +| 
+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ +| | ``albert-xxlarge-v2`` | | 12 repeating layer, 128 embedding, 4096-hidden, 64-heads, 223M parameters | +| | | | ALBERT xxlarge model with no dropout, additional training data and longer training | +| | | (see `details `__) | ++-------------------+------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------+ -.. `__ \ No newline at end of file + +.. `__ diff --git a/docs/source/quickstart.md b/docs/source/quickstart.md index ccba75e7c07..530aff8eb01 100644 --- a/docs/source/quickstart.md +++ b/docs/source/quickstart.md @@ -188,3 +188,35 @@ assert predicted_text == 'Who was Jim Henson? Jim Henson was a man' ``` Examples for each model class of each model architecture (Bert, GPT, GPT-2, Transformer-XL, XLNet and XLM) can be found in the [documentation](#documentation). + +#### Using the past + +GPT-2 as well as some other models (GPT, XLNet, Transfo-XL, CTRL) make use of a `past` or `mems` attribute which can be used to prevent re-computing the key/value pairs when using sequential decoding. It is useful when generating sequences as a big part of the attention mechanism benefits from previous computations. + +Here is a fully-working example using the `past` with `GPT2LMHeadModel` and argmax decoding (which should only be used as an example, as argmax decoding introduces a lot of repetition): + +```python +from transformers import GPT2LMHeadModel, GPT2Tokenizer +import torch + +tokenizer = GPT2Tokenizer.from_pretrained("gpt2") +model = GPT2LMHeadModel.from_pretrained('gpt2') + +generated = tokenizer.encode("The Manhattan bridge") +context = torch.tensor([generated]) +past = None + +for i in range(100): + print(i) + output, past = model(context, past=past) + token = torch.argmax(output[0, :]) + + generated += [token.tolist()] + context = token.unsqueeze(0) + +sequence = tokenizer.decode(generated) + +print(sequence) +``` + +The model only requires a single token as input as all the previous tokens' key/value pairs are contained in the `past`. \ No newline at end of file diff --git a/docs/source/serialization.rst b/docs/source/serialization.rst index c948104d69f..d2862dc0b50 100644 --- a/docs/source/serialization.rst +++ b/docs/source/serialization.rst @@ -106,7 +106,7 @@ This section explain how you can save and re-load a fine-tuned model (BERT, GPT, There are three types of files you need to save to be able to reload a fine-tuned model: -* the model it-self which should be saved following PyTorch serialization `best practices `__\ , +* the model itself which should be saved following PyTorch serialization `best practices `__\ , * the configuration file of the model which is saved as a JSON file, and * the vocabulary (and the merges for the BPE-based models GPT and GPT-2). diff --git a/examples/README.md b/examples/README.md index 6b68d880eb2..b6b39088108 100644 --- a/examples/README.md +++ b/examples/README.md @@ -3,15 +3,27 @@ In this section a few examples are put together. All of these examples work for several models, making use of the very similar API between the different models. +**Important** +To run the latest versions of the examples, you have to install from source and install some specific requirements for the examples. 
+Execute the following steps in a new virtual environment:
+
+```bash
+git clone https://github.com/huggingface/transformers
+cd transformers
+pip install [--editable] .
+pip install -r ./examples/requirements.txt
+```
+
 | Section | Description |
 |----------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | [TensorFlow 2.0 models on GLUE](#TensorFlow-2.0-Bert-models-on-GLUE) | Examples running BERT TensorFlow 2.0 model on the GLUE tasks.
 | [Language Model fine-tuning](#language-model-fine-tuning) | Fine-tuning the library models for language modeling on a text dataset. Causal language modeling for GPT/GPT-2, masked language modeling for BERT/RoBERTa. |
 | [Language Generation](#language-generation) | Conditional text generation using the auto-regressive models of the library: GPT, GPT-2, Transformer-XL and XLNet. |
 | [GLUE](#glue) | Examples running BERT/XLM/XLNet/RoBERTa on the 9 GLUE tasks. Examples feature distributed training as well as half-precision. |
-| [SQuAD](#squad) | Using BERT/XLM/XLNet/RoBERTa for question answering, examples with distributed training. |
+| [SQuAD](#squad) | Using BERT/RoBERTa/XLNet/XLM for question answering, examples with distributed training. |
 | [Multiple Choice](#multiple-choice) | Examples running BERT/XLNet/RoBERTa on the SWAG/RACE/ARC tasks.
 | [Named Entity Recognition](#named-entity-recognition) | Using BERT for Named Entity Recognition (NER) on the CoNLL 2003 dataset, examples with distributed training. |
+| [XNLI](#xnli) | Examples running BERT/XLM on the XNLI benchmark. |

 ## TensorFlow 2.0 Bert models on GLUE

@@ -101,7 +113,7 @@ python run_lm_finetuning.py \

 Based on the script [`run_generation.py`](https://github.com/huggingface/transformers/blob/master/examples/run_generation.py).

-Conditional text generation using the auto-regressive models of the library: GPT, GPT-2, Transformer-XL and XLNet.
+Conditional text generation using the auto-regressive models of the library: GPT, GPT-2, Transformer-XL, XLNet, CTRL.

 A similar script is used for our official demo [Write With Transformer](https://transformer.huggingface.co), where you can try out the different models available in the library.

@@ -411,12 +423,51 @@ f1 = 93.15
 exact_match = 86.91
 ```

-This fine-tuneds model is available as a checkpoint under the reference
+This fine-tuned model is available as a checkpoint under the reference
 `bert-large-uncased-whole-word-masking-finetuned-squad`.

+#### Fine-tuning XLNet on SQuAD
+
+This example code fine-tunes XLNet on the SQuAD dataset. See above to download the data for SQuAD.
+
+```bash
+export SQUAD_DIR=/path/to/SQUAD
+
+python /data/home/hlu/transformers/examples/run_squad.py \
+    --model_type xlnet \
+    --model_name_or_path xlnet-large-cased \
+    --do_train \
+    --do_eval \
+    --do_lower_case \
+    --train_file /data/home/hlu/notebooks/NLP/examples/question_answering/train-v1.1.json \
+    --predict_file /data/home/hlu/notebooks/NLP/examples/question_answering/dev-v1.1.json \
+    --learning_rate 3e-5 \
+    --num_train_epochs 2 \
+    --max_seq_length 384 \
+    --doc_stride 128 \
+    --output_dir ./wwm_cased_finetuned_squad/ \
+    --per_gpu_eval_batch_size=4 \
+    --per_gpu_train_batch_size=4 \
+    --save_steps 5000
+```
+
+Training with the previously defined hyper-parameters yields the following results:
+
+```python
+{
+"exact": 85.45884578997162,
+"f1": 92.5974600601065,
+"total": 10570,
+"HasAns_exact": 85.45884578997162,
+"HasAns_f1": 92.59746006010651,
+"HasAns_total": 10570
+}
+```
+
 ## Named Entity Recognition

-Based on the script [`run_ner.py`](https://github.com/huggingface/transformers/blob/master/examples/run_ner.py).
+Based on the scripts [`run_ner.py`](https://github.com/huggingface/transformers/blob/master/examples/run_ner.py) for Pytorch and
+[`run_tf_ner.py`](https://github.com/huggingface/transformers/blob/master/examples/run_tf_ner.py) for Tensorflow 2.

 This example fine-tunes Bert Multilingual on GermEval 2014 (German NER). Details and results for the fine-tuning
 provided by @stefan-it.
@@ -461,7 +512,7 @@ The GermEval 2014 dataset has much more labels than CoNLL-2002/2003 datasets, so
 cat train.txt dev.txt test.txt | cut -d " " -f 2 | grep -v "^$"| sort | uniq > labels.txt
 ```

-### Training
+### Prepare the run

 Additional environment variables must be set:

@@ -473,6 +524,8 @@ export SAVE_STEPS=750
 export SEED=1
 ```

+### Run the Pytorch version
+
 To start training, just run:

 ```bash
@@ -493,7 +546,7 @@ python3 run_ner.py --data_dir ./ \

 If your GPU supports half-precision training, just add the `--fp16` flag. After training, the model will be evaluated on both the development and test datasets.

-### Evaluation
+#### Evaluation

 Evaluation on development dataset outputs the following for our example:

@@ -514,3 +567,119 @@ On the test dataset the following results could be achieved:
 10/04/2019 00:42:42 - INFO - __main__ - precision = 0.8604651162790697
 10/04/2019 00:42:42 - INFO - __main__ - recall = 0.8624150210424085
 ```
+
+#### Comparing BERT (large, cased), RoBERTa (large, cased) and DistilBERT (base, uncased)
+
+Here is a small comparison between BERT (large, cased), RoBERTa (large, cased) and DistilBERT (base, uncased) with the same hyperparameters as specified in the [example documentation](https://huggingface.co/transformers/examples.html#named-entity-recognition) (one run):
+
+| Model | F-Score Dev | F-Score Test
+| --------------------------------- | ------- | --------
+| `bert-large-cased` | 95.59 | 91.70
+| `roberta-large` | 95.96 | 91.87
+| `distilbert-base-uncased` | 94.34 | 90.32
+
+### Run the Tensorflow 2 version
+
+To start training, just run:
+
+```bash
+python3 run_tf_ner.py --data_dir ./ \
+--model_type bert \
+--labels ./labels.txt \
+--model_name_or_path $BERT_MODEL \
+--output_dir $OUTPUT_DIR \
+--max_seq_length $MAX_LENGTH \
+--num_train_epochs $NUM_EPOCHS \
+--per_device_train_batch_size $BATCH_SIZE \
+--save_steps $SAVE_STEPS \
+--seed $SEED \
+--do_train \
+--do_eval \
+--do_predict
+```
+
+As with the Pytorch version, if your GPU supports half-precision training, just add the `--fp16` flag. After training, the model will be evaluated on both the development and test datasets.
+
+#### Evaluation
+
+Evaluation on development dataset outputs the following for our example:
+```bash
+           precision    recall  f1-score   support
+
+ LOCderiv     0.7619    0.6154    0.6809        52
+  PERpart     0.8724    0.8997    0.8858      4057
+  OTHpart     0.9360    0.9466    0.9413       711
+  ORGpart     0.7015    0.6989    0.7002       269
+  LOCpart     0.7668    0.8488    0.8057       496
+      LOC     0.8745    0.9191    0.8963       235
+ ORGderiv     0.7723    0.8571    0.8125        91
+ OTHderiv     0.4800    0.6667    0.5581        18
+      OTH     0.5789    0.6875    0.6286        16
+ PERderiv     0.5385    0.3889    0.4516        18
+      PER     0.5000    0.5000    0.5000         2
+      ORG     0.0000    0.0000    0.0000         3
+
+micro avg     0.8574    0.8862    0.8715      5968
+macro avg     0.8575    0.8862    0.8713      5968
+```
+
+On the test dataset the following results could be achieved:
+```bash
+           precision    recall  f1-score   support
+
+  PERpart     0.8847    0.8944    0.8896      9397
+  OTHpart     0.9376    0.9353    0.9365      1639
+  ORGpart     0.7307    0.7044    0.7173       697
+      LOC     0.9133    0.9394    0.9262       561
+  LOCpart     0.8058    0.8157    0.8107      1150
+      ORG     0.0000    0.0000    0.0000         8
+ OTHderiv     0.5882    0.4762    0.5263        42
+ PERderiv     0.6571    0.5227    0.5823        44
+      OTH     0.4906    0.6667    0.5652        39
+ ORGderiv     0.7016    0.7791    0.7383       172
+ LOCderiv     0.8256    0.6514    0.7282       109
+      PER     0.0000    0.0000    0.0000        11
+
+micro avg     0.8722    0.8774    0.8748     13869
+macro avg     0.8712    0.8774    0.8740     13869
+```
+
+## XNLI
+
+Based on the script [`run_xnli.py`](https://github.com/huggingface/transformers/blob/master/examples/run_xnli.py).
+
+[XNLI](https://www.nyu.edu/projects/bowman/xnli/) is a crowd-sourced dataset based on [MultiNLI](http://www.nyu.edu/projects/bowman/multinli/). It is an evaluation benchmark for cross-lingual text representations. Pairs of text are labeled with textual entailment annotations for 15 different languages (including both high-resource languages such as English and low-resource languages such as Swahili).
+
+#### Fine-tuning on XNLI
+
+This example code fine-tunes mBERT (multi-lingual BERT) on the XNLI dataset. It runs in 106 mins
+on a single Tesla V100 16GB. The data for XNLI can be downloaded with the following links and should be both saved (and un-zipped) in a
+`$XNLI_DIR` directory.
+
+* [XNLI 1.0](https://www.nyu.edu/projects/bowman/xnli/XNLI-1.0.zip)
+* [XNLI-MT 1.0](https://www.nyu.edu/projects/bowman/xnli/XNLI-MT-1.0.zip)
+
+```bash
+export XNLI_DIR=/path/to/XNLI
+
+python run_xnli.py \
+  --model_type bert \
+  --model_name_or_path bert-base-multilingual-cased \
+  --language de \
+  --train_language en \
+  --do_train \
+  --do_eval \
+  --data_dir $XNLI_DIR \
+  --per_gpu_train_batch_size 32 \
+  --learning_rate 5e-5 \
+  --num_train_epochs 2.0 \
+  --max_seq_length 128 \
+  --output_dir /tmp/debug_xnli/ \
+  --save_steps -1
+```
+
+Training with the previously defined hyper-parameters yields the following results on the **test** set:
+
+```bash
+acc = 0.7093812375249501
+```
diff --git a/examples/benchmarks.py b/examples/benchmarks.py
index b1153bf5665..26c260b9ec7 100644
--- a/examples/benchmarks.py
+++ b/examples/benchmarks.py
@@ -14,7 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
""" Benchmarking the library on inference and training """ -import tensorflow as tf # If checking the tensors placement # tf.debugging.set_log_device_placement(True) @@ -23,15 +22,18 @@ from typing import List import timeit from transformers import is_tf_available, is_torch_available from time import time -import torch - import argparse import csv -if not is_torch_available() or not is_tf_available(): - raise ImportError("TensorFlow and Pytorch should be installed on the system.") +if is_tf_available(): + import tensorflow as tf + from transformers import TFAutoModel -from transformers import AutoConfig, AutoModel, AutoTokenizer, TFAutoModel +if is_torch_available(): + import torch + from transformers import AutoModel + +from transformers import AutoConfig, AutoTokenizer input_text = """Bent over their instruments, three hundred Fertilizers were plunged, as the Director of Hatcheries and Conditioning entered the room, in the @@ -251,18 +253,22 @@ def create_setup_and_compute(model_names: List[str], average_over: int = 3, torchscript: bool = False, xla: bool = False, + amp: bool = False, + fp16: bool = False, save_to_csv: bool = False, csv_filename: str = f"results_{round(time())}.csv"): if xla: tf.config.optimizer.set_jit(True) + if amp: + tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True}) if tensorflow: dictionary = {model_name: {} for model_name in model_names} - results = _compute_tensorflow(model_names, dictionary, average_over) + results = _compute_tensorflow(model_names, dictionary, average_over, amp) else: device = 'cuda' if (gpu and torch.cuda.is_available()) else 'cpu' dictionary = {model_name: {} for model_name in model_names} - results = _compute_pytorch(model_names, dictionary, average_over, device, torchscript) + results = _compute_pytorch(model_names, dictionary, average_over, device, torchscript, fp16) print("=========== RESULTS ===========") for model_name in model_names: @@ -300,14 +306,14 @@ def create_setup_and_compute(model_names: List[str], writer.writerow({'model': model_name, **model_results}) -def _compute_pytorch(model_names, dictionary, average_over, device, torchscript): +def _compute_pytorch(model_names, dictionary, average_over, device, torchscript, fp16): for c, model_name in enumerate(model_names): print(f"{c + 1} / {len(model_names)}") config = AutoConfig.from_pretrained(model_name, torchscript=torchscript) model = AutoModel.from_pretrained(model_name, config=config) tokenizer = AutoTokenizer.from_pretrained(model_name) - tokenized_sequence = tokenizer.encode(input_text) + tokenized_sequence = tokenizer.encode(input_text, add_special_tokens=False) max_input_size = tokenizer.max_model_input_sizes[model_name] batch_sizes = [1, 2, 4, 8] @@ -317,6 +323,8 @@ def _compute_pytorch(model_names, dictionary, average_over, device, torchscript) dictionary[model_name]["results"] = {i: {} for i in batch_sizes} for batch_size in batch_sizes: + if fp16: + model.half() model.to(device) model.eval() for slice_size in slice_sizes: @@ -344,14 +352,14 @@ def _compute_pytorch(model_names, dictionary, average_over, device, torchscript) return dictionary -def _compute_tensorflow(model_names, dictionary, average_over): +def _compute_tensorflow(model_names, dictionary, average_over, amp): for c, model_name in enumerate(model_names): print(f"{c + 1} / {len(model_names)}") config = AutoConfig.from_pretrained(model_name) model = TFAutoModel.from_pretrained(model_name, config=config) tokenizer = AutoTokenizer.from_pretrained(model_name) - tokenized_sequence = 
tokenizer.encode(input_text) + tokenized_sequence = tokenizer.encode(input_text, add_special_tokens=False) max_input_size = tokenizer.max_model_input_sizes[model_name] batch_sizes = [1, 2, 4, 8] @@ -407,6 +415,8 @@ def main(): "the correct dependencies are " "installed") parser.add_argument("--xla", required=False, action="store_true", help="TensorFlow only: use XLA acceleration.") + parser.add_argument("--amp", required=False, action="store_true", help="TensorFlow only: use automatic mixed precision acceleration.") + parser.add_argument("--fp16", required=False, action="store_true", help="PyTorch only: use FP16 to accelerate inference.") parser.add_argument("--keras_predict", required=False, action="store_true", help="Whether to use model.predict " "instead of model() to do a " "forward pass.") @@ -434,26 +444,33 @@ def main(): print("Running with arguments", args) if args.torch: - create_setup_and_compute( - model_names=args.models, - tensorflow=False, - gpu=args.torch_cuda, - torchscript=args.torchscript, - save_to_csv=args.save_to_csv, - csv_filename=args.csv_filename, - average_over=args.average_over - ) + if is_torch_available(): + create_setup_and_compute( + model_names=args.models, + tensorflow=False, + gpu=args.torch_cuda, + torchscript=args.torchscript, + fp16=args.fp16, + save_to_csv=args.save_to_csv, + csv_filename=args.csv_filename, + average_over=args.average_over + ) + else: + raise ImportError("Trying to run a PyTorch benchmark but PyTorch was not found in the environment.") if args.tensorflow: - create_setup_and_compute( - model_names=args.models, - tensorflow=True, - xla=args.xla, - save_to_csv=args.save_to_csv, - csv_filename=args.csv_filename, - average_over=args.average_over - ) - + if is_tf_available(): + create_setup_and_compute( + model_names=args.models, + tensorflow=True, + xla=args.xla, + amp=args.amp, + save_to_csv=args.save_to_csv, + csv_filename=args.csv_filename, + average_over=args.average_over + ) + else: + raise ImportError("Trying to run a TensorFlow benchmark but TensorFlow was not found in the environment.") if __name__ == '__main__': main() diff --git a/examples/contrib/run_camembert.py b/examples/contrib/run_camembert.py new file mode 100644 index 00000000000..28144d51670 --- /dev/null +++ b/examples/contrib/run_camembert.py @@ -0,0 +1,48 @@ +from pathlib import Path +import tarfile +import urllib.request + +import torch + +from transformers.tokenization_camembert import CamembertTokenizer +from transformers.modeling_camembert import CamembertForMaskedLM + + +def fill_mask(masked_input, model, tokenizer, topk=5): + # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py + assert masked_input.count('') == 1 + input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0) # Batch size 1 + logits = model(input_ids)[0] # The last hidden-state is the first element of the output tuple + masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() + logits = logits[0, masked_index, :] + prob = logits.softmax(dim=0) + values, indices = prob.topk(k=topk, dim=0) + topk_predicted_token_bpe = ' '.join([tokenizer.convert_ids_to_tokens(indices[i].item()) + for i in range(len(indices))]) + masked_token = tokenizer.mask_token + topk_filled_outputs = [] + for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ')): + predicted_token = predicted_token_bpe.replace('\u2581', ' ') + if " {0}".format(masked_token) in masked_input: + 
topk_filled_outputs.append(( + masked_input.replace( + ' {0}'.format(masked_token), predicted_token + ), + values[index].item(), + predicted_token, + )) + else: + topk_filled_outputs.append(( + masked_input.replace(masked_token, predicted_token), + values[index].item(), + predicted_token, + )) + return topk_filled_outputs + + +tokenizer = CamembertTokenizer.from_pretrained('camembert-base') +model = CamembertForMaskedLM.from_pretrained('camembert-base') +model.eval() + +masked_input = "Le camembert est :)" +print(fill_mask(masked_input, model, tokenizer, topk=3)) diff --git a/examples/contrib/run_openai_gpt.py b/examples/contrib/run_openai_gpt.py index 661c1c305b7..2d165a91e32 100644 --- a/examples/contrib/run_openai_gpt.py +++ b/examples/contrib/run_openai_gpt.py @@ -41,7 +41,7 @@ from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, from transformers import (OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, AdamW, cached_path, WEIGHTS_NAME, CONFIG_NAME, - WarmupLinearSchedule) + get_linear_schedule_with_warmup) ROCSTORIES_URL = "https://s3.amazonaws.com/datasets.huggingface.co/ROCStories.tar.gz" @@ -211,7 +211,7 @@ def main(): {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) - scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.do_train: nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None @@ -237,7 +237,7 @@ def main(): # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer - model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self + model_to_save = model.module if hasattr(model, 'module') else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME) diff --git a/examples/contrib/run_swag.py b/examples/contrib/run_swag.py index 8494c5fad9a..5de93db7fe8 100644 --- a/examples/contrib/run_swag.py +++ b/examples/contrib/run_swag.py @@ -42,7 +42,7 @@ from tqdm import tqdm, trange from transformers import (WEIGHTS_NAME, BertConfig, BertForMultipleChoice, BertTokenizer) -from transformers import AdamW, WarmupLinearSchedule +from transformers import AdamW, get_linear_schedule_with_warmup logger = logging.getLogger(__name__) @@ -322,7 +322,7 @@ def train(args, train_dataset, model, tokenizer): {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) - scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp diff --git a/examples/distillation/README.md b/examples/distillation/README.md index 0fbcb5628b5..24a1677db1e 100644 --- a/examples/distillation/README.md +++ b/examples/distillation/README.md @@ -1,25 +1,53 @@ # Distil* -This folder contains the original code used to train Distil* as well as examples showcasing how to use DistilBERT and DistilGPT2. 
+This folder contains the original code used to train Distil* as well as examples showcasing how to use DistilBERT, DistilRoBERTa and DistilGPT2.
 
-**2019, October 3rd - Update** We release our [NeurIPS workshop paper](https://arxiv.org/abs/1910.01108) explaining our approach on **DistilBERT**. It includes updated results and further experiments. We applied the same method to GPT2 and release the weights of **DistilGPT2**. DistilGPT2 is two times faster and 33% smaller than GPT2. **The paper superseeds our [previous blogpost](https://medium.com/huggingface/distilbert-8cf3380435b5) with a different distillation loss and better performances. Please use the paper as a reference when comparing/reporting results on DistilBERT.**
+**December 6th, 2019 - Update** We release **DistilmBERT**: 92% of `bert-base-multilingual-cased` on XNLI. The model supports 104 different languages listed [here](https://github.com/google-research/bert/blob/master/multilingual.md#list-of-languages).
+
+**November 19th, 2019 - Update** We release German **DistilBERT**: 98.8% of `bert-base-german-dbmdz-cased` on NER tasks.
+
+**October 23rd, 2019 - Update** We release **DistilRoBERTa**: 95% of `RoBERTa-base`'s performance on GLUE, twice as fast as RoBERTa while being 35% smaller.
+
+**October 3rd, 2019 - Update** We release our [NeurIPS workshop paper](https://arxiv.org/abs/1910.01108) explaining our approach on **DistilBERT**. It includes updated results and further experiments. We applied the same method to GPT2 and release the weights of **DistilGPT2**. DistilGPT2 is two times faster and 33% smaller than GPT2. **The paper supersedes our [previous blogpost](https://medium.com/huggingface/distilbert-8cf3380435b5) with a different distillation loss and better performance. Please use the paper as a reference when comparing/reporting results on DistilBERT.**
+
+**September 19th, 2019 - Update:** We fixed bugs in the code and released an updated version of the weights trained with a modification of the distillation loss. DistilBERT now reaches 97% of `BERT-base`'s performance on GLUE, and 86.9 F1 score on SQuAD v1.1 dev set (compared to 88.5 for `BERT-base`). We will publish a formal write-up of our approach in the near future!
 
-**2019, September 19th - Update:** We fixed bugs in the code and released an upadted version of the weights trained with a modification of the distillation loss. DistilBERT now reaches 97% of `BERT-base`'s performance on GLUE, and 86.9 F1 score on SQuAD v1.1 dev set (compared to 88.5 for `BERT-base`). We will publish a formal write-up of our approach in the near future!
 
 ## What is Distil*
 
 Distil* is a class of compressed models that started with DistilBERT. DistilBERT stands for Distillated-BERT. DistilBERT is a small, fast, cheap and light Transformer model based on Bert architecture. It has 40% less parameters than `bert-base-uncased`, runs 60% faster while preserving 97% of BERT's performances as measured on the GLUE language understanding benchmark. DistilBERT is trained using knowledge distillation, a technique to compress a large model called the teacher into a smaller model called the student. By distillating Bert, we obtain a smaller Transformer model that bears a lot of similarities with the original BERT model while being lighter, smaller and faster to run. DistilBERT is thus an interesting option to put large-scaled trained Transformer model into production.
 
-We have applied the same method to GPT2 and release the weights of the compressed model.
On the [WikiText-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/) benchmark, GPT2 reaches a perplexity on the test set of 15.0 compared to 18.5 for DistilGPT2 (after fine-tuning on the train set). +We have applied the same method to other Transformer architectures and released the weights: +- GPT2: on the [WikiText-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/) benchmark, GPT2 reaches a perplexity on the test set of 15.0 compared to 18.5 for **DistilGPT2** (after fine-tuning on the train set). +- RoBERTa: **DistilRoBERTa** reaches 95% of `RoBERTa-base`'s performance on GLUE while being twice faster and 35% smaller. +- German BERT: **German DistilBERT** reaches 99% of `bert-base-german-dbmdz-cased`'s performance on German NER (CoNLL-2003). +- Multilingual BERT: **DistilmBERT** reaches 92% of Multilingual BERT's performance on XNLI while being twice faster and 25% smaller. The model supports 104 languages listed [here](https://github.com/google-research/bert/blob/master/multilingual.md#list-of-languages). For more information on DistilBERT, please refer to our [NeurIPS workshop paper](https://arxiv.org/abs/1910.01108). Here are the results on the dev sets of GLUE: -| Model | Macro-score | CoLA | MNLI | MRPC | QNLI | QQP | RTE | SST-2| STS-B| WNLI | -| :---: | :---: | :---:| :---:| :---:| :---:| :---:| :---:| :---:| :---:| :---:| -| BERT-base | **77.6** | 48.9 | 84.3 | 88.6 | 89.3 | 89.5 | 71.3 | 91.7 | 91.2 | 43.7 | -| DistilBERT | **76.8** | 49.1 | 81.8 | 90.2 | 90.2 | 89.2 | 62.9 | 92.7 | 90.7 | 44.4 | +| Model | Macro-score | CoLA | MNLI | MRPC | QNLI | QQP | RTE | SST-2| STS-B| WNLI | +| :---: | :---: | :---:| :---:| :---:| :---:| :---:| :---:| :---:| :---:| :---: | +| BERT-base | **77.6** | 48.9 | 84.3 | 88.6 | 89.3 | 89.5 | 71.3 | 91.7 | 91.2 | 43.7 | +| DistilBERT | **76.8** | 49.1 | 81.8 | 90.2 | 90.2 | 89.2 | 62.9 | 92.7 | 90.7 | 44.4 | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | +| RoBERTa-base (reported) | **83.2**/**86.4**2 | 63.6 | 87.6 | 90.2 | 92.8 | 91.9 | 78.7 | 94.8 | 91.2 | 57.73 | +| DistilRoBERTa1 | **79.0**/**82.3**2 | 59.4 | 83.9 | 86.6 | 90.8 | 89.4 | 67.9 | 92.5 | 88.3 | 52.1 | + +1 We did not use the MNLI checkpoint for fine-tuning but directy perform transfer learning on the pre-trained DistilRoBERTa. + +2 Macro-score computed without WNLI. + +3 We compute this score ourselves for completeness. + +Here are the results on the *test* sets for 6 of the languages available in XNLI. The results are computed in the zero shot setting (trained on the English portion and evaluated on the target language portion): + +| Model | English | Spanish | Chinese | German | Arabic | Urdu | +| :---: | :---: | :---: | :---: | :---: | :---: | :---:| +| mBERT base cased (computed) | 82.1 | 74.6 | 69.1 | 72.3 | 66.4 | 58.5 | +| mBERT base uncased (reported)| 81.4 | 74.3 | 63.8 | 70.5 | 62.1 | 58.3 | +| DistilmBERT | 78.2 | 69.1 | 64.0 | 66.3 | 59.1 | 54.7 | ## Setup @@ -27,14 +55,17 @@ This part of the library has only be tested with Python3.6+. There are few speci **Important note:** The training scripts have been updated to support PyTorch v1.2.0 (there are breakings changes compared to v1.1.0). 
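With the dependencies installed, a quick way to sanity-check the parameter-count and speed comparisons quoted above is a small script along the following lines. This is only a rough sketch: the checkpoint names are the published ones, but the timing loop (CPU, batch size 1) is illustrative and is not the protocol behind the numbers reported in the tables above.

```python
import time

import torch
from transformers import AutoModel, AutoTokenizer

text = "Distillation compresses a large teacher model into a smaller student model."

for name in ["bert-base-uncased", "distilbert-base-uncased"]:
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = AutoModel.from_pretrained(name)
    model.eval()

    n_params = sum(p.numel() for p in model.parameters())
    input_ids = torch.tensor([tokenizer.encode(text, add_special_tokens=True)])

    with torch.no_grad():
        model(input_ids)                      # warm-up pass
        start = time.time()
        for _ in range(50):
            model(input_ids)

    print("{}: {:.0f}M parameters, {:.1f} ms per forward pass".format(
        name, n_params / 1e6, (time.time() - start) / 50 * 1000))
```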
+ ## How to use DistilBERT -Transformers includes two pre-trained Distil* models, currently only provided for English (we are investigating the possibility to train and release a multilingual version of DistilBERT): +Transformers includes five pre-trained Distil* models, currently only provided for English and German (we are investigating the possibility to train and release a multilingual version of DistilBERT): - `distilbert-base-uncased`: DistilBERT English language model pretrained on the same data used to pretrain Bert (concatenation of the Toronto Book Corpus and full English Wikipedia) using distillation with the supervision of the `bert-base-uncased` version of Bert. The model has 6 layers, 768 dimension and 12 heads, totalizing 66M parameters. - `distilbert-base-uncased-distilled-squad`: A finetuned version of `distilbert-base-uncased` finetuned using (a second step of) knwoledge distillation on SQuAD 1.0. This model reaches a F1 score of 86.9 on the dev set (for comparison, Bert `bert-base-uncased` version reaches a 88.5 F1 score). -- `distilgpt2`: DistilGPT2 English language model pretrained with the supervision of `gpt2` (the smallest version of GPT2) on [OpenWebTextCorpus](https://skylion007.github.io/OpenWebTextCorpus/), a reproduction of OpenAI's WebText dataset and . The model has 6 layers, 768 dimension and 12 heads, totalizing 82M (compared to 124M parameters for GPT2). On average, DistilGPT2 is two times faster than GPT2. -- and more to come! 🤗🤗🤗 +- `distilbert-base-german-cased`: DistilBERT German language model pretrained on 1/2 of the data used to pretrain Bert using distillation with the supervision of the `bert-base-german-dbmdz-cased` version of German DBMDZ Bert. For NER tasks the model reaches a F1 score of 83.49 on the CoNLL-2003 test set (for comparison, `bert-base-german-dbmdz-cased` reaches a 84.52 F1 score), and a F1 score of 85.23 on the GermEval 2014 test set (`bert-base-german-dbmdz-cased` reaches a 86.89 F1 score). +- `distilgpt2`: DistilGPT2 English language model pretrained with the supervision of `gpt2` (the smallest version of GPT2) on [OpenWebTextCorpus](https://skylion007.github.io/OpenWebTextCorpus/), a reproduction of OpenAI's WebText dataset. The model has 6 layers, 768 dimension and 12 heads, totalizing 82M parameters (compared to 124M parameters for GPT2). On average, DistilGPT2 is two times faster than GPT2. +- `distilroberta-base`: DistilRoBERTa English language model pretrained with the supervision of `roberta-base` solely on [OpenWebTextCorpus](https://skylion007.github.io/OpenWebTextCorpus/), a reproduction of OpenAI's WebText dataset (it is ~4 times less training data than the teacher RoBERTa). The model has 6 layers, 768 dimension and 12 heads, totalizing 82M parameters (compared to 125M parameters for RoBERTa-base). On average DistilRoBERTa is twice as fast as Roberta-base. +- `distilbert-base-multilingual-cased`: DistilmBERT multilingual model pretrained with the supervision of `bert-base-multilingual-cased` on the concatenation of Wikipedia in 104 different languages. The model supports the 104 languages listed [here](https://github.com/google-research/bert/blob/master/multilingual.md#list-of-languages). The model has 6 layers, 768 dimension and 12 heads, totalizing 134M parameters (compared to 177M parameters for mBERT-base). On average DistilmBERT is twice as fast as mBERT-base. Using DistilBERT is very similar to using BERT. 
DistilBERT share the same tokenizer as BERT's `bert-base-uncased` even though we provide a link to this tokenizer under the `DistilBertTokenizer` name to have a consistent naming between the library models. @@ -47,7 +78,11 @@ outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple ``` -Similarly, using DistilGPT2 simply consists in calling the GPT2 classes from a different pretrained checkpoint: `model = GPT2Model.from_pretrained('distilgpt2')`. +Similarly, using the other Distil* models simply consists in calling the base classes with a different pretrained checkpoint: +- DistilGPT2: `model = GPT2Model.from_pretrained('distilgpt2')` +- DistilRoBERTa: `model = RobertaModel.from_pretrained('distilroberta-base')` +- DistilmBERT: `model = DistilBertModel.from_pretrained('distilbert-base-multilingual-cased')` + ## How to train Distil* @@ -88,7 +123,7 @@ python train.py \ --student_config training_configs/distilbert-base-uncased.json \ --teacher_type bert \ --teacher_name bert-base-uncased \ - --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_cos 1.0 --mlm \ + --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_cos 1.0 --alpha_clm 0.0 --mlm \ --freeze_pos_embs \ --dump_path serialization_dir/my_first_training \ --data_file data/binarized_text.bert-base-uncased.pickle \ @@ -124,7 +159,7 @@ python -m torch.distributed.launch \ --student_config training_configs/distilbert-base-uncased.json \ --teacher_type bert \ --teacher_name bert-base-uncased \ - --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_cos 0.33 --mlm \ + --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_cos 0.33 --alpha_clm 0.0 --mlm \ --freeze_pos_embs \ --dump_path serialization_dir/my_first_training \ --data_file data/binarized_text.bert-base-uncased.pickle \ @@ -146,4 +181,4 @@ If you find the ressource useful, you should cite the following paper: booktitle={NeurIPS EMC^2 Workshop}, year={2019} } -``` \ No newline at end of file +``` diff --git a/examples/distillation/distiller.py b/examples/distillation/distiller.py index d51bdae77fe..1e33190acad 100644 --- a/examples/distillation/distiller.py +++ b/examples/distillation/distiller.py @@ -21,7 +21,6 @@ import psutil import time from tqdm import trange, tqdm import numpy as np -import psutil import torch import torch.nn as nn @@ -35,7 +34,7 @@ try: except: from tensorboardX import SummaryWriter -from transformers import WarmupLinearSchedule +from transformers import get_linear_schedule_with_warmup from utils import logger from lm_seqs_dataset import LmSeqsDataset @@ -137,9 +136,9 @@ class Distiller: betas=(0.9, 0.98)) warmup_steps = math.ceil(num_train_optimization_steps * params.warmup_prop) - self.scheduler = WarmupLinearSchedule(self.optimizer, - warmup_steps=warmup_steps, - t_total=num_train_optimization_steps) + self.scheduler = get_linear_schedule_with_warmup(self.optimizer, + num_warmup_steps=warmup_steps, + num_training_steps=num_train_optimization_steps) if self.fp16: try: diff --git a/examples/distillation/requirements.txt b/examples/distillation/requirements.txt index d76273b34a7..491924ee2c6 100644 --- a/examples/distillation/requirements.txt +++ b/examples/distillation/requirements.txt @@ -3,4 +3,4 @@ tensorboard>=1.14.0 tensorboardX==1.8 psutil==5.6.3 scipy==1.3.1 -transformers==2.0.0 +transformers diff --git a/examples/distillation/run_squad_w_distillation.py b/examples/distillation/run_squad_w_distillation.py index a5194d08041..70b65dc1b8f 100644 --- a/examples/distillation/run_squad_w_distillation.py +++ 
b/examples/distillation/run_squad_w_distillation.py @@ -46,7 +46,7 @@ from transformers import (WEIGHTS_NAME, BertConfig, XLNetTokenizer, DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer) -from transformers import AdamW, WarmupLinearSchedule +from transformers import AdamW, get_linear_schedule_with_warmup from ..utils_squad import (read_squad_examples, convert_examples_to_features, RawResult, write_predictions, @@ -101,7 +101,7 @@ def train(args, train_dataset, model, tokenizer, teacher=None): {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) - scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp @@ -506,9 +506,15 @@ def main(): args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] - config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path) - tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case) - model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) + config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, + cache_dir=args.cache_dir if args.cache_dir else None) + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, + do_lower_case=args.do_lower_case, + cache_dir=args.cache_dir if args.cache_dir else None) + model = model_class.from_pretrained(args.model_name_or_path, + from_tf=bool('.ckpt' in args.model_name_or_path), + config=config, + cache_dir=args.cache_dir if args.cache_dir else None) if args.teacher_type is not None: assert args.teacher_name_or_path is not None @@ -516,8 +522,11 @@ def main(): assert args.alpha_ce + args.alpha_squad > 0. assert args.teacher_type != 'distilbert', "We constraint teachers not to be of type DistilBERT." 
teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type] - teacher_config = teacher_config_class.from_pretrained(args.teacher_name_or_path) - teacher = teacher_model_class.from_pretrained(args.teacher_name_or_path, config=teacher_config) + teacher_config = teacher_config_class.from_pretrained(args.teacher_name_or_path, + cache_dir=args.cache_dir if args.cache_dir else None) + teacher = teacher_model_class.from_pretrained(args.teacher_name_or_path, + config=teacher_config, + cache_dir=args.cache_dir if args.cache_dir else None) teacher.to(args.device) else: teacher = None @@ -553,8 +562,10 @@ def main(): torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned - model = model_class.from_pretrained(args.output_dir) - tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) + model = model_class.from_pretrained(args.output_dir, cache_dir=args.cache_dir if args.cache_dir else None) + tokenizer = tokenizer_class.from_pretrained(args.output_dir, + do_lower_case=args.do_lower_case, + cache_dir=args.cache_dir if args.cache_dir else None) model.to(args.device) @@ -571,7 +582,7 @@ def main(): for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" - model = model_class.from_pretrained(checkpoint) + model = model_class.from_pretrained(checkpoint, cache_dir=args.cache_dir if args.cache_dir else None) model.to(args.device) # Evaluate diff --git a/examples/distillation/scripts/binarized_data.py b/examples/distillation/scripts/binarized_data.py index 43824e9964c..681cc2de341 100644 --- a/examples/distillation/scripts/binarized_data.py +++ b/examples/distillation/scripts/binarized_data.py @@ -68,7 +68,7 @@ def main(): start = time.time() for text in data: text = f'{bos} {text.strip()} {sep}' - token_ids = tokenizer.encode(text) + token_ids = tokenizer.encode(text, add_special_tokens=False) rslt.append(token_ids) iter += 1 diff --git a/examples/pplm/README.md b/examples/pplm/README.md new file mode 100644 index 00000000000..b12205854aa --- /dev/null +++ b/examples/pplm/README.md @@ -0,0 +1,54 @@ +# Plug and Play Language Models: a Simple Approach to Controlled Text Generation + +Authors: [Sumanth Dathathri](https://dathath.github.io/), [Andrea Madotto](https://andreamad8.github.io/), Janice Lan, Jane Hung, Eric Frank, [Piero Molino](https://w4nderlu.st/), [Jason Yosinski](http://yosinski.com/), and [Rosanne Liu](http://www.rosanneliu.com/) + +This folder contains the original code used to run the Plug and Play Language Model (PPLM). + +Paper link: https://arxiv.org/abs/1912.02164 + +Blog link: https://eng.uber.com/pplm + +Please check out the repo under uber-research for more information: https://github.com/uber-research/PPLM + + +## Setup + +```bash +git clone https://github.com/huggingface/transformers && cd transformers +pip install [--editable] . +pip install nltk torchtext # additional requirements. +cd examples/pplm +``` + +## PPLM-BoW + +### Example command for bag-of-words control + +```bash +python run_pplm.py -B military --cond_text "The potato" --length 50 --gamma 1.5 --num_iterations 3 --num_samples 10 --stepsize 0.03 --window_length 5 --kl_scale 0.01 --gm_scale 0.99 --colorama --sample +``` + +### Tuning hyperparameters for bag-of-words control + +1. Increase `--stepsize` to intensify topic control, and decrease its value to soften the control. 
`--stepsize 0` recovers the original uncontrolled GPT-2 model.
+
+2. If the language being generated is repetitive (e.g. "science science experiment experiment"), there are several options to consider:
+ a) Reduce the `--stepsize`
+	b) Increase `--kl_scale` (the KL-loss coefficient) or decrease `--gm_scale` (the geometric-mean scaling term); a short sketch of how these two knobs enter the computation follows this list.
+	c) Add `--grad_length xx` where xx is an integer less than or equal to `length` (e.g. `--grad_length 30`).
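To make item (b) concrete, here is a stripped-down sketch of how the two knobs appear later in `run_pplm.py`: `--kl_scale` weights a KL term that pulls the perturbed next-token distribution back towards the unperturbed one, and `--gm_scale` sets the geometric-mean fusion of the two distributions before sampling. Tensor names and sizes below are illustrative only:

```python
import torch
import torch.nn.functional as F

# Illustrative next-token logits over a toy vocabulary of size 8.
pert_logits = torch.randn(1, 8)     # from the perturbed (steered) forward pass
unpert_logits = torch.randn(1, 8)   # from the original, unperturbed forward pass

kl_scale = 0.01                     # weight of the KL term in the perturbation loss
gm_scale = 0.99                     # 1.0 = sample only from the perturbed distribution

pert_probs = F.softmax(pert_logits, dim=-1)
unpert_probs = F.softmax(unpert_logits, dim=-1)

# KL term added to the loss during the perturbation steps:
# a larger kl_scale keeps the perturbed distribution closer to the unperturbed one.
kl_loss = kl_scale * (pert_probs * (pert_probs / unpert_probs).log()).sum()

# Geometric-mean fusion used when picking the next token:
# a smaller gm_scale gives the unperturbed distribution more weight.
fused_probs = (pert_probs ** gm_scale) * (unpert_probs ** (1 - gm_scale))
fused_probs = fused_probs / fused_probs.sum()   # renormalise before sampling

print(kl_loss.item(), fused_probs)
```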
+ + +## PPLM-Discrim + +### Example command for discriminator based sentiment control + +```bash +python run_pplm.py -D sentiment --class_label 2 --cond_text "My dog died" --length 50 --gamma 1.0 --num_iterations 10 --num_samples 10 --stepsize 0.04 --kl_scale 0.01 --gm_scale 0.95 --sample +``` + +### Tuning hyperparameters for discriminator control + +1. Increase `--stepsize` to intensify topic control, and decrease its value to soften the control. `--stepsize 0` recovers the original uncontrolled GPT-2 model. + +2. Use `--class_label 3` for negative, and `--class_label 2` for positive + diff --git a/examples/pplm/imgs/headfigure.png b/examples/pplm/imgs/headfigure.png new file mode 100644 index 00000000000..f4c11ad54d1 Binary files /dev/null and b/examples/pplm/imgs/headfigure.png differ diff --git a/examples/pplm/imgs/wooly.png b/examples/pplm/imgs/wooly.png new file mode 100644 index 00000000000..190d3afd49f Binary files /dev/null and b/examples/pplm/imgs/wooly.png differ diff --git a/examples/pplm/pplm_classification_head.py b/examples/pplm/pplm_classification_head.py new file mode 100644 index 00000000000..9aae0f17e9c --- /dev/null +++ b/examples/pplm/pplm_classification_head.py @@ -0,0 +1,18 @@ +import torch + +class ClassificationHead(torch.nn.Module): + """Classification Head for transformer encoders""" + + def __init__(self, class_size, embed_size): + super(ClassificationHead, self).__init__() + self.class_size = class_size + self.embed_size = embed_size + # self.mlp1 = torch.nn.Linear(embed_size, embed_size) + # self.mlp2 = (torch.nn.Linear(embed_size, class_size)) + self.mlp = torch.nn.Linear(embed_size, class_size) + + def forward(self, hidden_state): + # hidden_state = F.relu(self.mlp1(hidden_state)) + # hidden_state = self.mlp2(hidden_state) + logits = self.mlp(hidden_state) + return logits diff --git a/examples/pplm/run_pplm.py b/examples/pplm/run_pplm.py new file mode 100644 index 00000000000..095dc39a745 --- /dev/null +++ b/examples/pplm/run_pplm.py @@ -0,0 +1,879 @@ +#! /usr/bin/env python3 +# coding=utf-8 + +#Copyright (c) 2019 Uber Technologies, Inc. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +#http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. 
+ +""" +Example command with bag of words: +python examples/run_pplm.py -B space --cond_text "The president" --length 100 --gamma 1.5 --num_iterations 3 --num_samples 10 --stepsize 0.01 --window_length 5 --kl_scale 0.01 --gm_scale 0.95 + +Example command with discriminator: +python examples/run_pplm.py -D sentiment --class_label 3 --cond_text "The lake" --length 10 --gamma 1.0 --num_iterations 30 --num_samples 10 --stepsize 0.01 --kl_scale 0.01 --gm_scale 0.95 +""" + +import argparse +import json +from operator import add +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +from torch.autograd import Variable +from tqdm import trange + +from transformers import GPT2Tokenizer +from transformers.file_utils import cached_path +from transformers.modeling_gpt2 import GPT2LMHeadModel +from pplm_classification_head import ClassificationHead + +PPLM_BOW = 1 +PPLM_DISCRIM = 2 +PPLM_BOW_DISCRIM = 3 +SMALL_CONST = 1e-15 +BIG_CONST = 1e10 + +BAG_OF_WORDS_ARCHIVE_MAP = { + 'legal': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/legal.txt", + 'military': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/military.txt", + 'politics': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/politics.txt", + 'religion': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/religion.txt", + 'science': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/science.txt", + 'space': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/space.txt", + 'technology': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/technology.txt", +} + +DISCRIMINATOR_MODELS_PARAMS = { + "clickbait": { + "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/clickbait_classifier_head.pt", + "class_size": 2, + "embed_size": 1024, + "class_vocab": {"non_clickbait": 0, "clickbait": 1}, + "default_class": 1, + "pretrained_model": "gpt2-medium", + }, + "sentiment": { + "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/SST_classifier_head.pt", + "class_size": 5, + "embed_size": 1024, + "class_vocab": {"very_positive": 2, "very_negative": 3}, + "default_class": 3, + "pretrained_model": "gpt2-medium", + }, +} + + +def to_var(x, requires_grad=False, volatile=False, device='cuda'): + if torch.cuda.is_available() and device == 'cuda': + x = x.cuda() + elif device != 'cuda': + x = x.to(device) + return Variable(x, requires_grad=requires_grad, volatile=volatile) + + +def top_k_filter(logits, k, probs=False): + """ + Masks everything but the k top entries as -infinity (1e10). + Used to mask logits such that e^-infinity -> 0 won't contribute to the + sum of the denominator. 
+ """ + if k == 0: + return logits + else: + values = torch.topk(logits, k)[0] + batch_mins = values[:, -1].view(-1, 1).expand_as(logits) + if probs: + return torch.where(logits < batch_mins, + torch.ones_like(logits) * 0.0, logits) + return torch.where(logits < batch_mins, + torch.ones_like(logits) * -BIG_CONST, + logits) + + +def perturb_past( + past, + model, + last, + unpert_past=None, + unpert_logits=None, + accumulated_hidden=None, + grad_norms=None, + stepsize=0.01, + one_hot_bows_vectors=None, + classifier=None, + class_label=None, + loss_type=0, + num_iterations=3, + horizon_length=1, + window_length=0, + decay=False, + gamma=1.5, + kl_scale=0.01, + device='cuda', +): + # Generate inital perturbed past + grad_accumulator = [ + (np.zeros(p.shape).astype("float32")) + for p in past + ] + + if accumulated_hidden is None: + accumulated_hidden = 0 + + if decay: + decay_mask = torch.arange( + 0., + 1.0 + SMALL_CONST, + 1.0 / (window_length) + )[1:] + else: + decay_mask = 1.0 + + # TODO fix this comment (SUMANTH) + # Generate a mask is gradient perturbated is based on a past window + _, _, _, curr_length, _ = past[0].shape + + if curr_length > window_length and window_length > 0: + ones_key_val_shape = ( + tuple(past[0].shape[:-2]) + + tuple([window_length]) + + tuple(past[0].shape[-1:]) + ) + + zeros_key_val_shape = ( + tuple(past[0].shape[:-2]) + + tuple([curr_length - window_length]) + + tuple(past[0].shape[-1:]) + ) + + ones_mask = torch.ones(ones_key_val_shape) + ones_mask = decay_mask * ones_mask.permute(0, 1, 2, 4, 3) + ones_mask = ones_mask.permute(0, 1, 2, 4, 3) + + window_mask = torch.cat( + (ones_mask, torch.zeros(zeros_key_val_shape)), + dim=-2 + ).to(device) + else: + window_mask = torch.ones_like(past[0]).to(device) + + # accumulate perturbations for num_iterations + loss_per_iter = [] + new_accumulated_hidden = None + for i in range(num_iterations): + print("Iteration ", i + 1) + curr_perturbation = [ + to_var(torch.from_numpy(p_), requires_grad=True, device=device) + for p_ in grad_accumulator + ] + + # Compute hidden using perturbed past + perturbed_past = list(map(add, past, curr_perturbation)) + _, _, _, curr_length, _ = curr_perturbation[0].shape + all_logits, _, all_hidden = model(last, past=perturbed_past) + hidden = all_hidden[-1] + new_accumulated_hidden = accumulated_hidden + torch.sum( + hidden, + dim=1 + ).detach() + # TODO: Check the layer-norm consistency of this with trained discriminator (Sumanth) + logits = all_logits[:, -1, :] + probs = F.softmax(logits, dim=-1) + + loss = 0.0 + loss_list = [] + if loss_type == PPLM_BOW or loss_type == PPLM_BOW_DISCRIM: + for one_hot_bow in one_hot_bows_vectors: + bow_logits = torch.mm(probs, torch.t(one_hot_bow)) + bow_loss = -torch.log(torch.sum(bow_logits)) + loss += bow_loss + loss_list.append(bow_loss) + print(" pplm_bow_loss:", loss.data.cpu().numpy()) + + if loss_type == 2 or loss_type == 3: + ce_loss = torch.nn.CrossEntropyLoss() + # TODO why we need to do this assignment and not just using unpert_past? 
(Sumanth) + curr_unpert_past = unpert_past + curr_probs = torch.unsqueeze(probs, dim=1) + wte = model.resize_token_embeddings() + for _ in range(horizon_length): + inputs_embeds = torch.matmul(curr_probs, wte.weight.data) + _, curr_unpert_past, curr_all_hidden = model( + past=curr_unpert_past, + inputs_embeds=inputs_embeds + ) + curr_hidden = curr_all_hidden[-1] + new_accumulated_hidden = new_accumulated_hidden + torch.sum( + curr_hidden, dim=1) + + prediction = classifier(new_accumulated_hidden / + (curr_length + 1 + horizon_length)) + + label = torch.tensor(prediction.shape[0] * [class_label], + device=device, + dtype=torch.long) + discrim_loss = ce_loss(prediction, label) + print(" pplm_discrim_loss:", discrim_loss.data.cpu().numpy()) + loss += discrim_loss + loss_list.append(discrim_loss) + + kl_loss = 0.0 + if kl_scale > 0.0: + unpert_probs = F.softmax(unpert_logits[:, -1, :], dim=-1) + unpert_probs = ( + unpert_probs + SMALL_CONST * + (unpert_probs <= SMALL_CONST).float().to(device).detach() + ) + correction = SMALL_CONST * (probs <= SMALL_CONST).float().to( + device).detach() + corrected_probs = probs + correction.detach() + kl_loss = kl_scale * ( + (corrected_probs * (corrected_probs / unpert_probs).log()).sum() + ) + print(' kl_loss', kl_loss.data.cpu().numpy()) + loss += kl_loss + + loss_per_iter.append(loss.data.cpu().numpy()) + print(' pplm_loss', (loss - kl_loss).data.cpu().numpy()) + + # compute gradients + loss.backward() + + # calculate gradient norms + if grad_norms is not None and loss_type == PPLM_BOW: + grad_norms = [ + torch.max(grad_norms[index], torch.norm(p_.grad * window_mask)) + for index, p_ in enumerate(curr_perturbation) + ] + else: + grad_norms = [ + (torch.norm(p_.grad * window_mask) + SMALL_CONST) + for index, p_ in enumerate(curr_perturbation) + ] + + # normalize gradients + grad = [ + -stepsize * + (p_.grad * window_mask / grad_norms[ + index] ** gamma).data.cpu().numpy() + for index, p_ in enumerate(curr_perturbation) + ] + + # accumulate gradient + grad_accumulator = list(map(add, grad, grad_accumulator)) + + # reset gradients, just to make sure + for p_ in curr_perturbation: + p_.grad.data.zero_() + + # removing past from the graph + new_past = [] + for p_ in past: + new_past.append(p_.detach()) + past = new_past + + # apply the accumulated perturbations to the past + grad_accumulator = [ + to_var(torch.from_numpy(p_), requires_grad=True, device=device) + for p_ in grad_accumulator + ] + pert_past = list(map(add, past, grad_accumulator)) + + return pert_past, new_accumulated_hidden, grad_norms, loss_per_iter + + +def get_classifier( + name: Optional[str], class_label: Union[str, int], + device: str +) -> Tuple[Optional[ClassificationHead], Optional[int]]: + if name is None: + return None, None + + params = DISCRIMINATOR_MODELS_PARAMS[name] + classifier = ClassificationHead( + class_size=params['class_size'], + embed_size=params['embed_size'] + ).to(device) + if "url" in params: + resolved_archive_file = cached_path(params["url"]) + elif "path" in params: + resolved_archive_file = params["path"] + else: + raise ValueError("Either url or path have to be specified " + "in the discriminator model parameters") + classifier.load_state_dict( + torch.load(resolved_archive_file, map_location=device)) + classifier.eval() + + if isinstance(class_label, str): + if class_label in params["class_vocab"]: + label_id = params["class_vocab"][class_label] + else: + label_id = params["default_class"] + print("class_label {} not in class_vocab".format(class_label)) + 
print("available values are: {}".format(params["class_vocab"])) + print("using default class {}".format(label_id)) + + elif isinstance(class_label, int): + if class_label in set(params["class_vocab"].values()): + label_id = class_label + else: + label_id = params["default_class"] + print("class_label {} not in class_vocab".format(class_label)) + print("available values are: {}".format(params["class_vocab"])) + print("using default class {}".format(label_id)) + + else: + label_id = params["default_class"] + + return classifier, label_id + + +def get_bag_of_words_indices(bag_of_words_ids_or_paths: List[str], tokenizer) -> \ + List[List[List[int]]]: + bow_indices = [] + for id_or_path in bag_of_words_ids_or_paths: + if id_or_path in BAG_OF_WORDS_ARCHIVE_MAP: + filepath = cached_path(BAG_OF_WORDS_ARCHIVE_MAP[id_or_path]) + else: + filepath = id_or_path + with open(filepath, "r") as f: + words = f.read().strip().split("\n") + bow_indices.append( + [tokenizer.encode(word.strip(), add_prefix_space=True) for word in + words]) + return bow_indices + + +def build_bows_one_hot_vectors(bow_indices, tokenizer, device='cuda'): + if bow_indices is None: + return None + + one_hot_bows_vectors = [] + for single_bow in bow_indices: + single_bow = list(filter(lambda x: len(x) <= 1, single_bow)) + single_bow = torch.tensor(single_bow).to(device) + num_words = single_bow.shape[0] + one_hot_bow = torch.zeros(num_words, tokenizer.vocab_size).to(device) + one_hot_bow.scatter_(1, single_bow, 1) + one_hot_bows_vectors.append(one_hot_bow) + return one_hot_bows_vectors + + +def full_text_generation( + model, + tokenizer, + context=None, + num_samples=1, + device="cuda", + bag_of_words=None, + discrim=None, + class_label=None, + length=100, + stepsize=0.02, + temperature=1.0, + top_k=10, + sample=False, + num_iterations=3, + grad_length=10000, + horizon_length=1, + window_length=0, + decay=False, + gamma=1.5, + gm_scale=0.9, + kl_scale=0.01, + **kwargs +): + classifier, class_id = get_classifier( + discrim, + class_label, + device + ) + + bow_indices = [] + if bag_of_words: + bow_indices = get_bag_of_words_indices(bag_of_words.split(";"), + tokenizer) + + if bag_of_words and classifier: + print("Both PPLM-BoW and PPLM-Discrim are on. 
This is not optimized.") + loss_type = PPLM_BOW_DISCRIM + + elif bag_of_words: + loss_type = PPLM_BOW + print("Using PPLM-BoW") + + elif classifier is not None: + loss_type = PPLM_DISCRIM + print("Using PPLM-Discrim") + + else: + raise Exception("Specify either a bag of words or a discriminator") + + unpert_gen_tok_text, _, _ = generate_text_pplm( + model=model, + tokenizer=tokenizer, + context=context, + device=device, + length=length, + sample=sample, + perturb=False + ) + if device == 'cuda': + torch.cuda.empty_cache() + + pert_gen_tok_texts = [] + discrim_losses = [] + losses_in_time = [] + + for i in range(num_samples): + pert_gen_tok_text, discrim_loss, loss_in_time = generate_text_pplm( + model=model, + tokenizer=tokenizer, + context=context, + device=device, + perturb=True, + bow_indices=bow_indices, + classifier=classifier, + class_label=class_id, + loss_type=loss_type, + length=length, + stepsize=stepsize, + temperature=temperature, + top_k=top_k, + sample=sample, + num_iterations=num_iterations, + grad_length=grad_length, + horizon_length=horizon_length, + window_length=window_length, + decay=decay, + gamma=gamma, + gm_scale=gm_scale, + kl_scale=kl_scale, + ) + pert_gen_tok_texts.append(pert_gen_tok_text) + if classifier is not None: + discrim_losses.append(discrim_loss.data.cpu().numpy()) + losses_in_time.append(loss_in_time) + + if device == 'cuda': + torch.cuda.empty_cache() + + return unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time + + +def generate_text_pplm( + model, + tokenizer, + context=None, + past=None, + device="cuda", + perturb=True, + bow_indices=None, + classifier=None, + class_label=None, + loss_type=0, + length=100, + stepsize=0.02, + temperature=1.0, + top_k=10, + sample=False, + num_iterations=3, + grad_length=10000, + horizon_length=1, + window_length=0, + decay=False, + gamma=1.5, + gm_scale=0.9, + kl_scale=0.01, +): + output_so_far = None + if context: + context_t = torch.tensor(context, device=device, dtype=torch.long) + while len(context_t.shape) < 2: + context_t = context_t.unsqueeze(0) + output_so_far = context_t + + # collect one hot vectors for bags of words + one_hot_bows_vectors = build_bows_one_hot_vectors(bow_indices, tokenizer, + device) + + grad_norms = None + last = None + unpert_discrim_loss = 0 + loss_in_time = [] + for i in trange(length, ascii=True): + + # Get past/probs for current output, except for last word + # Note that GPT takes 2 inputs: past + current_token + + # run model forward to obtain unperturbed + if past is None and output_so_far is not None: + last = output_so_far[:, -1:] + if output_so_far.shape[1] > 1: + _, past, _ = model(output_so_far[:, :-1]) + + unpert_logits, unpert_past, unpert_all_hidden = model(output_so_far) + unpert_last_hidden = unpert_all_hidden[-1] + + # check if we are abowe grad max length + if i >= grad_length: + current_stepsize = stepsize * 0 + else: + current_stepsize = stepsize + + # modify the past if necessary + if not perturb or num_iterations == 0: + pert_past = past + + else: + accumulated_hidden = unpert_last_hidden[:, :-1, :] + accumulated_hidden = torch.sum(accumulated_hidden, dim=1) + + if past is not None: + pert_past, _, grad_norms, loss_this_iter = perturb_past( + past, + model, + last, + unpert_past=unpert_past, + unpert_logits=unpert_logits, + accumulated_hidden=accumulated_hidden, + grad_norms=grad_norms, + stepsize=current_stepsize, + one_hot_bows_vectors=one_hot_bows_vectors, + classifier=classifier, + class_label=class_label, + loss_type=loss_type, + 
num_iterations=num_iterations, + horizon_length=horizon_length, + window_length=window_length, + decay=decay, + gamma=gamma, + kl_scale=kl_scale, + device=device, + ) + loss_in_time.append(loss_this_iter) + else: + pert_past = past + + pert_logits, past, pert_all_hidden = model(last, past=pert_past) + pert_logits = pert_logits[:, -1, :] / temperature # + SMALL_CONST + pert_probs = F.softmax(pert_logits, dim=-1) + + if classifier is not None: + ce_loss = torch.nn.CrossEntropyLoss() + prediction = classifier(torch.mean(unpert_last_hidden, dim=1)) + label = torch.tensor([class_label], device=device, + dtype=torch.long) + unpert_discrim_loss = ce_loss(prediction, label) + print( + "unperturbed discrim loss", + unpert_discrim_loss.data.cpu().numpy() + ) + else: + unpert_discrim_loss = 0 + + # Fuse the modified model and original model + if perturb: + + unpert_probs = F.softmax(unpert_logits[:, -1, :], dim=-1) + + pert_probs = ((pert_probs ** gm_scale) * ( + unpert_probs ** (1 - gm_scale))) # + SMALL_CONST + pert_probs = top_k_filter(pert_probs, k=top_k, + probs=True) # + SMALL_CONST + + # rescale + if torch.sum(pert_probs) <= 1: + pert_probs = pert_probs / torch.sum(pert_probs) + + else: + pert_logits = top_k_filter(pert_logits, k=top_k) # + SMALL_CONST + pert_probs = F.softmax(pert_logits, dim=-1) + + # sample or greedy + if sample: + last = torch.multinomial(pert_probs, num_samples=1) + + else: + _, last = torch.topk(pert_probs, k=1, dim=-1) + + # update context/output_so_far appending the new token + output_so_far = ( + last if output_so_far is None + else torch.cat((output_so_far, last), dim=1) + ) + + print(tokenizer.decode(output_so_far.tolist()[0])) + + return output_so_far, unpert_discrim_loss, loss_in_time + + +def set_generic_model_params(discrim_weights, discrim_meta): + if discrim_weights is None: + raise ValueError('When using a generic discriminator, ' + 'discrim_weights need to be specified') + if discrim_meta is None: + raise ValueError('When using a generic discriminator, ' + 'discrim_meta need to be specified') + + with open(discrim_meta, 'r') as discrim_meta_file: + meta = json.load(discrim_meta_file) + meta['path'] = discrim_weights + DISCRIMINATOR_MODELS_PARAMS['generic'] = meta + + +def run_pplm_example( + pretrained_model="gpt2-medium", + cond_text="", + uncond=False, + num_samples=1, + bag_of_words=None, + discrim=None, + discrim_weights=None, + discrim_meta=None, + class_label=-1, + length=100, + stepsize=0.02, + temperature=1.0, + top_k=10, + sample=False, + num_iterations=3, + grad_length=10000, + horizon_length=1, + window_length=0, + decay=False, + gamma=1.5, + gm_scale=0.9, + kl_scale=0.01, + seed=0, + no_cuda=False, + colorama=False +): + # set Random seed + torch.manual_seed(seed) + np.random.seed(seed) + + # set the device + device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu" + + if discrim == 'generic': + set_generic_model_params(discrim_weights, discrim_meta) + + if discrim is not None: + pretrained_model = DISCRIMINATOR_MODELS_PARAMS[discrim][ + "pretrained_model" + ] + print("discrim = {}, pretrained_model set " + "to discriminator's = {}".format(discrim, pretrained_model)) + + # load pretrained model + model = GPT2LMHeadModel.from_pretrained( + pretrained_model, + output_hidden_states=True + ) + model.to(device) + model.eval() + + # load tokenizer + tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model) + + # Freeze GPT-2 weights + for param in model.parameters(): + param.requires_grad = False + + # figure out conditioning text + if 
uncond: + tokenized_cond_text = tokenizer.encode( + [tokenizer.bos_token] + ) + else: + raw_text = cond_text + while not raw_text: + print("Did you forget to add `--cond_text`? ") + raw_text = input("Model prompt >>> ") + tokenized_cond_text = tokenizer.encode(tokenizer.bos_token + raw_text) + + print("= Prefix of sentence =") + print(tokenizer.decode(tokenized_cond_text)) + print() + + # generate unperturbed and perturbed texts + + # full_text_generation returns: + # unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time + unpert_gen_tok_text, pert_gen_tok_texts, _, _ = full_text_generation( + model=model, + tokenizer=tokenizer, + context=tokenized_cond_text, + device=device, + num_samples=num_samples, + bag_of_words=bag_of_words, + discrim=discrim, + class_label=class_label, + length=length, + stepsize=stepsize, + temperature=temperature, + top_k=top_k, + sample=sample, + num_iterations=num_iterations, + grad_length=grad_length, + horizon_length=horizon_length, + window_length=window_length, + decay=decay, + gamma=gamma, + gm_scale=gm_scale, + kl_scale=kl_scale, + ) + + # untokenize unperturbed text + unpert_gen_text = tokenizer.decode(unpert_gen_tok_text.tolist()[0]) + + print("=" * 80) + print("= Unperturbed generated text =") + print(unpert_gen_text) + print() + + generated_texts = [] + + bow_word_ids = set() + if bag_of_words and colorama: + bow_indices = get_bag_of_words_indices(bag_of_words.split(";"), + tokenizer) + for single_bow_list in bow_indices: + # filtering all words in the list composed of more than 1 token + filtered = list(filter(lambda x: len(x) <= 1, single_bow_list)) + # w[0] because we are sure w has only 1 item because previous fitler + bow_word_ids.update(w[0] for w in filtered) + + # iterate through the perturbed texts + for i, pert_gen_tok_text in enumerate(pert_gen_tok_texts): + try: + # untokenize unperturbed text + if colorama: + import colorama + + pert_gen_text = '' + for word_id in pert_gen_tok_text.tolist()[0]: + if word_id in bow_word_ids: + pert_gen_text += '{}{}{}'.format( + colorama.Fore.RED, + tokenizer.decode([word_id]), + colorama.Style.RESET_ALL + ) + else: + pert_gen_text += tokenizer.decode([word_id]) + else: + pert_gen_text = tokenizer.decode(pert_gen_tok_text.tolist()[0]) + + print("= Perturbed generated text {} =".format(i + 1)) + print(pert_gen_text) + print() + except: + pass + + # keep the prefix, perturbed seq, original seq for each index + generated_texts.append( + (tokenized_cond_text, pert_gen_tok_text, unpert_gen_tok_text) + ) + + return + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + "--pretrained_model", + "-M", + type=str, + default="gpt2-medium", + help="pretrained model name or path to local checkpoint", + ) + parser.add_argument( + "--cond_text", type=str, default="The lake", + help="Prefix texts to condition on" + ) + parser.add_argument( + "--uncond", action="store_true", + help="Generate from end-of-text as prefix" + ) + parser.add_argument( + "--num_samples", + type=int, + default=1, + help="Number of samples to generate from the modified latents", + ) + parser.add_argument( + "--bag_of_words", + "-B", + type=str, + default=None, + help="Bags of words used for PPLM-BoW. " + "Either a BOW id (see list in code) or a filepath. 
" + "Multiple BoWs separated by ;", + ) + parser.add_argument( + "--discrim", + "-D", + type=str, + default=None, + choices=("clickbait", "sentiment", "toxicity", "generic"), + help="Discriminator to use", + ) + parser.add_argument('--discrim_weights', type=str, default=None, + help='Weights for the generic discriminator') + parser.add_argument('--discrim_meta', type=str, default=None, + help='Meta information for the generic discriminator') + parser.add_argument( + "--class_label", + type=int, + default=-1, + help="Class label used for the discriminator", + ) + parser.add_argument("--length", type=int, default=100) + parser.add_argument("--stepsize", type=float, default=0.02) + parser.add_argument("--temperature", type=float, default=1.0) + parser.add_argument("--top_k", type=int, default=10) + parser.add_argument( + "--sample", action="store_true", + help="Generate from end-of-text as prefix" + ) + parser.add_argument("--num_iterations", type=int, default=3) + parser.add_argument("--grad_length", type=int, default=10000) + parser.add_argument( + "--window_length", + type=int, + default=0, + help="Length of past which is being optimized; " + "0 corresponds to infinite window length", + ) + parser.add_argument( + "--horizon_length", + type=int, + default=1, + help="Length of future to optimize over", + ) + parser.add_argument("--decay", action="store_true", + help="whether to decay or not") + parser.add_argument("--gamma", type=float, default=1.5) + parser.add_argument("--gm_scale", type=float, default=0.9) + parser.add_argument("--kl_scale", type=float, default=0.01) + parser.add_argument("--seed", type=int, default=0) + parser.add_argument("--no_cuda", action="store_true", help="no cuda") + parser.add_argument("--colorama", action="store_true", + help="colors keywords") + + args = parser.parse_args() + run_pplm_example(**vars(args)) diff --git a/examples/pplm/run_pplm_discrim_train.py b/examples/pplm/run_pplm_discrim_train.py new file mode 100644 index 00000000000..3055139d8c5 --- /dev/null +++ b/examples/pplm/run_pplm_discrim_train.py @@ -0,0 +1,588 @@ +#! /usr/bin/env python3 +# coding=utf-8 + +#Copyright (c) 2019 Uber Technologies, Inc. +# +#Licensed under the Apache License, Version 2.0 (the "License"); +#you may not use this file except in compliance with the License. +#You may obtain a copy of the License at +# +#http://www.apache.org/licenses/LICENSE-2.0 +# +#Unless required by applicable law or agreed to in writing, software +#distributed under the License is distributed on an "AS IS" BASIS, +#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +#See the License for the specific language governing permissions and +#limitations under the License. + +import argparse +import csv +import json +import math +import time + +import numpy as np +import torch +import torch.nn.functional as F +import torch.optim +import torch.optim as optim +import torch.utils.data as data +from nltk.tokenize.treebank import TreebankWordDetokenizer +from torchtext import data as torchtext_data +from torchtext import datasets +from tqdm import tqdm, trange + +from transformers import GPT2Tokenizer, GPT2LMHeadModel +from pplm_classification_head import ClassificationHead + +torch.manual_seed(0) +np.random.seed(0) +EPSILON = 1e-10 +example_sentence = "This is incredible! I love it, this is the best chicken I have ever had." 
+max_length_seq = 100 + + + + +class Discriminator(torch.nn.Module): + """Transformer encoder followed by a Classification Head""" + + def __init__( + self, + class_size, + pretrained_model="gpt2-medium", + cached_mode=False, + device='cpu' + ): + super(Discriminator, self).__init__() + self.tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model) + self.encoder = GPT2LMHeadModel.from_pretrained(pretrained_model) + self.embed_size = self.encoder.transformer.config.hidden_size + self.classifier_head = ClassificationHead( + class_size=class_size, + embed_size=self.embed_size + ) + self.cached_mode = cached_mode + self.device = device + + def get_classifier(self): + return self.classifier_head + + def train_custom(self): + for param in self.encoder.parameters(): + param.requires_grad = False + self.classifier_head.train() + + def avg_representation(self, x): + mask = x.ne(0).unsqueeze(2).repeat( + 1, 1, self.embed_size + ).float().to(self.device).detach() + hidden, _ = self.encoder.transformer(x) + masked_hidden = hidden * mask + avg_hidden = torch.sum(masked_hidden, dim=1) / ( + torch.sum(mask, dim=1).detach() + EPSILON + ) + return avg_hidden + + def forward(self, x): + if self.cached_mode: + avg_hidden = x.to(self.device) + else: + avg_hidden = self.avg_representation(x.to(self.device)) + + logits = self.classifier_head(avg_hidden) + probs = F.log_softmax(logits, dim=-1) + + return probs + + +class Dataset(data.Dataset): + def __init__(self, X, y): + """Reads source and target sequences from txt files.""" + self.X = X + self.y = y + + def __len__(self): + return len(self.X) + + def __getitem__(self, index): + """Returns one data pair (source and target).""" + data = {} + data["X"] = self.X[index] + data["y"] = self.y[index] + return data + + +def collate_fn(data): + def pad_sequences(sequences): + lengths = [len(seq) for seq in sequences] + + padded_sequences = torch.zeros( + len(sequences), + max(lengths) + ).long() # padding value = 0 + + for i, seq in enumerate(sequences): + end = lengths[i] + padded_sequences[i, :end] = seq[:end] + + return padded_sequences, lengths + + item_info = {} + for key in data[0].keys(): + item_info[key] = [d[key] for d in data] + + x_batch, _ = pad_sequences(item_info["X"]) + y_batch = torch.tensor(item_info["y"], dtype=torch.long) + + return x_batch, y_batch + + +def cached_collate_fn(data): + item_info = {} + for key in data[0].keys(): + item_info[key] = [d[key] for d in data] + + x_batch = torch.cat(item_info["X"], 0) + y_batch = torch.tensor(item_info["y"], dtype=torch.long) + + return x_batch, y_batch + + +def train_epoch(data_loader, discriminator, optimizer, + epoch=0, log_interval=10, device='cpu'): + samples_so_far = 0 + discriminator.train_custom() + for batch_idx, (input_t, target_t) in enumerate(data_loader): + input_t, target_t = input_t.to(device), target_t.to(device) + + optimizer.zero_grad() + + output_t = discriminator(input_t) + loss = F.nll_loss(output_t, target_t) + loss.backward(retain_graph=True) + optimizer.step() + + samples_so_far += len(input_t) + + if batch_idx % log_interval == 0: + print( + "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format( + epoch + 1, + samples_so_far, len(data_loader.dataset), + 100 * samples_so_far / len(data_loader.dataset), loss.item() + ) + ) + + +def evaluate_performance(data_loader, discriminator, device='cpu'): + discriminator.eval() + test_loss = 0 + correct = 0 + with torch.no_grad(): + for input_t, target_t in data_loader: + input_t, target_t = input_t.to(device), target_t.to(device) + 
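Discriminator.avg_representation above pools the GPT-2 hidden states into one vector per sentence by averaging only over non-padding positions (the collate function pads with token id 0). A minimal sketch of that masked mean pooling on made-up tensors, not part of this patch:

import torch

input_ids = torch.tensor([[5, 8, 2, 0, 0],       # two sequences, padded with 0
                          [7, 1, 4, 9, 3]])
hidden = torch.randn(2, 5, 16)                    # (batch, seq_len, hidden_size)

mask = input_ids.ne(0).unsqueeze(-1).float()      # 1.0 at real tokens, 0.0 at padding
summed = (hidden * mask).sum(dim=1)               # sum hidden states over real tokens only
avg_hidden = summed / (mask.sum(dim=1) + 1e-10)   # divide by the number of real tokens
print(avg_hidden.shape)                           # torch.Size([2, 16])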
output_t = discriminator(input_t) + # sum up batch loss + test_loss += F.nll_loss(output_t, target_t, reduction="sum").item() + # get the index of the max log-probability + pred_t = output_t.argmax(dim=1, keepdim=True) + correct += pred_t.eq(target_t.view_as(pred_t)).sum().item() + + test_loss /= len(data_loader.dataset) + + print( + "Performance on test set: " + "Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)".format( + test_loss, correct, len(data_loader.dataset), + 100. * correct / len(data_loader.dataset) + ) + ) + + +def predict(input_sentence, model, classes, cached=False, device='cpu'): + input_t = model.tokenizer.encode(input_sentence) + input_t = torch.tensor([input_t], dtype=torch.long, device=device) + if cached: + input_t = model.avg_representation(input_t) + + log_probs = model(input_t).data.cpu().numpy().flatten().tolist() + print("Input sentence:", input_sentence) + print("Predictions:", ", ".join( + "{}: {:.4f}".format(c, math.exp(log_prob)) for c, log_prob in + zip(classes, log_probs) + )) + + +def get_cached_data_loader(dataset, batch_size, discriminator, + shuffle=False, device='cpu'): + data_loader = torch.utils.data.DataLoader(dataset=dataset, + batch_size=batch_size, + collate_fn=collate_fn) + + xs = [] + ys = [] + for batch_idx, (x, y) in enumerate(tqdm(data_loader, ascii=True)): + with torch.no_grad(): + x = x.to(device) + avg_rep = discriminator.avg_representation(x).cpu().detach() + avg_rep_list = torch.unbind(avg_rep.unsqueeze(1)) + xs += avg_rep_list + ys += y.cpu().numpy().tolist() + + data_loader = torch.utils.data.DataLoader( + dataset=Dataset(xs, ys), + batch_size=batch_size, + shuffle=shuffle, + collate_fn=cached_collate_fn) + + return data_loader + + +def train_discriminator( + dataset, dataset_fp=None, pretrained_model="gpt2-medium", + epochs=10, batch_size=64, log_interval=10, + save_model=False, cached=False, no_cuda=False): + device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu" + + print("Preprocessing {} dataset...".format(dataset)) + start = time.time() + + if dataset == "SST": + idx2class = ["positive", "negative", "very positive", "very negative", + "neutral"] + class2idx = {c: i for i, c in enumerate(idx2class)} + + discriminator = Discriminator( + class_size=len(idx2class), + pretrained_model=pretrained_model, + cached_mode=cached, + device=device + ).to(device) + + text = torchtext_data.Field() + label = torchtext_data.Field(sequential=False) + train_data, val_data, test_data = datasets.SST.splits( + text, + label, + fine_grained=True, + train_subtrees=True, + ) + + x = [] + y = [] + for i in trange(len(train_data), ascii=True): + seq = TreebankWordDetokenizer().detokenize( + vars(train_data[i])["text"] + ) + seq = discriminator.tokenizer.encode(seq) + seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) + x.append(seq) + y.append(class2idx[vars(train_data[i])["label"]]) + train_dataset = Dataset(x, y) + + test_x = [] + test_y = [] + for i in trange(len(test_data), ascii=True): + seq = TreebankWordDetokenizer().detokenize( + vars(test_data[i])["text"] + ) + seq = discriminator.tokenizer.encode(seq) + seq = torch.tensor([50256] + seq, device=device, dtype=torch.long) + test_x.append(seq) + test_y.append(class2idx[vars(test_data[i])["label"]]) + test_dataset = Dataset(test_x, test_y) + + discriminator_meta = { + "class_size": len(idx2class), + "embed_size": discriminator.embed_size, + "pretrained_model": pretrained_model, + "class_vocab": class2idx, + "default_class": 2, + } + + elif dataset == "clickbait": + 
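get_cached_data_loader above is what makes the --cached mode cheap: the frozen encoder's average representations are computed once under torch.no_grad() and reused every epoch, so gradients only ever flow through the small classification head. A minimal sketch of that pattern with a toy stand-in for the encoder (all names below are illustrative):

import torch
import torch.nn as nn

encoder = nn.Linear(16, 16)                       # stand-in for the frozen GPT-2 encoder
head = nn.Linear(16, 2)                           # classification head that is actually trained
inputs = torch.randn(8, 16)
labels = torch.randint(0, 2, (8,))

with torch.no_grad():                             # compute and cache representations once
    cached = encoder(inputs)

optimizer = torch.optim.Adam(head.parameters(), lr=1e-4)
for epoch in range(3):                            # every epoch reuses the cached features
    optimizer.zero_grad()
    loss = nn.functional.cross_entropy(head(cached), labels)
    loss.backward()
    optimizer.step()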
idx2class = ["non_clickbait", "clickbait"] + class2idx = {c: i for i, c in enumerate(idx2class)} + + discriminator = Discriminator( + class_size=len(idx2class), + pretrained_model=pretrained_model, + cached_mode=cached, + device=device + ).to(device) + + with open("datasets/clickbait/clickbait_train_prefix.txt") as f: + data = [] + for i, line in enumerate(f): + try: + data.append(eval(line)) + except: + print("Error evaluating line {}: {}".format( + i, line + )) + continue + x = [] + y = [] + with open("datasets/clickbait/clickbait_train_prefix.txt") as f: + for i, line in enumerate(tqdm(f, ascii=True)): + try: + d = eval(line) + seq = discriminator.tokenizer.encode(d["text"]) + + if len(seq) < max_length_seq: + seq = torch.tensor( + [50256] + seq, device=device, dtype=torch.long + ) + else: + print("Line {} is longer than maximum length {}".format( + i, max_length_seq + )) + continue + x.append(seq) + y.append(d["label"]) + except: + print("Error evaluating / tokenizing" + " line {}, skipping it".format(i)) + pass + + full_dataset = Dataset(x, y) + train_size = int(0.9 * len(full_dataset)) + test_size = len(full_dataset) - train_size + train_dataset, test_dataset = torch.utils.data.random_split( + full_dataset, [train_size, test_size] + ) + + discriminator_meta = { + "class_size": len(idx2class), + "embed_size": discriminator.embed_size, + "pretrained_model": pretrained_model, + "class_vocab": class2idx, + "default_class": 1, + } + + elif dataset == "toxic": + idx2class = ["non_toxic", "toxic"] + class2idx = {c: i for i, c in enumerate(idx2class)} + + discriminator = Discriminator( + class_size=len(idx2class), + pretrained_model=pretrained_model, + cached_mode=cached, + device=device + ).to(device) + + x = [] + y = [] + with open("datasets/toxic/toxic_train.txt") as f: + for i, line in enumerate(tqdm(f, ascii=True)): + try: + d = eval(line) + seq = discriminator.tokenizer.encode(d["text"]) + + if len(seq) < max_length_seq: + seq = torch.tensor( + [50256] + seq, device=device, dtype=torch.long + ) + else: + print("Line {} is longer than maximum length {}".format( + i, max_length_seq + )) + continue + x.append(seq) + y.append(int(np.sum(d["label"]) > 0)) + except: + print("Error evaluating / tokenizing" + " line {}, skipping it".format(i)) + pass + + full_dataset = Dataset(x, y) + train_size = int(0.9 * len(full_dataset)) + test_size = len(full_dataset) - train_size + train_dataset, test_dataset = torch.utils.data.random_split( + full_dataset, [train_size, test_size] + ) + + discriminator_meta = { + "class_size": len(idx2class), + "embed_size": discriminator.embed_size, + "pretrained_model": pretrained_model, + "class_vocab": class2idx, + "default_class": 0, + } + + else: # if dataset == "generic": + # This assumes the input dataset is a TSV with the following structure: + # class \t text + + if dataset_fp is None: + raise ValueError("When generic dataset is selected, " + "dataset_fp needs to be specified aswell.") + + classes = set() + with open(dataset_fp) as f: + csv_reader = csv.reader(f, delimiter="\t") + for row in tqdm(csv_reader, ascii=True): + if row: + classes.add(row[0]) + + idx2class = sorted(classes) + class2idx = {c: i for i, c in enumerate(idx2class)} + + discriminator = Discriminator( + class_size=len(idx2class), + pretrained_model=pretrained_model, + cached_mode=cached, + device=device + ).to(device) + + x = [] + y = [] + with open(dataset_fp) as f: + csv_reader = csv.reader(f, delimiter="\t") + for i, row in enumerate(tqdm(csv_reader, ascii=True)): + if row: + label = 
row[0] + text = row[1] + + try: + seq = discriminator.tokenizer.encode(text) + if (len(seq) < max_length_seq): + seq = torch.tensor( + [50256] + seq, + device=device, + dtype=torch.long + ) + + else: + print( + "Line {} is longer than maximum length {}".format( + i, max_length_seq + )) + continue + + x.append(seq) + y.append(class2idx[label]) + + except: + print("Error tokenizing line {}, skipping it".format(i)) + pass + + full_dataset = Dataset(x, y) + train_size = int(0.9 * len(full_dataset)) + test_size = len(full_dataset) - train_size + train_dataset, test_dataset = torch.utils.data.random_split( + full_dataset, + [train_size, test_size] + ) + + discriminator_meta = { + "class_size": len(idx2class), + "embed_size": discriminator.embed_size, + "pretrained_model": pretrained_model, + "class_vocab": class2idx, + "default_class": 0, + } + + end = time.time() + print("Preprocessed {} data points".format( + len(train_dataset) + len(test_dataset)) + ) + print("Data preprocessing took: {:.3f}s".format(end - start)) + + if cached: + print("Building representation cache...") + + start = time.time() + + train_loader = get_cached_data_loader( + train_dataset, batch_size, discriminator, + shuffle=True, device=device + ) + + test_loader = get_cached_data_loader( + test_dataset, batch_size, discriminator, device=device + ) + + end = time.time() + print("Building representation cache took: {:.3f}s".format(end - start)) + + else: + train_loader = torch.utils.data.DataLoader(dataset=train_dataset, + batch_size=batch_size, + shuffle=True, + collate_fn=collate_fn) + test_loader = torch.utils.data.DataLoader(dataset=test_dataset, + batch_size=batch_size, + collate_fn=collate_fn) + + if save_model: + with open("{}_classifier_head_meta.json".format(dataset), + "w") as meta_file: + json.dump(discriminator_meta, meta_file) + + optimizer = optim.Adam(discriminator.parameters(), lr=0.0001) + + for epoch in range(epochs): + start = time.time() + print("\nEpoch", epoch + 1) + + train_epoch( + discriminator=discriminator, + data_loader=train_loader, + optimizer=optimizer, + epoch=epoch, + log_interval=log_interval, + device=device + ) + evaluate_performance( + data_loader=test_loader, + discriminator=discriminator, + device=device + ) + + end = time.time() + print("Epoch took: {:.3f}s".format(end - start)) + + print("\nExample prediction") + predict(example_sentence, discriminator, idx2class, + cached=cached, device=device) + + if save_model: + # torch.save(discriminator.state_dict(), + # "{}_discriminator_{}.pt".format( + # args.dataset, epoch + 1 + # )) + torch.save(discriminator.get_classifier().state_dict(), + "{}_classifier_head_epoch_{}.pt".format(dataset, + epoch + 1)) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Train a discriminator on top of GPT-2 representations") + parser.add_argument("--dataset", type=str, default="SST", + choices=("SST", "clickbait", "toxic", "generic"), + help="dataset to train the discriminator on." + "In case of generic, the dataset is expected" + "to be a TSBV file with structure: class \\t text") + parser.add_argument("--dataset_fp", type=str, default="", + help="File path of the dataset to use. 
" + "Needed only in case of generic datadset") + parser.add_argument("--pretrained_model", type=str, default="gpt2-medium", + help="Pretrained model to use as encoder") + parser.add_argument("--epochs", type=int, default=10, metavar="N", + help="Number of training epochs") + parser.add_argument("--batch_size", type=int, default=64, metavar="N", + help="input batch size for training (default: 64)") + parser.add_argument("--log_interval", type=int, default=10, metavar="N", + help="how many batches to wait before logging training status") + parser.add_argument("--save_model", action="store_true", + help="whether to save the model") + parser.add_argument("--cached", action="store_true", + help="whether to cache the input representations") + parser.add_argument("--no_cuda", action="store_true", + help="use to turn off cuda") + args = parser.parse_args() + + train_discriminator(**(vars(args))) diff --git a/examples/run_bertology.py b/examples/run_bertology.py index f37358359dd..d1d05a10735 100644 --- a/examples/run_bertology.py +++ b/examples/run_bertology.py @@ -39,8 +39,9 @@ from transformers import (WEIGHTS_NAME, from run_glue import set_seed, load_and_cache_examples, ALL_MODELS, MODEL_CLASSES -from utils_glue import (compute_metrics, convert_examples_to_features, - output_modes, processors) +from transformers import glue_compute_metrics as compute_metrics +from transformers import glue_output_modes as output_modes +from transformers import glue_processors as processors logger = logging.getLogger(__name__) @@ -233,6 +234,8 @@ def main(): help="If > 0: limit the data to a subset of data_subset instances.") parser.add_argument("--overwrite_output_dir", action='store_true', help="Whether to overwrite data in output directory") + parser.add_argument('--overwrite_cache', action='store_true', + help="Overwrite the cached training and evaluation sets") parser.add_argument("--dont_normalize_importance_by_layer", action='store_true', help="Don't normalize importance score by layers") @@ -304,10 +307,16 @@ def main(): break config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, - num_labels=num_labels, finetuning_task=args.task_name, - output_attentions=True) - tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path) - model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) + num_labels=num_labels, + finetuning_task=args.task_name, + output_attentions=True, + cache_dir=args.cache_dir if args.cache_dir else None) + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, + cache_dir=args.cache_dir if args.cache_dir else None) + model = model_class.from_pretrained(args.model_name_or_path, + from_tf=bool('.ckpt' in args.model_name_or_path), + config=config, + cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab diff --git a/examples/run_generation.py b/examples/run_generation.py index ef58cfd844c..2d917660cf7 100644 --- a/examples/run_generation.py +++ b/examples/run_generation.py @@ -79,13 +79,12 @@ def set_seed(args): def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')): """ Filter a distribution of logits using top-k and/or 
nucleus (top-p) filtering Args: - logits: logits distribution shape (vocabulary size) + logits: logits distribution shape (batch size x vocabulary size) top_k > 0: keep only top k tokens with highest probability (top-k filtering). top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 """ - assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear top_k = min(top_k, logits.size(-1)) # Safety check if top_k > 0: # Remove all tokens with a probability less than the last token of the top-k @@ -102,7 +101,8 @@ def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf') sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 - indices_to_remove = sorted_indices[sorted_indices_to_remove] + # scatter sorted tensors to original indexing + indices_to_remove = sorted_indices_to_remove.scatter(dim=1, index=sorted_indices, src=sorted_indices_to_remove) logits[indices_to_remove] = filter_value return logits @@ -136,18 +136,19 @@ def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k= inputs["langs"] = torch.tensor([xlm_lang] * inputs["input_ids"].shape[1], device=device).view(1, -1) outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet/CTRL (cached hidden-states) - next_token_logits = outputs[0][0, -1, :] / (temperature if temperature > 0 else 1.) + next_token_logits = outputs[0][:, -1, :] / (temperature if temperature > 0 else 1.) - # reptition penalty from CTRL (https://arxiv.org/abs/1909.05858) - for _ in set(generated.view(-1).tolist()): - next_token_logits[_] /= repetition_penalty + # repetition penalty from CTRL (https://arxiv.org/abs/1909.05858) + for i in range(num_samples): + for _ in set(generated[i].tolist()): + next_token_logits[i, _] /= repetition_penalty filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p) - if temperature == 0: #greedy sampling: - next_token = torch.argmax(filtered_logits).unsqueeze(0) + if temperature == 0: # greedy sampling: + next_token = torch.argmax(filtered_logits, dim=-1).unsqueeze(-1) else: next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1) - generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1) + generated = torch.cat((generated, next_token), dim=1) return generated @@ -161,6 +162,7 @@ def main(): parser.add_argument("--padding_text", type=str, default="") parser.add_argument("--xlm_lang", type=str, default="", help="Optional language when used with the XLM model.") parser.add_argument("--length", type=int, default=20) + parser.add_argument("--num_samples", type=int, default=1) parser.add_argument("--temperature", type=float, default=1.0, help="temperature of 0 implies greedy sampling") parser.add_argument("--repetition_penalty", type=float, default=1.0, @@ -196,7 +198,7 @@ def main(): logger.info(args) if args.model_type in ["ctrl"]: - if args.temperature > 0.7 : + if args.temperature > 0.7: logger.info('CTRL typically works better with lower temperatures (and lower top_k).') while True: @@ -223,10 +225,14 @@ def main(): if args.model_type in ["transfo-xl", "xlnet"]: # Models with memory likes to have a long prompt for short inputs. 
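With the batch-size-1 assert removed, top_k_top_p_filtering now builds the nucleus mask per row in sorted order and scatters it back to the original vocabulary indexing, so a whole (batch_size, vocab_size) tensor can be filtered at once. A minimal sketch of the top-p half of that logic on a toy batch, not the library function itself:

import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 0.5, 0.1, -1.0],
                       [0.3, 3.0, 0.2, 0.1]])
top_p = 0.8

sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

sorted_indices_to_remove = cumulative_probs > top_p
# shift right so the first token above the threshold is still kept
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = False

# scatter the mask back to the original (unsorted) vocabulary positions
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
filtered_logits = logits.masked_fill(indices_to_remove, float("-inf"))
print(filtered_logits)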
raw_text = (args.padding_text if args.padding_text else PADDING_TEXT) + raw_text - context_tokens = tokenizer.encode(raw_text) + context_tokens = tokenizer.encode(raw_text, add_special_tokens=False) + if args.model_type == "ctrl": + if not any(context_tokens[0] == x for x in tokenizer.control_codes.values()): + logger.info("WARNING! You are not starting your generation from a control code so you won't get good results") out = sample_sequence( model=model, context=context_tokens, + num_samples=args.num_samples, length=args.length, temperature=args.temperature, top_k=args.top_k, @@ -238,12 +244,13 @@ def main(): xlm_lang=xlm_lang, device=args.device, ) - out = out[0, len(context_tokens):].tolist() + out = out[:, len(context_tokens):].tolist() + for o in out: + text = tokenizer.decode(o, clean_up_tokenization_spaces=True) + text = text[: text.find(args.stop_token) if args.stop_token else None] - text = tokenizer.decode(out, clean_up_tokenization_spaces=True, skip_special_tokens=True) - text = text[: text.find(args.stop_token) if args.stop_token else None] + print(text) - print(text) if args.prompt: break return text diff --git a/examples/run_glue.py b/examples/run_glue.py index 45924c92906..1a51255c110 100644 --- a/examples/run_glue.py +++ b/examples/run_glue.py @@ -22,6 +22,7 @@ import glob import logging import os import random +import json import numpy as np import torch @@ -47,9 +48,13 @@ from transformers import (WEIGHTS_NAME, BertConfig, XLNetTokenizer, DistilBertConfig, DistilBertForSequenceClassification, - DistilBertTokenizer) + DistilBertTokenizer, + AlbertConfig, + AlbertForSequenceClassification, + AlbertTokenizer, + ) -from transformers import AdamW, WarmupLinearSchedule +from transformers import AdamW, get_linear_schedule_with_warmup from transformers import glue_compute_metrics as compute_metrics from transformers import glue_output_modes as output_modes @@ -66,7 +71,8 @@ MODEL_CLASSES = { 'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer), 'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer), 'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer), - 'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer) + 'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer), + 'albert': (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer) } @@ -99,8 +105,9 @@ def train(args, train_dataset, model, tokenizer): {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) - scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp @@ -154,28 +161,39 @@ def train(args, train_dataset, model, tokenizer): if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() - torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: loss.backward() - torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() - if (step + 1) % args.gradient_accumulation_steps == 0 and not args.tpu: + if (step + 1) % 
args.gradient_accumulation_steps == 0: + if args.fp16: + torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) + else: + torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: - # Log metrics + logs = {} if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): - tb_writer.add_scalar('eval_{}'.format(key), value, global_step) - tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) - tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) + eval_key = 'eval_{}'.format(key) + logs[eval_key] = value + + loss_scalar = (tr_loss - logging_loss) / args.logging_steps + learning_rate_scalar = scheduler.get_lr()[0] + logs['learning_rate'] = learning_rate_scalar + logs['loss'] = loss_scalar logging_loss = tr_loss + for key, value in logs.items(): + tb_writer.add_scalar(key, value, global_step) + print(json.dumps({**logs, **{'step': global_step}})) + if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) @@ -186,11 +204,6 @@ def train(args, train_dataset, model, tokenizer): torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) - if args.tpu: - args.xla_model.optimizer_step(optimizer, barrier=True) - model.zero_grad() - global_step += 1 - if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break @@ -218,9 +231,13 @@ def evaluate(args, model, tokenizer, prefix=""): args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly - eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + # multi-gpu eval + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + # Eval! 
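The training-loop changes above swap WarmupLinearSchedule(optimizer, warmup_steps, t_total) for get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps) and move gradient clipping so it only runs on optimizer-update steps, i.e. at gradient-accumulation boundaries. A minimal sketch of that skeleton with a toy model and made-up sizes:

import torch
from transformers import AdamW, get_linear_schedule_with_warmup

model = torch.nn.Linear(10, 2)
optimizer = AdamW(model.parameters(), lr=5e-5, eps=1e-8)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)
gradient_accumulation_steps, max_grad_norm = 4, 1.0

for step in range(100):
    loss = model(torch.randn(8, 10)).pow(2).mean() / gradient_accumulation_steps
    loss.backward()
    if (step + 1) % gradient_accumulation_steps == 0:   # clip and update only at boundaries
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
        optimizer.step()
        scheduler.step()                                 # advance the warmup/decay schedule
        model.zero_grad()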
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) @@ -315,7 +332,7 @@ def load_and_cache_examples(args, task, tokenizer, evaluate=False): all_labels = torch.tensor([f.label for f in features], dtype=torch.long) elif output_mode == "regression": all_labels = torch.tensor([f.label for f in features], dtype=torch.float) - + dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) return dataset @@ -359,11 +376,11 @@ def main(): parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.") + help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, - help="Weight deay if we apply some.") + help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, @@ -390,15 +407,6 @@ def main(): parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") - parser.add_argument('--tpu', action='store_true', - help="Whether to run on the TPU defined in the environment variables") - parser.add_argument('--tpu_ip_address', type=str, default='', - help="TPU IP address if none are set in the environment variables") - parser.add_argument('--tpu_name', type=str, default='', - help="TPU name if none are set in the environment variables") - parser.add_argument('--xrt_tpu_config', type=str, default='', - help="XRT TPU config if none are set in the environment variables") - parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', @@ -432,23 +440,6 @@ def main(): args.n_gpu = 1 args.device = device - if args.tpu: - if args.tpu_ip_address: - os.environ["TPU_IP_ADDRESS"] = args.tpu_ip_address - if args.tpu_name: - os.environ["TPU_NAME"] = args.tpu_name - if args.xrt_tpu_config: - os.environ["XRT_TPU_CONFIG"] = args.xrt_tpu_config - - assert "TPU_IP_ADDRESS" in os.environ - assert "TPU_NAME" in os.environ - assert "XRT_TPU_CONFIG" in os.environ - - import torch_xla - import torch_xla.core.xla_model as xm - args.device = xm.xla_device() - args.xla_model = xm - # Setup logging logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', @@ -474,9 +465,17 @@ def main(): args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] - config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name) - tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case) - model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) + config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, + 
num_labels=num_labels, + finetuning_task=args.task_name, + cache_dir=args.cache_dir if args.cache_dir else None) + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, + do_lower_case=args.do_lower_case, + cache_dir=args.cache_dir if args.cache_dir else None) + model = model_class.from_pretrained(args.model_name_or_path, + from_tf=bool('.ckpt' in args.model_name_or_path), + config=config, + cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab @@ -494,7 +493,7 @@ def main(): # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() - if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0) and not args.tpu: + if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) @@ -511,7 +510,7 @@ def main(): # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) - tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) + tokenizer = tokenizer_class.from_pretrained(args.output_dir) model.to(args.device) diff --git a/examples/run_lm_finetuning.py b/examples/run_lm_finetuning.py index 571bcb43919..c4c73e71afc 100644 --- a/examples/run_lm_finetuning.py +++ b/examples/run_lm_finetuning.py @@ -42,12 +42,13 @@ except: from tqdm import tqdm, trange -from transformers import (WEIGHTS_NAME, AdamW, WarmupLinearSchedule, +from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, BertConfig, BertForMaskedLM, BertTokenizer, GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, - DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) + DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, + CamembertConfig, CamembertForMaskedLM, CamembertTokenizer) logger = logging.getLogger(__name__) @@ -58,17 +59,18 @@ MODEL_CLASSES = { 'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), - 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) + 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), + 'camembert': (CamembertConfig, CamembertForMaskedLM, CamembertTokenizer) } class TextDataset(Dataset): - def __init__(self, tokenizer, file_path='train', block_size=512): + def __init__(self, tokenizer, args, file_path='train', block_size=512): assert os.path.isfile(file_path) directory, filename = os.path.split(file_path) - cached_features_file = os.path.join(directory, 'cached_lm_' + str(block_size) + '_' + filename) + cached_features_file = os.path.join(directory, args.model_name_or_path + '_cached_lm_' + str(block_size) + '_' + filename) - if os.path.exists(cached_features_file): + if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) with open(cached_features_file, 'rb') as handle: self.examples = pickle.load(handle) @@ -99,7 +101,7 @@ class TextDataset(Dataset): def 
load_and_cache_examples(args, tokenizer, evaluate=False): - dataset = TextDataset(tokenizer, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size) + dataset = TextDataset(tokenizer, args, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size) return dataset @@ -185,7 +187,14 @@ def train(args, train_dataset, model, tokenizer): {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) - scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) + + # Check if saved optimizer or scheduler states exist + if os.path.isfile(os.path.join(args.model_name_or_path, 'optimizer.pt')) and os.path.isfile(os.path.join(args.model_name_or_path, 'scheduler.pt')): + # Load in optimizer and scheduler states + optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'optimizer.pt'))) + scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'scheduler.pt'))) + if args.fp16: try: from apex import amp @@ -214,13 +223,37 @@ def train(args, train_dataset, model, tokenizer): logger.info(" Total optimization steps = %d", t_total) global_step = 0 + epochs_trained = 0 + steps_trained_in_current_epoch = 0 + # Check if continuing training from a checkpoint + if os.path.exists(args.model_name_or_path): + # set global_step to gobal_step of last saved checkpoint from model path + global_step = int(args.model_name_or_path.split('-')[-1].split('/')[0]) + epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) + steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) + + logger.info(" Continuing training from checkpoint, will skip to saved global_step") + logger.info(" Continuing training from epoch %d", epochs_trained) + logger.info(" Continuing training from global step %d", global_step) + logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) + tr_loss, logging_loss = 0.0, 0.0 + + model_to_resize = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training + model_to_resize.resize_token_embeddings(len(tokenizer)) + model.zero_grad() - train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) + train_iterator = trange(epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproducibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): + + # Skip past any already trained steps if resuming training + if steps_trained_in_current_epoch > 0: + steps_trained_in_current_epoch -= 1 + continue + inputs, labels = mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch) inputs = inputs.to(args.device) labels = labels.to(args.device) @@ -268,11 +301,17 @@ def train(args, train_dataset, model, tokenizer): os.makedirs(output_dir) model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training 
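Resuming in run_lm_finetuning works by parsing the saved global_step out of the checkpoint directory name (e.g. "checkpoint-1500") and converting it back into a number of completed epochs plus a number of update steps to skip inside the current epoch. A small worked sketch of that arithmetic with made-up numbers:

model_name_or_path = "output/checkpoint-1500"
batches_per_epoch = 400                          # len(train_dataloader)
gradient_accumulation_steps = 2

global_step = int(model_name_or_path.split('-')[-1].split('/')[0])     # 1500
updates_per_epoch = batches_per_epoch // gradient_accumulation_steps   # 200
epochs_trained = global_step // updates_per_epoch                      # 7 full epochs already done
steps_trained_in_current_epoch = global_step % updates_per_epoch       # skip the first 100 steps
print(epochs_trained, steps_trained_in_current_epoch)                  # 7 100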
model_to_save.save_pretrained(output_dir) + tokenizer.save_pretrained(output_dir) + torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) _rotate_checkpoints(args, checkpoint_prefix) + torch.save(optimizer.state_dict(), os.path.join(output_dir, 'optimizer.pt')) + torch.save(scheduler.state_dict(), os.path.join(output_dir, 'scheduler.pt')) + logger.info("Saving optimizer and scheduler states to %s", output_dir) + if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break @@ -297,9 +336,13 @@ def evaluate(args, model, tokenizer, prefix=""): args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly - eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + # multi-gpu evaluate + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) @@ -309,10 +352,12 @@ def evaluate(args, model, tokenizer, prefix=""): model.eval() for batch in tqdm(eval_dataloader, desc="Evaluating"): - batch = batch.to(args.device) + inputs, labels = mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch) + inputs = inputs.to(args.device) + labels = labels.to(args.device) with torch.no_grad(): - outputs = model(batch, masked_lm_labels=batch) if args.mlm else model(batch, labels=batch) + outputs = model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels) lm_loss = outputs[0] eval_loss += lm_loss.mean().item() nb_eval_steps += 1 @@ -425,7 +470,7 @@ def main(): parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") args = parser.parse_args() - if args.model_type in ["bert", "roberta", "distilbert"] and not args.mlm: + if args.model_type in ["bert", "roberta", "distilbert", "camembert"] and not args.mlm: raise ValueError("BERT and RoBERTa do not have LM heads but masked LM heads. 
They must be run using the --mlm " "flag (masked language modeling).") if args.eval_data_file is None and args.do_eval: @@ -469,12 +514,18 @@ def main(): torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] - config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path) - tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case) + config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, + cache_dir=args.cache_dir if args.cache_dir else None) + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, + do_lower_case=args.do_lower_case, + cache_dir=args.cache_dir if args.cache_dir else None) if args.block_size <= 0: args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model args.block_size = min(args.block_size, tokenizer.max_len_single_sentence) - model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) + model = model_class.from_pretrained(args.model_name_or_path, + from_tf=bool('.ckpt' in args.model_name_or_path), + config=config, + cache_dir=args.cache_dir if args.cache_dir else None) model.to(args.device) if args.local_rank == 0: diff --git a/examples/run_multiple_choice.py b/examples/run_multiple_choice.py index e11264cf578..9d1ca7f3000 100644 --- a/examples/run_multiple_choice.py +++ b/examples/run_multiple_choice.py @@ -43,7 +43,7 @@ from transformers import (WEIGHTS_NAME, BertConfig, XLNetTokenizer, RobertaConfig, RobertaForMultipleChoice, RobertaTokenizer) -from transformers import AdamW, WarmupLinearSchedule +from transformers import AdamW, get_linear_schedule_with_warmup from utils_multiple_choice import (convert_examples_to_features, processors) @@ -101,7 +101,7 @@ def train(args, train_dataset, model, tokenizer): {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) - scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp @@ -226,9 +226,13 @@ def evaluate(args, model, tokenizer, prefix="", test=False): args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly - eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + # multi-gpu evaluate + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) @@ -464,9 +468,17 @@ def main(): args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] - config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name) - tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case) - model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) + config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, + num_labels=num_labels, + finetuning_task=args.task_name, + cache_dir=args.cache_dir if args.cache_dir else None) + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, + do_lower_case=args.do_lower_case, + cache_dir=args.cache_dir if args.cache_dir else None) + model = model_class.from_pretrained(args.model_name_or_path, + from_tf=bool('.ckpt' in args.model_name_or_path), + config=config, + cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab diff --git a/examples/run_ner.py b/examples/run_ner.py index fdf2f1924a2..1ab1236d948 100644 --- a/examples/run_ner.py +++ b/examples/run_ner.py @@ -13,7 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -""" Fine-tuning the library models for named entity recognition on CoNLL-2003 (Bert). """ +""" Fine-tuning the library models for named entity recognition on CoNLL-2003 (Bert or Roberta). 
""" from __future__ import absolute_import, division, print_function @@ -33,17 +33,23 @@ from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from utils_ner import convert_examples_to_features, get_labels, read_examples_from_file -from transformers import AdamW, WarmupLinearSchedule +from transformers import AdamW, get_linear_schedule_with_warmup from transformers import WEIGHTS_NAME, BertConfig, BertForTokenClassification, BertTokenizer +from transformers import RobertaConfig, RobertaForTokenClassification, RobertaTokenizer +from transformers import DistilBertConfig, DistilBertForTokenClassification, DistilBertTokenizer +from transformers import CamembertConfig, CamembertForTokenClassification, CamembertTokenizer logger = logging.getLogger(__name__) ALL_MODELS = sum( - (tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, )), + (tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, RobertaConfig, DistilBertConfig)), ()) MODEL_CLASSES = { "bert": (BertConfig, BertForTokenClassification, BertTokenizer), + "roberta": (RobertaConfig, RobertaForTokenClassification, RobertaTokenizer), + "distilbert": (DistilBertConfig, DistilBertForTokenClassification, DistilBertTokenizer), + "camembert": (CamembertConfig, CamembertForTokenClassification, CamembertTokenizer), } @@ -78,7 +84,7 @@ def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id): {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0} ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) - scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp @@ -119,9 +125,10 @@ def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id): batch = tuple(t.to(args.device) for t in batch) inputs = {"input_ids": batch[0], "attention_mask": batch[1], - "token_type_ids": batch[2] if args.model_type in ["bert", "xlnet"] else None, - # XLM and RoBERTa don"t use segment_ids "labels": batch[3]} + if args.model_type != "distilbert": + inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None # XLM and RoBERTa don"t use segment_ids + outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc) @@ -133,13 +140,16 @@ def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id): if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() - torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: loss.backward() - torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: + if args.fp16: + torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) + else: + torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + scheduler.step() # Update learning rate schedule optimizer.step() model.zero_grad() @@ -148,7 +158,7 @@ def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id): if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single 
GPU otherwise metrics may not average well - results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id) + results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev") for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) @@ -186,6 +196,10 @@ def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix="" eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + # multi-gpu evaluate + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + # Eval! logger.info("***** Running evaluation %s *****", prefix) logger.info(" Num examples = %d", len(eval_dataset)) @@ -201,12 +215,15 @@ def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix="" with torch.no_grad(): inputs = {"input_ids": batch[0], "attention_mask": batch[1], - "token_type_ids": batch[2] if args.model_type in ["bert", "xlnet"] else None, - # XLM and RoBERTa don"t use segment_ids "labels": batch[3]} + if args.model_type != "distilbert": + inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None # XLM and RoBERTa don"t use segment_ids outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] + if args.n_gpu > 1: + tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating + eval_loss += tmp_eval_loss.item() nb_eval_steps += 1 if preds is None: @@ -420,11 +437,15 @@ def main(): args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, - num_labels=num_labels) + num_labels=num_labels, + cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, - do_lower_case=args.do_lower_case) - model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), - config=config) + do_lower_case=args.do_lower_case, + cache_dir=args.cache_dir if args.cache_dir else None) + model = model_class.from_pretrained(args.model_name_or_path, + from_tf=bool(".ckpt" in args.model_name_or_path), + config=config, + cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab @@ -508,3 +529,4 @@ def main(): if __name__ == "__main__": main() + diff --git a/examples/run_squad.py b/examples/run_squad.py index 71c656a13d1..117b86e32cd 100644 --- a/examples/run_squad.py +++ b/examples/run_squad.py @@ -16,17 +16,18 @@ """ Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet).""" from __future__ import absolute_import, division, print_function +from transformers.data.processors.squad import SquadV1Processor, SquadV2Processor, SquadResult +from transformers.data.metrics.squad_metrics import compute_predictions_logits, compute_predictions_log_probs, squad_evaluate import argparse import logging import os import random import glob - +import timeit import numpy as np import torch -from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, - TensorDataset) +from torch.utils.data 
import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.data.distributed import DistributedSampler try: @@ -42,18 +43,12 @@ from transformers import (WEIGHTS_NAME, BertConfig, XLMTokenizer, XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer, - DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer) + DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer, + AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer, + XLMConfig, XLMForQuestionAnswering, XLMTokenizer, + ) -from transformers import AdamW, WarmupLinearSchedule - -from utils_squad import (read_squad_examples, convert_examples_to_features, - RawResult, write_predictions, - RawResultExtended, write_predictions_extended) - -# The follwing import is the official SQuAD evaluation script (2.0). -# You can remove it from the dependencies if you are using this script outside of the library -# We've added it here for automated tests (see examples/test_examples.py file) -from utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad +from transformers import AdamW, get_linear_schedule_with_warmup, squad_convert_examples_to_features logger = logging.getLogger(__name__) @@ -64,7 +59,9 @@ MODEL_CLASSES = { 'bert': (BertConfig, BertForQuestionAnswering, BertTokenizer), 'xlnet': (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer), 'xlm': (XLMConfig, XLMForQuestionAnswering, XLMTokenizer), - 'distilbert': (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer) + 'distilbert': (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer), + 'albert': (AlbertConfig, AlbertForQuestionAnswering, AlbertTokenizer), + 'xlm': (XLMConfig, XLMForQuestionAnswering, XLMTokenizer) } def set_seed(args): @@ -97,14 +94,16 @@ def train(args, train_dataset, model, tokenizer): optimizer_grouped_parameters = [ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} - ] + ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) - scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) + if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) @@ -127,25 +126,31 @@ def train(args, train_dataset, model, tokenizer): logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) - global_step = 0 + global_step = 1 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) + for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) - inputs = {'input_ids': batch[0], - 'attention_mask': batch[1], - 'start_positions': batch[3], - 
'end_positions': batch[4]} + + inputs = { + 'input_ids': batch[0], + 'attention_mask': batch[1], + 'start_positions': batch[3], + 'end_positions': batch[4] + } + if args.model_type != 'distilbert': inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] + if args.model_type in ['xlnet', 'xlm']: - inputs.update({'cls_index': batch[5], - 'p_mask': batch[6]}) + inputs.update({'cls_index': batch[5], 'p_mask': batch[6]}) + outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) @@ -157,20 +162,23 @@ def train(args, train_dataset, model, tokenizer): if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() - torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: loss.backward() - torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: + if args.fp16: + torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) + else: + torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 + # Log metrics if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: - # Log metrics if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): @@ -179,8 +187,8 @@ def train(args, train_dataset, model, tokenizer): tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) logging_loss = tr_loss + # Save model checkpoint if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: - # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) @@ -209,124 +217,162 @@ def evaluate(args, model, tokenizer, prefix=""): os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly - eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) + eval_sampler = SequentialSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + # multi-gpu evaluate + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) + all_results = [] + start_time = timeit.default_timer() + for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) + with torch.no_grad(): - inputs = {'input_ids': batch[0], - 'attention_mask': batch[1] - } + inputs = { + 'input_ids': batch[0], + 'attention_mask': batch[1] + } + if args.model_type != 'distilbert': inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids + example_indices = batch[3] + + # XLNet and XLM use more arguments for their predictions if args.model_type in ['xlnet', 'xlm']: - inputs.update({'cls_index': batch[4], - 'p_mask': batch[5]}) + inputs.update({'cls_index': batch[4], 'p_mask': batch[5]}) + outputs = model(**inputs) for i, example_index in enumerate(example_indices): eval_feature = features[example_index.item()] unique_id = int(eval_feature.unique_id) - if args.model_type in ['xlnet', 'xlm']: - # XLNet uses a more complex post-processing procedure - result = RawResultExtended(unique_id = unique_id, - start_top_log_probs = to_list(outputs[0][i]), - start_top_index = to_list(outputs[1][i]), - end_top_log_probs = to_list(outputs[2][i]), - end_top_index = to_list(outputs[3][i]), - cls_logits = to_list(outputs[4][i])) + + output = [to_list(output[i]) for output in outputs] + + # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler" + # models only use two. + if len(output) >= 5: + start_logits = output[0] + start_top_index = output[1] + end_logits = output[2] + end_top_index = output[3] + cls_logits = output[4] + + result = SquadResult( + unique_id, start_logits, end_logits, + start_top_index=start_top_index, + end_top_index=end_top_index, + cls_logits=cls_logits + ) + else: - result = RawResult(unique_id = unique_id, - start_logits = to_list(outputs[0][i]), - end_logits = to_list(outputs[1][i])) + start_logits, end_logits = output + result = SquadResult( + unique_id, start_logits, end_logits + ) + all_results.append(result) + evalTime = timeit.default_timer() - start_time + logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset)) + # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) + if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None + # XLNet and XLM use a more complex post-processing procedure if args.model_type in ['xlnet', 'xlm']: - # XLNet uses a more complex post-processing procedure - write_predictions_extended(examples, features, all_results, args.n_best_size, + start_n_top = model.config.start_n_top if hasattr(model, "config") else model.module.config.start_n_top + end_n_top = model.config.end_n_top if hasattr(model, "config") else model.module.config.end_n_top + + predictions = compute_predictions_log_probs(examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, - output_nbest_file, output_null_log_odds_file, args.predict_file, - model.config.start_n_top, model.config.end_n_top, + output_nbest_file, output_null_log_odds_file, + start_n_top, end_n_top, args.version_2_with_negative, tokenizer, 
args.verbose_logging) else: - write_predictions(examples, features, all_results, args.n_best_size, + predictions = compute_predictions_logits(examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold) - # Evaluate with the official SQuAD script - evaluate_options = EVAL_OPTS(data_file=args.predict_file, - pred_file=output_prediction_file, - na_prob_file=output_null_log_odds_file) - results = evaluate_on_squad(evaluate_options) + # Compute the F1 and exact scores. + results = squad_evaluate(examples, predictions) return results - def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Load data features from cache or dataset file - input_file = args.predict_file if evaluate else args.train_file - cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format( + input_dir = args.data_dir if args.data_dir else "." + cached_features_file = os.path.join(input_dir, 'cached_{}_{}_{}'.format( 'dev' if evaluate else 'train', list(filter(None, args.model_name_or_path.split('/'))).pop(), - str(args.max_seq_length))) + str(args.max_seq_length)) + ) + + # Init features and dataset from cache if it exists if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples: logger.info("Loading features from cached file %s", cached_features_file) - features = torch.load(cached_features_file) + features_and_dataset = torch.load(cached_features_file) + features, dataset = features_and_dataset["features"], features_and_dataset["dataset"] else: - logger.info("Creating features from dataset file at %s", input_file) - examples = read_squad_examples(input_file=input_file, - is_training=not evaluate, - version_2_with_negative=args.version_2_with_negative) - features = convert_examples_to_features(examples=examples, - tokenizer=tokenizer, - max_seq_length=args.max_seq_length, - doc_stride=args.doc_stride, - max_query_length=args.max_query_length, - is_training=not evaluate) + logger.info("Creating features from dataset file at %s", input_dir) + + if not args.data_dir and ((evaluate and not args.predict_file) or (not evaluate and not args.train_file)): + try: + import tensorflow_datasets as tfds + except ImportError: + raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.") + + if args.version_2_with_negative: + logger.warn("tensorflow_datasets does not handle version 2 of SQuAD.") + + tfds_examples = tfds.load("squad") + examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) + else: + processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor() + + if evaluate: + examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file) + else: + examples = processor.get_train_examples(args.data_dir, filename=args.train_file) + + features, dataset = squad_convert_examples_to_features( + examples=examples, + tokenizer=tokenizer, + max_seq_length=args.max_seq_length, + doc_stride=args.doc_stride, + max_query_length=args.max_query_length, + is_training=not evaluate, + return_dataset='pt' + ) + if args.local_rank in [-1, 0]: logger.info("Saving features 
into cached file %s", cached_features_file) - torch.save(features, cached_features_file) + torch.save({"features": features, "dataset": dataset}, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache - # Convert to Tensors and build dataset - all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) - all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) - all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) - all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long) - all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float) - if evaluate: - all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) - dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, - all_example_index, all_cls_index, all_p_mask) - else: - all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) - all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) - dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, - all_start_positions, all_end_positions, - all_cls_index, all_p_mask) - if output_examples: return dataset, examples, features return dataset @@ -336,10 +382,6 @@ def main(): parser = argparse.ArgumentParser() ## Required parameters - parser.add_argument("--train_file", default=None, type=str, required=True, - help="SQuAD json for training. E.g., train-v1.1.json") - parser.add_argument("--predict_file", default=None, type=str, required=True, - help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument("--model_name_or_path", default=None, type=str, required=True, @@ -348,6 +390,15 @@ def main(): help="The output directory where the model checkpoints and predictions will be written.") ## Other parameters + parser.add_argument("--data_dir", default=None, type=str, + help="The input data dir. Should contain the .json files for the task." + + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.") + parser.add_argument("--train_file", default=None, type=str, + help="The input training file. If a data dir is specified, will look for the file there" + + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.") + parser.add_argument("--predict_file", default=None, type=str, + help="The input evaluation file. 
If a data dir is specified, will look for the file there" + + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.") parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, @@ -386,7 +437,7 @@ def main(): parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--weight_decay", default=0.0, type=float, - help="Weight deay if we apply some.") + help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, @@ -470,9 +521,15 @@ def main(): args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] - config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path) - tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case) - model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) + config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, + cache_dir=args.cache_dir if args.cache_dir else None) + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, + do_lower_case=args.do_lower_case, + cache_dir=args.cache_dir if args.cache_dir else None) + model = model_class.from_pretrained(args.model_name_or_path, + from_tf=bool('.ckpt' in args.model_name_or_path), + config=config, + cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab @@ -515,7 +572,7 @@ def main(): torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Load a trained model and vocabulary that you have fine-tuned - model = model_class.from_pretrained(args.output_dir) + model = model_class.from_pretrained(args.output_dir, force_download=True) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(args.device) @@ -533,7 +590,7 @@ def main(): for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" - model = model_class.from_pretrained(checkpoint) + model = model_class.from_pretrained(checkpoint, force_download=True) model.to(args.device) # Evaluate diff --git a/examples/run_tf_glue.py b/examples/run_tf_glue.py index 399fe9e6169..54282277d22 100644 --- a/examples/run_tf_glue.py +++ b/examples/run_tf_glue.py @@ -1,29 +1,47 @@ import os import tensorflow as tf import tensorflow_datasets -from transformers import BertTokenizer, TFBertForSequenceClassification, glue_convert_examples_to_features, BertForSequenceClassification +from transformers import BertTokenizer, TFBertForSequenceClassification, BertConfig, glue_convert_examples_to_features, BertForSequenceClassification, glue_processors # script parameters BATCH_SIZE = 32 EVAL_BATCH_SIZE = BATCH_SIZE * 2 USE_XLA = False USE_AMP = False +EPOCHS = 3 + +TASK = "mrpc" + +if TASK == "sst-2": + TFDS_TASK = "sst2" +elif TASK == "sts-b": + 
TFDS_TASK = "stsb" +else: + TFDS_TASK = TASK + +num_labels = len(glue_processors[TASK]().get_labels()) +print(num_labels) tf.config.optimizer.set_jit(USE_XLA) tf.config.optimizer.set_experimental_options({"auto_mixed_precision": USE_AMP}) -# Load tokenizer and model from pretrained model/vocabulary +# Load tokenizer and model from pretrained model/vocabulary. Specify the number of labels to classify (2+: classification, 1: regression) +config = BertConfig.from_pretrained("bert-base-cased", num_labels=num_labels) tokenizer = BertTokenizer.from_pretrained('bert-base-cased') -model = TFBertForSequenceClassification.from_pretrained('bert-base-cased') +model = TFBertForSequenceClassification.from_pretrained('bert-base-cased', config=config) # Load dataset via TensorFlow Datasets -data, info = tensorflow_datasets.load('glue/mrpc', with_info=True) +data, info = tensorflow_datasets.load(f'glue/{TFDS_TASK}', with_info=True) train_examples = info.splits['train'].num_examples + +# MNLI expects either validation_matched or validation_mismatched valid_examples = info.splits['validation'].num_examples # Prepare dataset for GLUE as a tf.data.Dataset instance -train_dataset = glue_convert_examples_to_features(data['train'], tokenizer, 128, 'mrpc') -valid_dataset = glue_convert_examples_to_features(data['validation'], tokenizer, 128, 'mrpc') +train_dataset = glue_convert_examples_to_features(data['train'], tokenizer, 128, TASK) + +# MNLI expects either validation_matched or validation_mismatched +valid_dataset = glue_convert_examples_to_features(data['validation'], tokenizer, 128, TASK) train_dataset = train_dataset.shuffle(128).batch(BATCH_SIZE).repeat(-1) valid_dataset = valid_dataset.batch(EVAL_BATCH_SIZE) @@ -32,7 +50,13 @@ opt = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08) if USE_AMP: # loss scaling is currently required when using mixed precision opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(opt, 'dynamic') -loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) + + +if num_labels == 1: + loss = tf.keras.losses.MeanSquaredError() +else: + loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) + metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy') model.compile(optimizer=opt, loss=loss, metrics=[metric]) @@ -40,24 +64,30 @@ model.compile(optimizer=opt, loss=loss, metrics=[metric]) train_steps = train_examples//BATCH_SIZE valid_steps = valid_examples//EVAL_BATCH_SIZE -history = model.fit(train_dataset, epochs=2, steps_per_epoch=train_steps, +history = model.fit(train_dataset, epochs=EPOCHS, steps_per_epoch=train_steps, validation_data=valid_dataset, validation_steps=valid_steps) # Save TF2 model os.makedirs('./save/', exist_ok=True) model.save_pretrained('./save/') -# Load the TensorFlow model in PyTorch for inspection -pytorch_model = BertForSequenceClassification.from_pretrained('./save/', from_tf=True) +if TASK == "mrpc": + # Load the TensorFlow model in PyTorch for inspection + # This is to demo the interoperability between the two frameworks, you don't have to + # do this in real life (you can run the inference on the TF model). + pytorch_model = BertForSequenceClassification.from_pretrained('./save/', from_tf=True) -# Quickly test a few predictions - MRPC is a paraphrasing task, let's see if our model learned the task -sentence_0 = 'This research was consistent with his findings.' -sentence_1 = 'His findings were compatible with this research.' -sentence_2 = 'His findings were not compatible with this research.' 
-inputs_1 = tokenizer.encode_plus(sentence_0, sentence_1, add_special_tokens=True, return_tensors='pt') -inputs_2 = tokenizer.encode_plus(sentence_0, sentence_2, add_special_tokens=True, return_tensors='pt') + # Quickly test a few predictions - MRPC is a paraphrasing task, let's see if our model learned the task + sentence_0 = 'This research was consistent with his findings.' + sentence_1 = 'His findings were compatible with this research.' + sentence_2 = 'His findings were not compatible with this research.' + inputs_1 = tokenizer.encode_plus(sentence_0, sentence_1, add_special_tokens=True, return_tensors='pt') + inputs_2 = tokenizer.encode_plus(sentence_0, sentence_2, add_special_tokens=True, return_tensors='pt') -pred_1 = pytorch_model(**inputs_1)[0].argmax().item() -pred_2 = pytorch_model(**inputs_2)[0].argmax().item() -print('sentence_1 is', 'a paraphrase' if pred_1 else 'not a paraphrase', 'of sentence_0') -print('sentence_2 is', 'a paraphrase' if pred_2 else 'not a paraphrase', 'of sentence_0') + del inputs_1["special_tokens_mask"] + del inputs_2["special_tokens_mask"] + + pred_1 = pytorch_model(**inputs_1)[0].argmax().item() + pred_2 = pytorch_model(**inputs_2)[0].argmax().item() + print('sentence_1 is', 'a paraphrase' if pred_1 else 'not a paraphrase', 'of sentence_0') + print('sentence_2 is', 'a paraphrase' if pred_2 else 'not a paraphrase', 'of sentence_0') diff --git a/examples/run_tf_ner.py b/examples/run_tf_ner.py new file mode 100644 index 00000000000..eb284f4c2a7 --- /dev/null +++ b/examples/run_tf_ner.py @@ -0,0 +1,615 @@ +# coding=utf-8 +import datetime +import os +import math +import glob +import re +import tensorflow as tf +import collections +import numpy as np +from seqeval import metrics +import _pickle as pickle +from absl import logging +from transformers import TF2_WEIGHTS_NAME, BertConfig, BertTokenizer, TFBertForTokenClassification +from transformers import RobertaConfig, RobertaTokenizer, TFRobertaForTokenClassification +from transformers import DistilBertConfig, DistilBertTokenizer, TFDistilBertForTokenClassification +from transformers import create_optimizer, GradientAccumulator +from utils_ner import convert_examples_to_features, get_labels, read_examples_from_file +from fastprogress import master_bar, progress_bar +from absl import flags +from absl import app + + +ALL_MODELS = sum( + (tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, RobertaConfig, DistilBertConfig)), + ()) + +MODEL_CLASSES = { + "bert": (BertConfig, TFBertForTokenClassification, BertTokenizer), + "roberta": (RobertaConfig, TFRobertaForTokenClassification, RobertaTokenizer), + "distilbert": (DistilBertConfig, TFDistilBertForTokenClassification, DistilBertTokenizer) +} + + +flags.DEFINE_string( + "data_dir", None, + "The input data dir. Should contain the .conll files (or other data files) " + "for the task.") + +flags.DEFINE_string( + "model_type", None, + "Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) + +flags.DEFINE_string( + "model_name_or_path", None, + "Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) + +flags.DEFINE_string( + "output_dir", None, + "The output directory where the model checkpoints will be written.") + +flags.DEFINE_string( + "labels", "", + "Path to a file containing all labels. 
If not specified, CoNLL-2003 labels are used.") + +flags.DEFINE_string( + "config_name", "", + "Pretrained config name or path if not the same as model_name") + +flags.DEFINE_string( + "tokenizer_name", "", + "Pretrained tokenizer name or path if not the same as model_name") + +flags.DEFINE_string( + "cache_dir", "", + "Where do you want to store the pre-trained models downloaded from s3") + +flags.DEFINE_integer( + "max_seq_length", 128, + "The maximum total input sentence length after tokenization. " + "Sequences longer than this will be truncated, sequences shorter " + "will be padded.") + +flags.DEFINE_string( + "tpu", None, + "The Cloud TPU to use for training. This should be either the name " + "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " + "url.") + +flags.DEFINE_integer( + "num_tpu_cores", 8, + "Total number of TPU cores to use.") + +flags.DEFINE_boolean( + "do_train", False, + "Whether to run training.") + +flags.DEFINE_boolean( + "do_eval", False, + "Whether to run eval on the dev set.") + +flags.DEFINE_boolean( + "do_predict", False, + "Whether to run predictions on the test set.") + +flags.DEFINE_boolean( + "evaluate_during_training", False, + "Whether to run evaluation during training at each logging step.") + +flags.DEFINE_boolean( + "do_lower_case", False, + "Set this flag if you are using an uncased model.") + +flags.DEFINE_integer( + "per_device_train_batch_size", 8, + "Batch size per GPU/CPU/TPU for training.") + +flags.DEFINE_integer( + "per_device_eval_batch_size", 8, + "Batch size per GPU/CPU/TPU for evaluation.") + +flags.DEFINE_integer( + "gradient_accumulation_steps", 1, + "Number of updates steps to accumulate before performing a backward/update pass.") + +flags.DEFINE_float( + "learning_rate", 5e-5, + "The initial learning rate for Adam.") + +flags.DEFINE_float( + "weight_decay", 0.0, + "Weight decay if we apply some.") + +flags.DEFINE_float( + "adam_epsilon", 1e-8, + "Epsilon for Adam optimizer.") + +flags.DEFINE_float( + "max_grad_norm", 1.0, + "Max gradient norm.") + +flags.DEFINE_integer( + "num_train_epochs", 3, + "Total number of training epochs to perform.") + +flags.DEFINE_integer( + "max_steps", -1, + "If > 0: set total number of training steps to perform. Override num_train_epochs.") + +flags.DEFINE_integer( + "warmup_steps", 0, + "Linear warmup over warmup_steps.") + +flags.DEFINE_integer( + "logging_steps", 50, + "Log every X updates steps.") + +flags.DEFINE_integer( + "save_steps", 50, + "Save checkpoint every X updates steps.") + +flags.DEFINE_boolean( + "eval_all_checkpoints", False, + "Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") + +flags.DEFINE_boolean( + "no_cuda", False, + "Avoid using CUDA when available") + +flags.DEFINE_boolean( + "overwrite_output_dir", False, + "Overwrite the content of the output directory") + +flags.DEFINE_boolean( + "overwrite_cache", False, + "Overwrite the cached training and evaluation sets") + +flags.DEFINE_integer( + "seed", 42, + "random seed for initialization") + +flags.DEFINE_boolean( + "fp16", False, + "Whether to use 16-bit (mixed) precision instead of 32-bit") + +flags.DEFINE_string( + "gpus", "0", + "Comma separated list of gpus devices. 
If only one, switch to single " + "gpu strategy, if None takes all the gpus available.") + + +def train(args, strategy, train_dataset, tokenizer, model, num_train_examples, labels, train_batch_size, pad_token_label_id): + if args['max_steps'] > 0: + num_train_steps = args['max_steps'] * args['gradient_accumulation_steps'] + args['num_train_epochs'] = 1 + else: + num_train_steps = math.ceil(num_train_examples / train_batch_size) // args['gradient_accumulation_steps'] * args['num_train_epochs'] + + writer = tf.summary.create_file_writer("/tmp/mylogs") + + with strategy.scope(): + loss_fct = tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE) + optimizer = create_optimizer(args['learning_rate'], num_train_steps, args['warmup_steps']) + + if args['fp16']: + optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(optimizer, 'dynamic') + + loss_metric = tf.keras.metrics.Mean(name='loss', dtype=tf.float32) + gradient_accumulator = GradientAccumulator() + + logging.info("***** Running training *****") + logging.info(" Num examples = %d", num_train_examples) + logging.info(" Num Epochs = %d", args['num_train_epochs']) + logging.info(" Instantaneous batch size per device = %d", args['per_device_train_batch_size']) + logging.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", + train_batch_size * args['gradient_accumulation_steps']) + logging.info(" Gradient Accumulation steps = %d", args['gradient_accumulation_steps']) + logging.info(" Total training steps = %d", num_train_steps) + + model.summary() + + @tf.function + def apply_gradients(): + grads_and_vars = [] + + for gradient, variable in zip(gradient_accumulator.gradients, model.trainable_variables): + if gradient is not None: + scaled_gradient = gradient / (args['n_device'] * args['gradient_accumulation_steps']) + grads_and_vars.append((scaled_gradient, variable)) + else: + grads_and_vars.append((gradient, variable)) + + optimizer.apply_gradients(grads_and_vars, args['max_grad_norm']) + gradient_accumulator.reset() + + @tf.function + def train_step(train_features, train_labels): + def step_fn(train_features, train_labels): + inputs = {'attention_mask': train_features['input_mask'], 'training': True} + + if args['model_type'] != "distilbert": + inputs["token_type_ids"] = train_features['segment_ids'] if args['model_type'] in ["bert", "xlnet"] else None + + with tf.GradientTape() as tape: + logits = model(train_features['input_ids'], **inputs)[0] + logits = tf.reshape(logits, (-1, len(labels) + 1)) + active_loss = tf.reshape(train_features['input_mask'], (-1,)) + active_logits = tf.boolean_mask(logits, active_loss) + train_labels = tf.reshape(train_labels, (-1,)) + active_labels = tf.boolean_mask(train_labels, active_loss) + cross_entropy = loss_fct(active_labels, active_logits) + loss = tf.reduce_sum(cross_entropy) * (1.0 / train_batch_size) + grads = tape.gradient(loss, model.trainable_variables) + + gradient_accumulator(grads) + + return cross_entropy + + per_example_losses = strategy.experimental_run_v2(step_fn, args=(train_features, train_labels)) + mean_loss = strategy.reduce(tf.distribute.ReduceOp.MEAN, per_example_losses, axis=0) + + return mean_loss + + current_time = datetime.datetime.now() + train_iterator = master_bar(range(args['num_train_epochs'])) + global_step = 0 + logging_loss = 0.0 + + for epoch in train_iterator: + epoch_iterator = progress_bar(train_dataset, total=num_train_steps, parent=train_iterator, display=args['n_device'] > 1) + step = 1 + + 
with strategy.scope(): + for train_features, train_labels in epoch_iterator: + loss = train_step(train_features, train_labels) + + if step % args['gradient_accumulation_steps'] == 0: + strategy.experimental_run_v2(apply_gradients) + + loss_metric(loss) + + global_step += 1 + + if args['logging_steps'] > 0 and global_step % args['logging_steps'] == 0: + # Log metrics + if args['n_device'] == 1 and args['evaluate_during_training']: # Only evaluate when single GPU otherwise metrics may not average well + y_true, y_pred, eval_loss = evaluate(args, strategy, model, tokenizer, labels, pad_token_label_id, mode="dev") + report = metrics.classification_report(y_true, y_pred, digits=4) + + logging.info("Eval at step " + str(global_step) + "\n" + report) + logging.info("eval_loss: " + str(eval_loss)) + + precision = metrics.precision_score(y_true, y_pred) + recall = metrics.recall_score(y_true, y_pred) + f1 = metrics.f1_score(y_true, y_pred) + + with writer.as_default(): + tf.summary.scalar("eval_loss", eval_loss, global_step) + tf.summary.scalar("precision", precision, global_step) + tf.summary.scalar("recall", recall, global_step) + tf.summary.scalar("f1", f1, global_step) + + lr = optimizer.learning_rate + learning_rate = lr(step) + + with writer.as_default(): + tf.summary.scalar("lr", learning_rate, global_step) + tf.summary.scalar("loss", (loss_metric.result() - logging_loss) / args['logging_steps'], global_step) + + logging_loss = loss_metric.result() + + with writer.as_default(): + tf.summary.scalar("loss", loss_metric.result(), step=step) + + if args['save_steps'] > 0 and global_step % args['save_steps'] == 0: + # Save model checkpoint + output_dir = os.path.join(args['output_dir'], "checkpoint-{}".format(global_step)) + + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + model.save_pretrained(output_dir) + logging.info("Saving model checkpoint to %s", output_dir) + + train_iterator.child.comment = f'loss : {loss_metric.result()}' + step += 1 + + train_iterator.write(f'loss epoch {epoch + 1}: {loss_metric.result()}') + + loss_metric.reset_states() + + logging.info(" Training took time = {}".format(datetime.datetime.now() - current_time)) + + +def evaluate(args, strategy, model, tokenizer, labels, pad_token_label_id, mode): + eval_batch_size = args['per_device_eval_batch_size'] * args['n_device'] + eval_dataset, size = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, eval_batch_size, mode=mode) + eval_dataset = strategy.experimental_distribute_dataset(eval_dataset) + preds = None + num_eval_steps = math.ceil(size / eval_batch_size) + master = master_bar(range(1)) + eval_iterator = progress_bar(eval_dataset, total=num_eval_steps, parent=master, display=args['n_device'] > 1) + loss_fct = tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE) + loss = 0.0 + + logging.info("***** Running evaluation *****") + logging.info(" Num examples = %d", size) + logging.info(" Batch size = %d", eval_batch_size) + + for eval_features, eval_labels in eval_iterator: + inputs = {'attention_mask': eval_features['input_mask'], 'training': False} + + if args['model_type'] != "distilbert": + inputs["token_type_ids"] = eval_features['segment_ids'] if args['model_type'] in ["bert", "xlnet"] else None + + with strategy.scope(): + logits = model(eval_features['input_ids'], **inputs)[0] + tmp_logits = tf.reshape(logits, (-1, len(labels) + 1)) + active_loss = tf.reshape(eval_features['input_mask'], (-1,)) + active_logits = tf.boolean_mask(tmp_logits, 
active_loss) + tmp_eval_labels = tf.reshape(eval_labels, (-1,)) + active_labels = tf.boolean_mask(tmp_eval_labels, active_loss) + cross_entropy = loss_fct(active_labels, active_logits) + loss += tf.reduce_sum(cross_entropy) * (1.0 / eval_batch_size) + + if preds is None: + preds = logits.numpy() + label_ids = eval_labels.numpy() + else: + preds = np.append(preds, logits.numpy(), axis=0) + label_ids = np.append(label_ids, eval_labels.numpy(), axis=0) + + preds = np.argmax(preds, axis=2) + y_pred = [[] for _ in range(label_ids.shape[0])] + y_true = [[] for _ in range(label_ids.shape[0])] + loss = loss / num_eval_steps + + for i in range(label_ids.shape[0]): + for j in range(label_ids.shape[1]): + if label_ids[i, j] != pad_token_label_id: + y_pred[i].append(labels[preds[i, j] - 1]) + y_true[i].append(labels[label_ids[i, j] - 1]) + + return y_true, y_pred, loss.numpy() + + +def load_cache(cached_file, max_seq_length): + name_to_features = { + "input_ids": tf.io.FixedLenFeature([max_seq_length], tf.int64), + "input_mask": tf.io.FixedLenFeature([max_seq_length], tf.int64), + "segment_ids": tf.io.FixedLenFeature([max_seq_length], tf.int64), + "label_ids": tf.io.FixedLenFeature([max_seq_length], tf.int64), + } + + def _decode_record(record): + example = tf.io.parse_single_example(record, name_to_features) + features = {} + features['input_ids'] = example['input_ids'] + features['input_mask'] = example['input_mask'] + features['segment_ids'] = example['segment_ids'] + + return features, example['label_ids'] + + d = tf.data.TFRecordDataset(cached_file) + d = d.map(_decode_record, num_parallel_calls=4) + count = d.reduce(0, lambda x, _: x + 1) + + return d, count.numpy() + + +def save_cache(features, cached_features_file): + writer = tf.io.TFRecordWriter(cached_features_file) + + for (ex_index, feature) in enumerate(features): + if ex_index % 5000 == 0: + logging.info("Writing example %d of %d" % (ex_index, len(features))) + + def create_int_feature(values): + f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) + return f + + record_feature = collections.OrderedDict() + record_feature["input_ids"] = create_int_feature(feature.input_ids) + record_feature["input_mask"] = create_int_feature(feature.input_mask) + record_feature["segment_ids"] = create_int_feature(feature.segment_ids) + record_feature["label_ids"] = create_int_feature(feature.label_ids) + + tf_example = tf.train.Example(features=tf.train.Features(feature=record_feature)) + + writer.write(tf_example.SerializeToString()) + + writer.close() + + +def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, batch_size, mode): + drop_remainder = True if args['tpu'] or mode == 'train' else False + + # Load data features from cache or dataset file + cached_features_file = os.path.join(args['data_dir'], "cached_{}_{}_{}.tf_record".format(mode, + list(filter(None, args['model_name_or_path'].split("/"))).pop(), + str(args['max_seq_length']))) + if os.path.exists(cached_features_file) and not args['overwrite_cache']: + logging.info("Loading features from cached file %s", cached_features_file) + dataset, size = load_cache(cached_features_file, args['max_seq_length']) + else: + logging.info("Creating features from dataset file at %s", args['data_dir']) + examples = read_examples_from_file(args['data_dir'], mode) + features = convert_examples_to_features(examples, labels, args['max_seq_length'], tokenizer, + cls_token_at_end=bool(args['model_type'] in ["xlnet"]), + # xlnet has a cls token at the end + 
cls_token=tokenizer.cls_token, + cls_token_segment_id=2 if args['model_type'] in ["xlnet"] else 0, + sep_token=tokenizer.sep_token, + sep_token_extra=bool(args['model_type'] in ["roberta"]), + # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805 + pad_on_left=bool(args['model_type'] in ["xlnet"]), + # pad on the left for xlnet + pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], + pad_token_segment_id=4 if args['model_type'] in ["xlnet"] else 0, + pad_token_label_id=pad_token_label_id + ) + logging.info("Saving features into cached file %s", cached_features_file) + save_cache(features, cached_features_file) + dataset, size = load_cache(cached_features_file, args['max_seq_length']) + + if mode == 'train': + dataset = dataset.repeat() + dataset = dataset.shuffle(buffer_size=8192, seed=args['seed']) + + dataset = dataset.batch(batch_size, drop_remainder) + dataset = dataset.prefetch(buffer_size=batch_size) + + return dataset, size + + +def main(_): + logging.set_verbosity(logging.INFO) + args = flags.FLAGS.flag_values_dict() + + if os.path.exists(args['output_dir']) and os.listdir( + args['output_dir']) and args['do_train'] and not args['overwrite_output_dir']: + raise ValueError( + "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format( + args['output_dir'])) + + if args['fp16']: + tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True}) + + if args['tpu']: + resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=args['tpu']) + tf.config.experimental_connect_to_cluster(resolver) + tf.tpu.experimental.initialize_tpu_system(resolver) + strategy = tf.distribute.experimental.TPUStrategy(resolver) + args['n_device'] = args['num_tpu_cores'] + elif len(args['gpus'].split(',')) > 1: + args['n_device'] = len([f"/gpu:{gpu}" for gpu in args['gpus'].split(',')]) + strategy = tf.distribute.MirroredStrategy(devices=[f"/gpu:{gpu}" for gpu in args['gpus'].split(',')]) + elif args['no_cuda']: + args['n_device'] = 1 + strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") + else: + args['n_device'] = len(args['gpus'].split(',')) + strategy = tf.distribute.OneDeviceStrategy(device="/gpu:" + args['gpus'].split(',')[0]) + + logging.warning("n_device: %s, distributed training: %s, 16-bits training: %s", + args['n_device'], bool(args['n_device'] > 1), args['fp16']) + + labels = get_labels(args['labels']) + num_labels = len(labels) + 1 + pad_token_label_id = 0 + config_class, model_class, tokenizer_class = MODEL_CLASSES[args['model_type']] + config = config_class.from_pretrained(args['config_name'] if args['config_name'] else args['model_name_or_path'], + num_labels=num_labels, + cache_dir=args['cache_dir'] if args['cache_dir'] else None) + + logging.info("Training/evaluation parameters %s", args) + + # Training + if args['do_train']: + tokenizer = tokenizer_class.from_pretrained(args['tokenizer_name'] if args['tokenizer_name'] else args['model_name_or_path'], + do_lower_case=args['do_lower_case'], + cache_dir=args['cache_dir'] if args['cache_dir'] else None) + + with strategy.scope(): + model = model_class.from_pretrained(args['model_name_or_path'], + from_pt=bool(".bin" in args['model_name_or_path']), + config=config, + cache_dir=args['cache_dir'] if args['cache_dir'] else None) + model.layers[-1].activation = tf.keras.activations.softmax + + train_batch_size = args['per_device_train_batch_size'] * args['n_device'] + 
train_dataset, num_train_examples = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, train_batch_size, mode="train") + train_dataset = strategy.experimental_distribute_dataset(train_dataset) + train(args, strategy, train_dataset, tokenizer, model, num_train_examples, labels, train_batch_size, pad_token_label_id) + + if not os.path.exists(args['output_dir']): + os.makedirs(args['output_dir']) + + logging.info("Saving model to %s", args['output_dir']) + + model.save_pretrained(args['output_dir']) + tokenizer.save_pretrained(args['output_dir']) + + # Evaluation + if args['do_eval']: + tokenizer = tokenizer_class.from_pretrained(args['output_dir'], do_lower_case=args['do_lower_case']) + checkpoints = [] + results = [] + + if args['eval_all_checkpoints']: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args['output_dir'] + "/**/" + TF2_WEIGHTS_NAME, recursive=True), key=lambda f: int(''.join(filter(str.isdigit, f)) or -1))) + + logging.info("Evaluate the following checkpoints: %s", checkpoints) + + if len(checkpoints) == 0: + checkpoints.append(args['output_dir']) + + for checkpoint in checkpoints: + global_step = checkpoint.split("-")[-1] if re.match(".*checkpoint-[0-9]", checkpoint) else "final" + + with strategy.scope(): + model = model_class.from_pretrained(checkpoint) + + y_true, y_pred, eval_loss = evaluate(args, strategy, model, tokenizer, labels, pad_token_label_id, mode="dev") + report = metrics.classification_report(y_true, y_pred, digits=4) + + if global_step: + results.append({global_step + "_report": report, global_step + "_loss": eval_loss}) + + output_eval_file = os.path.join(args['output_dir'], "eval_results.txt") + + with tf.io.gfile.GFile(output_eval_file, "w") as writer: + for res in results: + for key, val in res.items(): + if "loss" in key: + logging.info(key + " = " + str(val)) + writer.write(key + " = " + str(val)) + writer.write("\n") + else: + logging.info(key) + logging.info("\n" + report) + writer.write(key + "\n") + writer.write(report) + writer.write("\n") + + if args['do_predict']: + tokenizer = tokenizer_class.from_pretrained(args['output_dir'], do_lower_case=args['do_lower_case']) + model = model_class.from_pretrained(args['output_dir']) + eval_batch_size = args['per_device_eval_batch_size'] * args['n_device'] + predict_dataset, _ = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, eval_batch_size, mode="test") + y_true, y_pred, pred_loss = evaluate(args, strategy, model, tokenizer, labels, pad_token_label_id, mode="test") + output_test_results_file = os.path.join(args['output_dir'], "test_results.txt") + output_test_predictions_file = os.path.join(args['output_dir'], "test_predictions.txt") + report = metrics.classification_report(y_true, y_pred, digits=4) + + with tf.io.gfile.GFile(output_test_results_file, "w") as writer: + report = metrics.classification_report(y_true, y_pred, digits=4) + + logging.info("\n" + report) + + writer.write(report) + writer.write("\n\nloss = " + str(pred_loss)) + + with tf.io.gfile.GFile(output_test_predictions_file, "w") as writer: + with tf.io.gfile.GFile(os.path.join(args['data_dir'], "test.txt"), "r") as f: + example_id = 0 + + for line in f: + if line.startswith("-DOCSTART-") or line == "" or line == "\n": + writer.write(line) + + if not y_pred[example_id]: + example_id += 1 + elif y_pred[example_id]: + output_line = line.split()[0] + " " + y_pred[example_id].pop(0) + "\n" + writer.write(output_line) + else: + logging.warning("Maximum sequence length exceeded: No 
prediction for '%s'.", line.split()[0]) + + +if __name__ == "__main__": + flags.mark_flag_as_required("data_dir") + flags.mark_flag_as_required("output_dir") + flags.mark_flag_as_required("model_name_or_path") + flags.mark_flag_as_required("model_type") + app.run(main) diff --git a/examples/run_xnli.py b/examples/run_xnli.py new file mode 100644 index 00000000000..42d134a43a0 --- /dev/null +++ b/examples/run_xnli.py @@ -0,0 +1,515 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Finetuning multi-lingual models on XNLI (Bert, DistilBERT, XLM). + Adapted from `examples/run_glue.py`""" + +from __future__ import absolute_import, division, print_function + +import argparse +import glob +import logging +import os +import random + +import numpy as np +import torch +from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, + TensorDataset) +from torch.utils.data.distributed import DistributedSampler + +try: + from torch.utils.tensorboard import SummaryWriter +except: + from tensorboardX import SummaryWriter + +from tqdm import tqdm, trange + +from transformers import (WEIGHTS_NAME, + BertConfig, BertForSequenceClassification, BertTokenizer, + XLMConfig, XLMForSequenceClassification, XLMTokenizer, + DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer) + +from transformers import AdamW, get_linear_schedule_with_warmup + +from transformers import xnli_compute_metrics as compute_metrics +from transformers import xnli_output_modes as output_modes +from transformers import xnli_processors as processors + +from transformers import glue_convert_examples_to_features as convert_examples_to_features + +logger = logging.getLogger(__name__) + +ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, DistilBertConfig, XLMConfig)), ()) + +MODEL_CLASSES = { + 'bert': (BertConfig, BertForSequenceClassification, BertTokenizer), + 'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer), + 'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer) +} + + +def set_seed(args): + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + if args.n_gpu > 0: + torch.cuda.manual_seed_all(args.seed) + + +def train(args, train_dataset, model, tokenizer): + """ Train the model """ + if args.local_rank in [-1, 0]: + tb_writer = SummaryWriter() + + args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) + train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) + + if args.max_steps > 0: + t_total = args.max_steps + args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 + else: + t_total = 
len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
+
+    # Prepare optimizer and schedule (linear warmup and decay)
+    no_decay = ['bias', 'LayerNorm.weight']
+    optimizer_grouped_parameters = [
+        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
+        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
+    ]
+    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
+    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
+    if args.fp16:
+        try:
+            from apex import amp
+        except ImportError:
+            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
+        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
+
+    # multi-gpu training (should be after apex fp16 initialization)
+    if args.n_gpu > 1:
+        model = torch.nn.DataParallel(model)
+
+    # Distributed training (should be after apex fp16 initialization)
+    if args.local_rank != -1:
+        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
+                                                          output_device=args.local_rank,
+                                                          find_unused_parameters=True)
+
+    # Train!
+    logger.info("***** Running training *****")
+    logger.info(" Num examples = %d", len(train_dataset))
+    logger.info(" Num Epochs = %d", args.num_train_epochs)
+    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
+    logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
+                args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
+    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
+    logger.info(" Total optimization steps = %d", t_total)
+
+    global_step = 0
+    tr_loss, logging_loss = 0.0, 0.0
+    model.zero_grad()
+    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
+    set_seed(args) # Added here for reproducibility (even between python 2 and 3)
+    for _ in train_iterator:
+        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
+        for step, batch in enumerate(epoch_iterator):
+            model.train()
+            batch = tuple(t.to(args.device) for t in batch)
+            inputs = {'input_ids': batch[0],
+                      'attention_mask': batch[1],
+                      'labels': batch[3]}
+            if args.model_type != 'distilbert':
+                inputs['token_type_ids'] = batch[2] if args.model_type in ['bert'] else None # XLM and DistilBERT don't use segment_ids
+            outputs = model(**inputs)
+            loss = outputs[0] # model outputs are always tuple in transformers (see doc)
+
+            if args.n_gpu > 1:
+                loss = loss.mean() # mean() to average on multi-gpu parallel training
+            if args.gradient_accumulation_steps > 1:
+                loss = loss / args.gradient_accumulation_steps
+
+            if args.fp16:
+                with amp.scale_loss(loss, optimizer) as scaled_loss:
+                    scaled_loss.backward()
+            else:
+                loss.backward()
+
+            tr_loss += loss.item()
+            if (step + 1) % args.gradient_accumulation_steps == 0:
+                if args.fp16:
+                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
+                else:
+                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
+
+                optimizer.step()
+                scheduler.step() # Update learning rate schedule
+                model.zero_grad()
+                global_step += 1
+
+                if args.local_rank in [-1, 0] and args.logging_steps > 0 and
global_step % args.logging_steps == 0: + # Log metrics + if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well + results = evaluate(args, model, tokenizer) + for key, value in results.items(): + tb_writer.add_scalar('eval_{}'.format(key), value, global_step) + tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) + tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) + logging_loss = tr_loss + + if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: + # Save model checkpoint + output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training + model_to_save.save_pretrained(output_dir) + torch.save(args, os.path.join(output_dir, 'training_args.bin')) + logger.info("Saving model checkpoint to %s", output_dir) + + if args.max_steps > 0 and global_step > args.max_steps: + epoch_iterator.close() + break + if args.max_steps > 0 and global_step > args.max_steps: + train_iterator.close() + break + + if args.local_rank in [-1, 0]: + tb_writer.close() + + return global_step, tr_loss / global_step + + +def evaluate(args, model, tokenizer, prefix=""): + eval_task_names = (args.task_name,) + eval_outputs_dirs = (args.output_dir,) + + results = {} + for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): + eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True) + + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: + os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # multi-gpu eval + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Eval! 
+ logger.info("***** Running evaluation {} *****".format(prefix)) + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + preds = None + out_label_ids = None + for batch in tqdm(eval_dataloader, desc="Evaluating"): + model.eval() + batch = tuple(t.to(args.device) for t in batch) + + with torch.no_grad(): + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'labels': batch[3]} + if args.model_type != 'distilbert': + inputs['token_type_ids'] = batch[2] if args.model_type in ['bert'] else None # XLM and DistilBERT don't use segment_ids + outputs = model(**inputs) + tmp_eval_loss, logits = outputs[:2] + + eval_loss += tmp_eval_loss.mean().item() + nb_eval_steps += 1 + if preds is None: + preds = logits.detach().cpu().numpy() + out_label_ids = inputs['labels'].detach().cpu().numpy() + else: + preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) + out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0) + + eval_loss = eval_loss / nb_eval_steps + if args.output_mode == "classification": + preds = np.argmax(preds, axis=1) + else: + raise ValueError('No other `output_mode` for XNLI.') + result = compute_metrics(eval_task, preds, out_label_ids) + results.update(result) + + output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") + with open(output_eval_file, "w") as writer: + logger.info("***** Eval results {} *****".format(prefix)) + for key in sorted(result.keys()): + logger.info(" %s = %s", key, str(result[key])) + writer.write("%s = %s\n" % (key, str(result[key]))) + + return results + + +def load_and_cache_examples(args, task, tokenizer, evaluate=False): + if args.local_rank not in [-1, 0] and not evaluate: + torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache + + processor = processors[task](language=args.language, train_language=args.train_language) + output_mode = output_modes[task] + # Load data features from cache or dataset file + cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}_{}'.format( + 'test' if evaluate else 'train', + list(filter(None, args.model_name_or_path.split('/'))).pop(), + str(args.max_seq_length), + str(task), + str(args.train_language if (not evaluate and args.train_language is not None) else args.language))) + if os.path.exists(cached_features_file) and not args.overwrite_cache: + logger.info("Loading features from cached file %s", cached_features_file) + features = torch.load(cached_features_file) + else: + logger.info("Creating features from dataset file at %s", args.data_dir) + label_list = processor.get_labels() + examples = processor.get_test_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) + features = convert_examples_to_features(examples, + tokenizer, + label_list=label_list, + max_length=args.max_seq_length, + output_mode=output_mode, + pad_on_left=False, + pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], + pad_token_segment_id=0, + ) + if args.local_rank in [-1, 0]: + logger.info("Saving features into cached file %s", cached_features_file) + torch.save(features, cached_features_file) + + if args.local_rank == 0 and not evaluate: + torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache + + # Convert to Tensors and build dataset + 
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) + all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) + all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) + if output_mode == "classification": + all_labels = torch.tensor([f.label for f in features], dtype=torch.long) + else: + raise ValueError('No other `output_mode` for XNLI.') + + dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) + return dataset + + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--data_dir", default=None, type=str, required=True, + help="The input data dir. Should contain the .tsv files (or other data files) for the task.") + parser.add_argument("--model_type", default=None, type=str, required=True, + help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) + parser.add_argument("--model_name_or_path", default=None, type=str, required=True, + help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) + parser.add_argument("--language", default=None, type=str, required=True, + help="Evaluation language. Also train language if `train_language` is set to None.") + parser.add_argument("--train_language", default=None, type=str, + help="Train language if is different of the evaluation language.") + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model predictions and checkpoints will be written.") + + ## Other parameters + parser.add_argument("--config_name", default="", type=str, + help="Pretrained config name or path if not the same as model_name") + parser.add_argument("--tokenizer_name", default="", type=str, + help="Pretrained tokenizer name or path if not the same as model_name") + parser.add_argument("--cache_dir", default="", type=str, + help="Where do you want to store the pre-trained models downloaded from s3") + parser.add_argument("--max_seq_length", default=128, type=int, + help="The maximum total input sequence length after tokenization. 
Sequences longer "
+                             "than this will be truncated, sequences shorter will be padded.")
+    parser.add_argument("--do_train", action='store_true',
+                        help="Whether to run training.")
+    parser.add_argument("--do_eval", action='store_true',
+                        help="Whether to run eval on the test set.")
+    parser.add_argument("--evaluate_during_training", action='store_true',
+                        help="Run evaluation during training at each logging step.")
+    parser.add_argument("--do_lower_case", action='store_true',
+                        help="Set this flag if you are using an uncased model.")
+
+    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
+                        help="Batch size per GPU/CPU for training.")
+    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
+                        help="Batch size per GPU/CPU for evaluation.")
+    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
+                        help="Number of updates steps to accumulate before performing a backward/update pass.")
+    parser.add_argument("--learning_rate", default=5e-5, type=float,
+                        help="The initial learning rate for Adam.")
+    parser.add_argument("--weight_decay", default=0.0, type=float,
+                        help="Weight decay if we apply some.")
+    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
+                        help="Epsilon for Adam optimizer.")
+    parser.add_argument("--max_grad_norm", default=1.0, type=float,
+                        help="Max gradient norm.")
+    parser.add_argument("--num_train_epochs", default=3.0, type=float,
+                        help="Total number of training epochs to perform.")
+    parser.add_argument("--max_steps", default=-1, type=int,
+                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
+    parser.add_argument("--warmup_steps", default=0, type=int,
+                        help="Linear warmup over warmup_steps.")
+
+    parser.add_argument('--logging_steps', type=int, default=50,
+                        help="Log every X updates steps.")
+    parser.add_argument('--save_steps', type=int, default=50,
+                        help="Save checkpoint every X updates steps.")
+    parser.add_argument("--eval_all_checkpoints", action='store_true',
+                        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
+    parser.add_argument("--no_cuda", action='store_true',
+                        help="Avoid using CUDA when available")
+    parser.add_argument('--overwrite_output_dir', action='store_true',
+                        help="Overwrite the content of the output directory")
+    parser.add_argument('--overwrite_cache', action='store_true',
+                        help="Overwrite the cached training and evaluation sets")
+    parser.add_argument('--seed', type=int, default=42,
+                        help="random seed for initialization")
+
+    parser.add_argument('--fp16', action='store_true',
+                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
+    parser.add_argument('--fp16_opt_level', type=str, default='O1',
+                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+                             "See details at https://nvidia.github.io/apex/amp.html")
+    parser.add_argument("--local_rank", type=int, default=-1,
+                        help="For distributed training: local_rank")
+    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
+    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
+    args = parser.parse_args()
+
+    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
+        raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
+
+    # Setup distant debugging if needed
+    if args.server_ip and args.server_port:
+        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
+        import ptvsd
+        print("Waiting for debugger attach")
+        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
+        ptvsd.wait_for_attach()
+
+    # Setup CUDA, GPU & distributed training
+    if args.local_rank == -1 or args.no_cuda:
+        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
+        args.n_gpu = torch.cuda.device_count()
+    else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
+        torch.cuda.set_device(args.local_rank)
+        device = torch.device("cuda", args.local_rank)
+        torch.distributed.init_process_group(backend='nccl')
+        args.n_gpu = 1
+    args.device = device
+
+    # Setup logging
+    logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
+                        datefmt = '%m/%d/%Y %H:%M:%S',
+                        level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
+    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
+                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
+
+    # Set seed
+    set_seed(args)
+
+    # Prepare XNLI task
+    args.task_name = 'xnli'
+    if args.task_name not in processors:
+        raise ValueError("Task not found: %s" % (args.task_name))
+    processor = processors[args.task_name](language=args.language, train_language=args.train_language)
+    args.output_mode = output_modes[args.task_name]
+    label_list = processor.get_labels()
+    num_labels = len(label_list)
+
+    # Load pretrained model and tokenizer
+    if args.local_rank not in [-1, 0]:
+        torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
+
+    args.model_type = args.model_type.lower()
+    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
+    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
+                                          num_labels=num_labels,
+                                          finetuning_task=args.task_name,
+                                          cache_dir=args.cache_dir if args.cache_dir else None)
+    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
+                                                do_lower_case=args.do_lower_case,
+                                                cache_dir=args.cache_dir if args.cache_dir else None)
+    model = model_class.from_pretrained(args.model_name_or_path,
+                                        from_tf=bool('.ckpt' in args.model_name_or_path),
+                                        config=config,
+                                        cache_dir=args.cache_dir if args.cache_dir else None)
+
+    if args.local_rank == 0:
+        torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
+
+    model.to(args.device)
+
+    logger.info("Training/evaluation parameters %s", args)
+
+
+    # Training
+    if args.do_train:
+        train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
+        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
+        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
+
+
+    # Saving best-practices: if you use default names for the model, you can reload it using from_pretrained()
+    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
+        # Create output directory if needed
+        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
+            os.makedirs(args.output_dir)
+
+        logger.info("Saving model checkpoint to
%s", args.output_dir) + # Save a trained model, configuration and tokenizer using `save_pretrained()`. + # They can then be reloaded using `from_pretrained()` + model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training + model_to_save.save_pretrained(args.output_dir) + tokenizer.save_pretrained(args.output_dir) + + # Good practice: save your training arguments together with the trained model + torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) + + # Load a trained model and vocabulary that you have fine-tuned + model = model_class.from_pretrained(args.output_dir) + tokenizer = tokenizer_class.from_pretrained(args.output_dir) + model.to(args.device) + + + # Evaluation + results = {} + if args.do_eval and args.local_rank in [-1, 0]: + tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging + logger.info("Evaluate the following checkpoints: %s", checkpoints) + for checkpoint in checkpoints: + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else "" + + model = model_class.from_pretrained(checkpoint) + model.to(args.device) + result = evaluate(args, model, tokenizer, prefix=prefix) + result = dict((k + '_{}'.format(global_step), v) for k, v in result.items()) + results.update(result) + + return results + + +if __name__ == "__main__": + main() diff --git a/examples/summarization/README.md b/examples/summarization/README.md new file mode 100644 index 00000000000..96825cfa465 --- /dev/null +++ b/examples/summarization/README.md @@ -0,0 +1,61 @@ +# Text Summarization with Pretrained Encoders + +This folder contains part of the code necessary to reproduce the results on abstractive summarization from the article [Text Summarization with Pretrained Encoders](https://arxiv.org/pdf/1908.08345.pdf) by [Yang Liu](https://nlp-yang.github.io/) and [Mirella Lapata](https://homepages.inf.ed.ac.uk/mlap/). It can also be used to summarize any document. + +The original code can be found on the Yang Liu's [github repository](https://github.com/nlpyang/PreSumm). + +The model is loaded with the pre-trained weights for the abstractive summarization model trained on the CNN/Daily Mail dataset with an extractive and then abstractive tasks. + +## Setup + +``` +git clone https://github.com/huggingface/transformers && cd transformers +pip install [--editable] . +pip install nltk py-rouge +cd examples/summarization +``` + +## Reproduce the authors' results on ROUGE + +To be able to reproduce the authors' results on the CNN/Daily Mail dataset you first need to download both CNN and Daily Mail datasets [from Kyunghyun Cho's website](https://cs.nyu.edu/~kcho/DMQA/) (the links next to "Stories") in the same folder. Then uncompress the archives by running: + +```bash +tar -xvf cnn_stories.tgz && tar -xvf dailymail_stories.tgz +``` + +And move all the stories to the same folder. We will refer as `$DATA_PATH` the path to where you uncompressed both archive. 
Then run the following in the same folder as `run_summarization.py`: + +```bash +python run_summarization.py \ + --documents_dir $DATA_PATH \ + --summaries_output_dir $SUMMARIES_PATH \ # optional + --to_cpu false \ + --batch_size 4 \ + --min_length 50 \ + --max_length 200 \ + --beam_size 5 \ + --alpha 0.95 \ + --block_trigram true \ + --compute_rouge true +``` + +The script executes on GPU if one is available and if `to_cpu` is not set to `true`. Inference on multiple GPUs is not supported yet. The ROUGE scores will be displayed in the console at the end of evaluation and written to a `rouge_scores.txt` file. The script takes 30 hours to run on a single Tesla V100 GPU with a batch size of 10 (300,000 texts to summarize). + +## Summarize any text + +Put the documents that you would like to summarize in a folder (the path to which is referred to as `$DATA_PATH` below) and run the following in the same folder as `run_summarization.py`: + +```bash +python run_summarization.py \ + --documents_dir $DATA_PATH \ + --summaries_output_dir $SUMMARIES_PATH \ # optional + --to_cpu false \ + --batch_size 4 \ + --min_length 50 \ + --max_length 200 \ + --beam_size 5 \ + --alpha 0.95 \ + --block_trigram true \ +``` + +You may want to play around with `min_length`, `max_length` and `alpha` to suit your use case. If you want to compute ROUGE on another dataset you will need to tweak the stories/summaries import in `utils_summarization.py` and tell it where to fetch the reference summaries. diff --git a/examples/summarization/configuration_bertabs.py b/examples/summarization/configuration_bertabs.py new file mode 100644 index 00000000000..5bcb65b423f --- /dev/null +++ b/examples/summarization/configuration_bertabs.py @@ -0,0 +1,119 @@ +# coding=utf-8 +# Copyright 2019 The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" BertAbs configuration """ +import json +import logging +import sys + +from transformers import PretrainedConfig + + +logger = logging.getLogger(__name__) + + +BERTABS_FINETUNED_CONFIG_MAP = { + "bertabs-finetuned-cnndm": "https://s3.amazonaws.com/models.huggingface.co/bert/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization-config.json", +} + + +class BertAbsConfig(PretrainedConfig): + r""" Class to store the configuration of the BertAbs model. + + Arguments: + max_pos: int + The maximum sequence length that this model will be used with. + enc_layers: int + The number of hidden layers in the Transformer encoder. + enc_hidden_size: int + The size of the encoder's layers. + enc_heads: int + The number of attention heads for each attention layer in the encoder. + enc_ff_size: int + The size of the encoder's feed-forward layers. + enc_dropout: int + The dropout probability for all fully connected layers in the + embeddings, layers, pooler and also the attention probabilities in + the encoder. + dec_layers: int + The number of hidden layers in the decoder. 
+ dec_hidden_size: int + The size of the decoder's layers. + dec_heads: int + The number of attention heads for each attention layer in the decoder. + dec_ff_size: int + The size of the decoder's feed-forward layers. + dec_dropout: int + The dropout probabilitiy for all fully connected layers in the + embeddings, layers, pooler and also the attention probabilities in + the decoder. + """ + + pretrained_config_archive_map = BERTABS_FINETUNED_CONFIG_MAP + + def __init__( + self, + vocab_size_or_config_json_file=30522, + max_pos=512, + enc_layers=6, + enc_hidden_size=512, + enc_heads=8, + enc_ff_size=512, + enc_dropout=0.2, + dec_layers=6, + dec_hidden_size=768, + dec_heads=8, + dec_ff_size=2048, + dec_dropout=0.2, + **kwargs, + ): + super(BertAbsConfig, self).__init__(**kwargs) + + if self._input_is_path_to_json(vocab_size_or_config_json_file): + path_to_json = vocab_size_or_config_json_file + with open(path_to_json, "r", encoding="utf-8") as reader: + json_config = json.loads(reader.read()) + for key, value in json_config.items(): + self.__dict__[key] = value + elif isinstance(vocab_size_or_config_json_file, int): + self.vocab_size = vocab_size_or_config_json_file + self.max_pos = max_pos + + self.enc_layers = enc_layers + self.enc_hidden_size = enc_hidden_size + self.enc_heads = enc_heads + self.enc_ff_size = enc_ff_size + self.enc_dropout = enc_dropout + + self.dec_layers = dec_layers + self.dec_hidden_size = dec_hidden_size + self.dec_heads = dec_heads + self.dec_ff_size = dec_ff_size + self.dec_dropout = dec_dropout + else: + raise ValueError( + "First argument must be either a vocabulary size (int)" + "or the path to a pretrained model config file (str)" + ) + + def _input_is_path_to_json(self, first_argument): + """ Checks whether the first argument passed to config + is the path to a JSON file that contains the config. + """ + is_python_2 = sys.version_info[0] == 2 + if is_python_2: + return isinstance(first_argument, unicode) + else: + return isinstance(first_argument, str) diff --git a/examples/summarization/convert_bertabs_original_pytorch_checkpoint.py b/examples/summarization/convert_bertabs_original_pytorch_checkpoint.py new file mode 100644 index 00000000000..33b17bfb6fd --- /dev/null +++ b/examples/summarization/convert_bertabs_original_pytorch_checkpoint.py @@ -0,0 +1,163 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Convert BertExtAbs's checkpoints. + +The script looks like it is doing something trivial but it is not. The "weights" +proposed by the authors are actually the entire model pickled. We need to load +the model within the original codebase to be able to only save its `state_dict`. 
+""" + +import argparse +from collections import namedtuple +import logging +import torch + +from models.model_builder import AbsSummarizer # The authors' implementation +from model_bertabs import BertAbsSummarizer + +from transformers import BertTokenizer + + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +SAMPLE_TEXT = 'Hello world! cécé herlolip' + + +BertAbsConfig = namedtuple( + "BertAbsConfig", + ["temp_dir", "large", "use_bert_emb", "finetune_bert", "encoder", "share_emb", "max_pos", "enc_layers", "enc_hidden_size", "enc_heads", "enc_ff_size", "enc_dropout", "dec_layers", "dec_hidden_size", "dec_heads", "dec_ff_size", "dec_dropout"], +) + + +def convert_bertabs_checkpoints(path_to_checkpoints, dump_path): + """ Copy/paste and tweak the pre-trained weights provided by the creators + of BertAbs for the internal architecture. + """ + + # Instantiate the authors' model with the pre-trained weights + config = BertAbsConfig( + temp_dir=".", + finetune_bert=False, + large=False, + share_emb=True, + use_bert_emb=False, + encoder="bert", + max_pos=512, + enc_layers=6, + enc_hidden_size=512, + enc_heads=8, + enc_ff_size=512, + enc_dropout=0.2, + dec_layers=6, + dec_hidden_size=768, + dec_heads=8, + dec_ff_size=2048, + dec_dropout=0.2, + ) + checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage) + original = AbsSummarizer(config, torch.device("cpu"), checkpoints) + original.eval() + + new_model = BertAbsSummarizer(config, torch.device("cpu")) + new_model.eval() + + # ------------------- + # Convert the weights + # ------------------- + + logging.info("convert the model") + new_model.bert.load_state_dict(original.bert.state_dict()) + new_model.decoder.load_state_dict(original.decoder.state_dict()) + new_model.generator.load_state_dict(original.generator.state_dict()) + + # ---------------------------------- + # Make sure the outpus are identical + # ---------------------------------- + + logging.info("Make sure that the models' outputs are identical") + tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") + + # prepare the model inputs + encoder_input_ids = tokenizer.encode("This is sample éàalj'-.") + encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids))) + encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0) + decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.") + decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids))) + decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0) + + # failsafe to make sure the weights reset does not affect the + # loaded weights. + assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0 + + # forward pass + src = encoder_input_ids + tgt = decoder_input_ids + segs = token_type_ids = None + clss = None + mask_src = encoder_attention_mask = None + mask_tgt = decoder_attention_mask = None + mask_cls = None + + # The original model does not apply the geneator layer immediatly but rather in + # the beam search (where it combines softmax + linear layer). Since we already + # apply the softmax in our generation process we only apply the linear layer here. 
+ # We make sure that the outputs of the full stack are identical + output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0] + output_original_generator = original.generator(output_original_model) + + output_converted_model = new_model(encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask)[0] + output_converted_generator = new_model.generator(output_converted_model) + + maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item() + print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference)) + maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item() + print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference)) + + are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3) + if are_identical: + logging.info("all weights are equal up to 1e-3") + else: + raise ValueError("the weights are different. The new model is likely different from the original one.") + + # The model has been saved with torch.save(model) and this is bound to the exact + # directory structure. We save the state_dict instead. + logging.info("saving the model's state dictionary") + torch.save(new_model.state_dict(), "bertabs-finetuned-cnndm-extractive-abstractive-summarization-pytorch_model.bin") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--bertabs_checkpoint_path", + default=None, + type=str, + required=True, + help="Path the official PyTorch dump.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", + default=None, + type=str, + required=True, + help="Path to the output PyTorch model.", + ) + args = parser.parse_args() + + convert_bertabs_checkpoints( + args.bertabs_checkpoint_path, + args.pytorch_dump_folder_path, + ) diff --git a/examples/summarization/modeling_bertabs.py b/examples/summarization/modeling_bertabs.py new file mode 100644 index 00000000000..5bf1599ad29 --- /dev/null +++ b/examples/summarization/modeling_bertabs.py @@ -0,0 +1,1161 @@ +# MIT License + +# Copyright (c) 2019 Yang Liu and the HuggingFace team + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
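As the conversion script's docstring explains, the authors' checkpoints pickle the entire model object rather than just its weights, which is why the script must rebuild the model inside the original codebase before it can export a plain `state_dict`. Below is a minimal, hypothetical sketch of the two saving patterns; it is an editorial illustration, not part of the patch, and `TinySummarizer` is a made-up stand-in module.

```python
import torch
from torch import nn


class TinySummarizer(nn.Module):
    """ Made-up stand-in module used only for this illustration. """

    def __init__(self):
        super(TinySummarizer, self).__init__()
        self.proj = nn.Linear(8, 8)


model = TinySummarizer()

# Pickling the whole module (what the original checkpoints do) ties loading
# to the original code layout and class paths.
torch.save(model, "whole_model.pt")

# Saving only the parameters keeps the checkpoint portable: rebuild the module
# from code, then load the weights into it.
torch.save(model.state_dict(), "pytorch_model.bin")
restored = TinySummarizer()
restored.load_state_dict(torch.load("pytorch_model.bin"))
```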
+import copy +import math + +import numpy as np +import torch +from torch import nn +from torch.nn.init import xavier_uniform_ + +from transformers import BertModel, BertConfig, PreTrainedModel + +from configuration_bertabs import BertAbsConfig + + +MAX_SIZE = 5000 + +BERTABS_FINETUNED_MODEL_MAP = { + "bertabs-finetuned-cnndm": "https://s3.amazonaws.com/models.huggingface.co/bert/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization-pytorch_model.bin", +} + + +class BertAbsPreTrainedModel(PreTrainedModel): + config_class = BertAbsConfig + pretrained_model_archive_map = BERTABS_FINETUNED_MODEL_MAP + load_tf_weights = False + base_model_prefix = "bert" + + +class BertAbs(BertAbsPreTrainedModel): + def __init__(self, args, checkpoint=None, bert_extractive_checkpoint=None): + super(BertAbs, self).__init__(args) + self.args = args + self.bert = Bert() + + # If pre-trained weights are passed for Bert, load these. + load_bert_pretrained_extractive = True if bert_extractive_checkpoint else False + if load_bert_pretrained_extractive: + self.bert.model.load_state_dict( + dict( + [ + (n[11:], p) + for n, p in bert_extractive_checkpoint.items() + if n.startswith("bert.model") + ] + ), + strict=True, + ) + + self.vocab_size = self.bert.model.config.vocab_size + + if args.max_pos > 512: + my_pos_embeddings = nn.Embedding( + args.max_pos, self.bert.model.config.hidden_size + ) + my_pos_embeddings.weight.data[ + :512 + ] = self.bert.model.embeddings.position_embeddings.weight.data + my_pos_embeddings.weight.data[ + 512: + ] = self.bert.model.embeddings.position_embeddings.weight.data[-1][ + None, : + ].repeat( + args.max_pos - 512, 1 + ) + self.bert.model.embeddings.position_embeddings = my_pos_embeddings + tgt_embeddings = nn.Embedding( + self.vocab_size, self.bert.model.config.hidden_size, padding_idx=0 + ) + + tgt_embeddings.weight = copy.deepcopy( + self.bert.model.embeddings.word_embeddings.weight + ) + + self.decoder = TransformerDecoder( + self.args.dec_layers, + self.args.dec_hidden_size, + heads=self.args.dec_heads, + d_ff=self.args.dec_ff_size, + dropout=self.args.dec_dropout, + embeddings=tgt_embeddings, + vocab_size=self.vocab_size, + ) + + gen_func = nn.LogSoftmax(dim=-1) + self.generator = nn.Sequential( + nn.Linear(args.dec_hidden_size, args.vocab_size), gen_func + ) + self.generator[0].weight = self.decoder.embeddings.weight + + load_from_checkpoints = False if checkpoint is None else True + if load_from_checkpoints: + self.load_state_dict(checkpoint) + + def init_weights(self): + for module in self.decoder.modules(): + if isinstance(module, (nn.Linear, nn.Embedding)): + module.weight.data.normal_(mean=0.0, std=0.02) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + for p in self.generator.parameters(): + if p.dim() > 1: + xavier_uniform_(p) + else: + p.data.zero_() + + def forward( + self, + encoder_input_ids, + decoder_input_ids, + token_type_ids, + encoder_attention_mask, + decoder_attention_mask, + ): + encoder_output = self.bert( + input_ids=encoder_input_ids, + token_type_ids=token_type_ids, + attention_mask=encoder_attention_mask, + ) + encoder_hidden_states = encoder_output[0] + dec_state = self.decoder.init_decoder_state( + encoder_input_ids, encoder_hidden_states + ) + decoder_outputs, _ = self.decoder( + decoder_input_ids[:, :-1], encoder_hidden_states, dec_state + ) + return decoder_outputs + + +class Bert(nn.Module): + """ 
This class is not really necessary and should probably disappear. + """ + + def __init__(self): + super(Bert, self).__init__() + config = BertConfig.from_pretrained("bert-base-uncased") + self.model = BertModel(config) + + def forward(self, input_ids, attention_mask=None, token_type_ids=None, **kwargs): + self.eval() + with torch.no_grad(): + encoder_outputs, _ = self.model( + input_ids, + token_type_ids=token_type_ids, + attention_mask=attention_mask, + **kwargs + ) + return encoder_outputs + + +class TransformerDecoder(nn.Module): + """ + The Transformer decoder from "Attention is All You Need". + + Args: + num_layers (int): number of encoder layers. + d_model (int): size of the model + heads (int): number of heads + d_ff (int): size of the inner FF layer + dropout (float): dropout parameters + embeddings (:obj:`onmt.modules.Embeddings`): + embeddings to use, should have positional encodings + attn_type (str): if using a seperate copy attention + """ + + def __init__(self, num_layers, d_model, heads, d_ff, dropout, embeddings, vocab_size): + super(TransformerDecoder, self).__init__() + + # Basic attributes. + self.decoder_type = "transformer" + self.num_layers = num_layers + self.embeddings = embeddings + self.pos_emb = PositionalEncoding(dropout, self.embeddings.embedding_dim) + + # Build TransformerDecoder. + self.transformer_layers = nn.ModuleList( + [ + TransformerDecoderLayer(d_model, heads, d_ff, dropout) + for _ in range(num_layers) + ] + ) + + self.layer_norm = nn.LayerNorm(d_model, eps=1e-6) + + # forward(input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask) + # def forward(self, input_ids, state, attention_mask=None, memory_lengths=None, + # step=None, cache=None, encoder_attention_mask=None, encoder_hidden_states=None, memory_masks=None): + def forward( + self, + input_ids, + encoder_hidden_states=None, + state=None, + attention_mask=None, + memory_lengths=None, + step=None, + cache=None, + encoder_attention_mask=None, + ): + """ + See :obj:`onmt.modules.RNNDecoderBase.forward()` + memory_bank = encoder_hidden_states + """ + # Name conversion + tgt = input_ids + memory_bank = encoder_hidden_states + memory_mask = encoder_attention_mask + + # src_words = state.src + src_words = state.src + src_batch, src_len = src_words.size() + + padding_idx = self.embeddings.padding_idx + + # Decoder padding mask + tgt_words = tgt + tgt_batch, tgt_len = tgt_words.size() + tgt_pad_mask = ( + tgt_words.data.eq(padding_idx).unsqueeze(1).expand(tgt_batch, tgt_len, tgt_len) + ) + + # Encoder padding mask + if memory_mask is not None: + src_len = memory_mask.size(-1) + src_pad_mask = memory_mask.expand(src_batch, tgt_len, src_len) + else: + src_pad_mask = ( + src_words.data.eq(padding_idx) + .unsqueeze(1) + .expand(src_batch, tgt_len, src_len) + ) + + # Pass through the embeddings + emb = self.embeddings(input_ids) + output = self.pos_emb(emb, step) + assert emb.dim() == 3 # len x batch x embedding_dim + + if state.cache is None: + saved_inputs = [] + + for i in range(self.num_layers): + prev_layer_input = None + if state.cache is None: + if state.previous_input is not None: + prev_layer_input = state.previous_layer_inputs[i] + + output, all_input = self.transformer_layers[i]( + output, + memory_bank, + src_pad_mask, + tgt_pad_mask, + previous_input=prev_layer_input, + layer_cache=state.cache["layer_{}".format(i)] + if state.cache is not None + else None, + step=step, + ) + if state.cache is None: + saved_inputs.append(all_input) + + if state.cache is None: + saved_inputs = 
torch.stack(saved_inputs) + + output = self.layer_norm(output) + + if state.cache is None: + state = state.update_state(tgt, saved_inputs) + + # Decoders in transformers return a tuple. Beam search will fail + # if we don't follow this convention. + return output, state # , state + + def init_decoder_state(self, src, memory_bank, with_cache=False): + """ Init decoder state """ + state = TransformerDecoderState(src) + if with_cache: + state._init_cache(memory_bank, self.num_layers) + return state + + +class PositionalEncoding(nn.Module): + def __init__(self, dropout, dim, max_len=5000): + pe = torch.zeros(max_len, dim) + position = torch.arange(0, max_len).unsqueeze(1) + div_term = torch.exp( + (torch.arange(0, dim, 2, dtype=torch.float) * -(math.log(10000.0) / dim)) + ) + pe[:, 0::2] = torch.sin(position.float() * div_term) + pe[:, 1::2] = torch.cos(position.float() * div_term) + pe = pe.unsqueeze(0) + super(PositionalEncoding, self).__init__() + self.register_buffer("pe", pe) + self.dropout = nn.Dropout(p=dropout) + self.dim = dim + + def forward(self, emb, step=None): + emb = emb * math.sqrt(self.dim) + if step: + emb = emb + self.pe[:, step][:, None, :] + + else: + emb = emb + self.pe[:, : emb.size(1)] + emb = self.dropout(emb) + return emb + + def get_emb(self, emb): + return self.pe[:, : emb.size(1)] + + +class TransformerDecoderLayer(nn.Module): + """ + Args: + d_model (int): the dimension of keys/values/queries in + MultiHeadedAttention, also the input size of + the first-layer of the PositionwiseFeedForward. + heads (int): the number of heads for MultiHeadedAttention. + d_ff (int): the second-layer of the PositionwiseFeedForward. + dropout (float): dropout probability(0-1.0). + self_attn_type (string): type of self-attention scaled-dot, average + """ + + def __init__(self, d_model, heads, d_ff, dropout): + super(TransformerDecoderLayer, self).__init__() + + self.self_attn = MultiHeadedAttention(heads, d_model, dropout=dropout) + + self.context_attn = MultiHeadedAttention(heads, d_model, dropout=dropout) + self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout) + self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6) + self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6) + self.drop = nn.Dropout(dropout) + mask = self._get_attn_subsequent_mask(MAX_SIZE) + # Register self.mask as a buffer in TransformerDecoderLayer, so + # it gets TransformerDecoderLayer's cuda behavior automatically. 
+ self.register_buffer("mask", mask) + + def forward( + self, + inputs, + memory_bank, + src_pad_mask, + tgt_pad_mask, + previous_input=None, + layer_cache=None, + step=None, + ): + """ + Args: + inputs (`FloatTensor`): `[batch_size x 1 x model_dim]` + memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]` + src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]` + tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]` + + Returns: + (`FloatTensor`, `FloatTensor`, `FloatTensor`): + + * output `[batch_size x 1 x model_dim]` + * attn `[batch_size x 1 x src_len]` + * all_input `[batch_size x current_step x model_dim]` + + """ + dec_mask = torch.gt( + tgt_pad_mask + self.mask[:, : tgt_pad_mask.size(1), : tgt_pad_mask.size(1)], 0 + ) + input_norm = self.layer_norm_1(inputs) + all_input = input_norm + if previous_input is not None: + all_input = torch.cat((previous_input, input_norm), dim=1) + dec_mask = None + + query = self.self_attn( + all_input, + all_input, + input_norm, + mask=dec_mask, + layer_cache=layer_cache, + type="self", + ) + + query = self.drop(query) + inputs + + query_norm = self.layer_norm_2(query) + mid = self.context_attn( + memory_bank, + memory_bank, + query_norm, + mask=src_pad_mask, + layer_cache=layer_cache, + type="context", + ) + output = self.feed_forward(self.drop(mid) + query) + + return output, all_input + # return output + + def _get_attn_subsequent_mask(self, size): + """ + Get an attention mask to avoid using the subsequent info. + + Args: + size: int + + Returns: + (`LongTensor`): + + * subsequent_mask `[1 x size x size]` + """ + attn_shape = (1, size, size) + subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype("uint8") + subsequent_mask = torch.from_numpy(subsequent_mask) + return subsequent_mask + + +class MultiHeadedAttention(nn.Module): + """ + Multi-Head Attention module from + "Attention is All You Need" + :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`. + + Similar to standard `dot` attention but uses + multiple attention distributions simulataneously + to select relevant items. + + .. mermaid:: + + graph BT + A[key] + B[value] + C[query] + O[output] + subgraph Attn + D[Attn 1] + E[Attn 2] + F[Attn N] + end + A --> D + C --> D + A --> E + C --> E + A --> F + C --> F + D --> O + E --> O + F --> O + B --> O + + Also includes several additional tricks. + + Args: + head_count (int): number of parallel heads + model_dim (int): the dimension of keys/values/queries, + must be divisible by head_count + dropout (float): dropout parameter + """ + + def __init__(self, head_count, model_dim, dropout=0.1, use_final_linear=True): + assert model_dim % head_count == 0 + self.dim_per_head = model_dim // head_count + self.model_dim = model_dim + + super(MultiHeadedAttention, self).__init__() + self.head_count = head_count + + self.linear_keys = nn.Linear(model_dim, head_count * self.dim_per_head) + self.linear_values = nn.Linear(model_dim, head_count * self.dim_per_head) + self.linear_query = nn.Linear(model_dim, head_count * self.dim_per_head) + self.softmax = nn.Softmax(dim=-1) + self.dropout = nn.Dropout(dropout) + self.use_final_linear = use_final_linear + if self.use_final_linear: + self.final_linear = nn.Linear(model_dim, model_dim) + + def forward( + self, + key, + value, + query, + mask=None, + layer_cache=None, + type=None, + predefined_graph_1=None, + ): + """ + Compute the context vector and the attention vectors. 
+ + Args: + key (`FloatTensor`): set of `key_len` + key vectors `[batch, key_len, dim]` + value (`FloatTensor`): set of `key_len` + value vectors `[batch, key_len, dim]` + query (`FloatTensor`): set of `query_len` + query vectors `[batch, query_len, dim]` + mask: binary mask indicating which keys have + non-zero attention `[batch, query_len, key_len]` + Returns: + (`FloatTensor`, `FloatTensor`) : + + * output context vectors `[batch, query_len, dim]` + * one of the attention vectors `[batch, query_len, key_len]` + """ + batch_size = key.size(0) + dim_per_head = self.dim_per_head + head_count = self.head_count + key_len = key.size(1) + query_len = query.size(1) + + def shape(x): + """ projection """ + return x.view(batch_size, -1, head_count, dim_per_head).transpose(1, 2) + + def unshape(x): + """ compute context """ + return ( + x.transpose(1, 2) + .contiguous() + .view(batch_size, -1, head_count * dim_per_head) + ) + + # 1) Project key, value, and query. + if layer_cache is not None: + if type == "self": + query, key, value = ( + self.linear_query(query), + self.linear_keys(query), + self.linear_values(query), + ) + + key = shape(key) + value = shape(value) + + if layer_cache is not None: + device = key.device + if layer_cache["self_keys"] is not None: + key = torch.cat((layer_cache["self_keys"].to(device), key), dim=2) + if layer_cache["self_values"] is not None: + value = torch.cat( + (layer_cache["self_values"].to(device), value), dim=2 + ) + layer_cache["self_keys"] = key + layer_cache["self_values"] = value + elif type == "context": + query = self.linear_query(query) + if layer_cache is not None: + if layer_cache["memory_keys"] is None: + key, value = self.linear_keys(key), self.linear_values(value) + key = shape(key) + value = shape(value) + else: + key, value = ( + layer_cache["memory_keys"], + layer_cache["memory_values"], + ) + layer_cache["memory_keys"] = key + layer_cache["memory_values"] = value + else: + key, value = self.linear_keys(key), self.linear_values(value) + key = shape(key) + value = shape(value) + else: + key = self.linear_keys(key) + value = self.linear_values(value) + query = self.linear_query(query) + key = shape(key) + value = shape(value) + + query = shape(query) + + key_len = key.size(2) + query_len = query.size(2) + + # 2) Calculate and scale scores. + query = query / math.sqrt(dim_per_head) + scores = torch.matmul(query, key.transpose(2, 3)) + + if mask is not None: + mask = mask.unsqueeze(1).expand_as(scores) + scores = scores.masked_fill(mask, -1e18) + + # 3) Apply attention dropout and compute context vectors. + + attn = self.softmax(scores) + + if not predefined_graph_1 is None: + attn_masked = attn[:, -1] * predefined_graph_1 + attn_masked = attn_masked / (torch.sum(attn_masked, 2).unsqueeze(2) + 1e-9) + + attn = torch.cat([attn[:, :-1], attn_masked.unsqueeze(1)], 1) + + drop_attn = self.dropout(attn) + if self.use_final_linear: + context = unshape(torch.matmul(drop_attn, value)) + output = self.final_linear(context) + return output + else: + context = torch.matmul(drop_attn, value) + return context + + +class DecoderState(object): + """Interface for grouping together the current state of a recurrent + decoder. In the simplest case just represents the hidden state of + the model. But can also be used for implementing various forms of + input_feeding and non-recurrent models. + + Modules need to implement this to utilize beam search decoding. 
+ """ + + def detach(self): + """ Need to document this """ + self.hidden = tuple([_.detach() for _ in self.hidden]) + self.input_feed = self.input_feed.detach() + + def beam_update(self, idx, positions, beam_size): + """ Need to document this """ + for e in self._all: + sizes = e.size() + br = sizes[1] + if len(sizes) == 3: + sent_states = e.view(sizes[0], beam_size, br // beam_size, sizes[2])[ + :, :, idx + ] + else: + sent_states = e.view( + sizes[0], beam_size, br // beam_size, sizes[2], sizes[3] + )[:, :, idx] + + sent_states.data.copy_(sent_states.data.index_select(1, positions)) + + def map_batch_fn(self, fn): + raise NotImplementedError() + + +class TransformerDecoderState(DecoderState): + """ Transformer Decoder state base class """ + + def __init__(self, src): + """ + Args: + src (FloatTensor): a sequence of source words tensors + with optional feature tensors, of size (len x batch). + """ + self.src = src + self.previous_input = None + self.previous_layer_inputs = None + self.cache = None + + @property + def _all(self): + """ + Contains attributes that need to be updated in self.beam_update(). + """ + if self.previous_input is not None and self.previous_layer_inputs is not None: + return (self.previous_input, self.previous_layer_inputs, self.src) + else: + return (self.src,) + + def detach(self): + if self.previous_input is not None: + self.previous_input = self.previous_input.detach() + if self.previous_layer_inputs is not None: + self.previous_layer_inputs = self.previous_layer_inputs.detach() + self.src = self.src.detach() + + def update_state(self, new_input, previous_layer_inputs): + state = TransformerDecoderState(self.src) + state.previous_input = new_input + state.previous_layer_inputs = previous_layer_inputs + return state + + def _init_cache(self, memory_bank, num_layers): + self.cache = {} + + for l in range(num_layers): + layer_cache = {"memory_keys": None, "memory_values": None} + layer_cache["self_keys"] = None + layer_cache["self_values"] = None + self.cache["layer_{}".format(l)] = layer_cache + + def repeat_beam_size_times(self, beam_size): + """ Repeat beam_size times along batch dimension. """ + self.src = self.src.data.repeat(1, beam_size, 1) + + def map_batch_fn(self, fn): + def _recursive_map(struct, batch_dim=0): + for k, v in struct.items(): + if v is not None: + if isinstance(v, dict): + _recursive_map(v) + else: + struct[k] = fn(v, batch_dim) + + self.src = fn(self.src, 0) + if self.cache is not None: + _recursive_map(self.cache) + + +def gelu(x): + return ( + 0.5 + * x + * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) + ) + + +class PositionwiseFeedForward(nn.Module): + """ A two-layer Feed-Forward-Network with residual layer norm. + + Args: + d_model (int): the size of input for the first-layer of the FFN. + d_ff (int): the hidden layer size of the second-layer + of the FNN. + dropout (float): dropout probability in :math:`[0, 1)`. 
+ """ + + def __init__(self, d_model, d_ff, dropout=0.1): + super(PositionwiseFeedForward, self).__init__() + self.w_1 = nn.Linear(d_model, d_ff) + self.w_2 = nn.Linear(d_ff, d_model) + self.layer_norm = nn.LayerNorm(d_model, eps=1e-6) + self.actv = gelu + self.dropout_1 = nn.Dropout(dropout) + self.dropout_2 = nn.Dropout(dropout) + + def forward(self, x): + inter = self.dropout_1(self.actv(self.w_1(self.layer_norm(x)))) + output = self.dropout_2(self.w_2(inter)) + return output + x + + +# +# TRANSLATOR +# The following code is used to generate summaries using the +# pre-trained weights and beam search. +# + + +def build_predictor(args, tokenizer, symbols, model, logger=None): + # we should be able to refactor the global scorer a lot + scorer = GNMTGlobalScorer(args.alpha, length_penalty="wu") + translator = Translator( + args, model, tokenizer, symbols, global_scorer=scorer, logger=logger + ) + return translator + + +class GNMTGlobalScorer(object): + """ + NMT re-ranking score from + "Google's Neural Machine Translation System" :cite:`wu2016google` + + Args: + alpha (float): length parameter + beta (float): coverage parameter + """ + + def __init__(self, alpha, length_penalty): + self.alpha = alpha + penalty_builder = PenaltyBuilder(length_penalty) + self.length_penalty = penalty_builder.length_penalty() + + def score(self, beam, logprobs): + """ + Rescores a prediction based on penalty functions + """ + normalized_probs = self.length_penalty(beam, logprobs, self.alpha) + return normalized_probs + + +class PenaltyBuilder(object): + """ + Returns the Length and Coverage Penalty function for Beam Search. + + Args: + length_pen (str): option name of length pen + cov_pen (str): option name of cov pen + """ + + def __init__(self, length_pen): + self.length_pen = length_pen + + def length_penalty(self): + if self.length_pen == "wu": + return self.length_wu + elif self.length_pen == "avg": + return self.length_average + else: + return self.length_none + + """ + Below are all the different penalty terms implemented so far + """ + + def length_wu(self, beam, logprobs, alpha=0.0): + """ + NMT length re-ranking score from + "Google's Neural Machine Translation System" :cite:`wu2016google`. + """ + + modifier = ((5 + len(beam.next_ys)) ** alpha) / ((5 + 1) ** alpha) + return logprobs / modifier + + def length_average(self, beam, logprobs, alpha=0.0): + """ + Returns the average probability of tokens in a sequence. + """ + return logprobs / len(beam.next_ys) + + def length_none(self, beam, logprobs, alpha=0.0, beta=0.0): + """ + Returns unmodified scores. + """ + return logprobs + + +class Translator(object): + """ + Uses a model to translate a batch of sentences. + + Args: + model (:obj:`onmt.modules.NMTModel`): + NMT model to use for translation + fields (dict of Fields): data fields + beam_size (int): size of beam to use + n_best (int): number of translations produced + max_length (int): maximum length output to produce + global_scores (:obj:`GlobalScorer`): + object to rescore final translations + copy_attn (bool): use copy attention during translation + beam_trace (bool): trace beam search for debugging + logger(logging.Logger): logger. 
+ """ + + def __init__(self, args, model, vocab, symbols, global_scorer=None, logger=None): + self.logger = logger + + self.args = args + self.model = model + self.generator = self.model.generator + self.vocab = vocab + self.symbols = symbols + self.start_token = symbols["BOS"] + self.end_token = symbols["EOS"] + + self.global_scorer = global_scorer + self.beam_size = args.beam_size + self.min_length = args.min_length + self.max_length = args.max_length + + def translate(self, batch, step, attn_debug=False): + """ Generates summaries from one batch of data. + """ + self.model.eval() + with torch.no_grad(): + batch_data = self.translate_batch(batch) + translations = self.from_batch(batch_data) + return translations + + def translate_batch(self, batch, fast=False): + """ + Translate a batch of sentences. + + Mostly a wrapper around :obj:`Beam`. + + Args: + batch (:obj:`Batch`): a batch from a dataset object + data (:obj:`Dataset`): the dataset object + fast (bool): enables fast beam search (may not support all features) + + Todo: + Shouldn't need the original dataset. + """ + with torch.no_grad(): + return self._fast_translate_batch( + batch, self.max_length, min_length=self.min_length + ) + + # Where the beam search lives + # I have no idea why it is being called from the method above + def _fast_translate_batch(self, batch, max_length, min_length=0): + """ Beam Search using the encoder inputs contained in `batch`. + """ + + # The batch object is funny + # Instead of just looking at the size of the arguments we encapsulate + # a size argument. + # Where is it defined? + beam_size = self.beam_size + batch_size = batch.batch_size + src = batch.src + segs = batch.segs + mask_src = batch.mask_src + + src_features = self.model.bert(src, segs, mask_src) + dec_states = self.model.decoder.init_decoder_state( + src, src_features, with_cache=True + ) + device = src_features.device + + # Tile states and memory beam_size times. + dec_states.map_batch_fn(lambda state, dim: tile(state, beam_size, dim=dim)) + src_features = tile(src_features, beam_size, dim=0) + batch_offset = torch.arange(batch_size, dtype=torch.long, device=device) + beam_offset = torch.arange( + 0, batch_size * beam_size, step=beam_size, dtype=torch.long, device=device + ) + alive_seq = torch.full( + [batch_size * beam_size, 1], self.start_token, dtype=torch.long, device=device + ) + + # Give full probability to the first beam on the first step. + topk_log_probs = torch.tensor( + [0.0] + [float("-inf")] * (beam_size - 1), device=device + ).repeat(batch_size) + + # Structure that holds finished hypotheses. + hypotheses = [[] for _ in range(batch_size)] # noqa: F812 + + results = {} + results["predictions"] = [[] for _ in range(batch_size)] # noqa: F812 + results["scores"] = [[] for _ in range(batch_size)] # noqa: F812 + results["gold_score"] = [0] * batch_size + results["batch"] = batch + + for step in range(max_length): + decoder_input = alive_seq[:, -1].view(1, -1) + + # Decoder forward. + decoder_input = decoder_input.transpose(0, 1) + + dec_out, dec_states = self.model.decoder( + decoder_input, src_features, dec_states, step=step + ) + + # Generator forward. + log_probs = self.generator.forward(dec_out.transpose(0, 1).squeeze(0)) + vocab_size = log_probs.size(-1) + + if step < min_length: + log_probs[:, self.end_token] = -1e20 + + # Multiply probs by the beam probability. 
+ log_probs += topk_log_probs.view(-1).unsqueeze(1) + + alpha = self.global_scorer.alpha + length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha + + # Flatten probs into a list of possibilities. + curr_scores = log_probs / length_penalty + + if self.args.block_trigram: + cur_len = alive_seq.size(1) + if cur_len > 3: + for i in range(alive_seq.size(0)): + fail = False + words = [int(w) for w in alive_seq[i]] + words = [self.vocab.ids_to_tokens[w] for w in words] + words = " ".join(words).replace(" ##", "").split() + if len(words) <= 3: + continue + trigrams = [ + (words[i - 1], words[i], words[i + 1]) + for i in range(1, len(words) - 1) + ] + trigram = tuple(trigrams[-1]) + if trigram in trigrams[:-1]: + fail = True + if fail: + curr_scores[i] = -10e20 + + curr_scores = curr_scores.reshape(-1, beam_size * vocab_size) + topk_scores, topk_ids = curr_scores.topk(beam_size, dim=-1) + + # Recover log probs. + topk_log_probs = topk_scores * length_penalty + + # Resolve beam origin and true word ids. + topk_beam_index = topk_ids.div(vocab_size) + topk_ids = topk_ids.fmod(vocab_size) + + # Map beam_index to batch_index in the flat representation. + batch_index = topk_beam_index + beam_offset[ + : topk_beam_index.size(0) + ].unsqueeze(1) + select_indices = batch_index.view(-1) + + # Append last prediction. + alive_seq = torch.cat( + [alive_seq.index_select(0, select_indices), topk_ids.view(-1, 1)], -1 + ) + + is_finished = topk_ids.eq(self.end_token) + if step + 1 == max_length: + is_finished.fill_(1) + # End condition is top beam is finished. + end_condition = is_finished[:, 0].eq(1) + # Save finished hypotheses. + if is_finished.any(): + predictions = alive_seq.view(-1, beam_size, alive_seq.size(-1)) + for i in range(is_finished.size(0)): + b = batch_offset[i] + if end_condition[i]: + is_finished[i].fill_(1) + finished_hyp = is_finished[i].nonzero().view(-1) + # Store finished hypotheses for this batch. + for j in finished_hyp: + hypotheses[b].append((topk_scores[i, j], predictions[i, j, 1:])) + # If the batch reached the end, save the n_best hypotheses. + if end_condition[i]: + best_hyp = sorted(hypotheses[b], key=lambda x: x[0], reverse=True) + score, pred = best_hyp[0] + + results["scores"][b].append(score) + results["predictions"][b].append(pred) + non_finished = end_condition.eq(0).nonzero().view(-1) + # If all sentences are translated, no need to go further. + if len(non_finished) == 0: + break + # Remove finished batches for the next step. + topk_log_probs = topk_log_probs.index_select(0, non_finished) + batch_index = batch_index.index_select(0, non_finished) + batch_offset = batch_offset.index_select(0, non_finished) + alive_seq = predictions.index_select(0, non_finished).view( + -1, alive_seq.size(-1) + ) + # Reorder states. 
+ select_indices = batch_index.view(-1) + src_features = src_features.index_select(0, select_indices) + dec_states.map_batch_fn( + lambda state, dim: state.index_select(dim, select_indices) + ) + + return results + + def from_batch(self, translation_batch): + batch = translation_batch["batch"] + assert len(translation_batch["gold_score"]) == len(translation_batch["predictions"]) + batch_size = batch.batch_size + + preds, _, _, tgt_str, src = ( + translation_batch["predictions"], + translation_batch["scores"], + translation_batch["gold_score"], + batch.tgt_str, + batch.src, + ) + + translations = [] + for b in range(batch_size): + pred_sents = self.vocab.convert_ids_to_tokens([int(n) for n in preds[b][0]]) + pred_sents = " ".join(pred_sents).replace(" ##", "") + gold_sent = " ".join(tgt_str[b].split()) + raw_src = [self.vocab.ids_to_tokens[int(t)] for t in src[b]][:500] + raw_src = " ".join(raw_src) + translation = (pred_sents, gold_sent, raw_src) + translations.append(translation) + + return translations + + +def tile(x, count, dim=0): + """ + Tiles x on dimension dim count times. + """ + perm = list(range(len(x.size()))) + if dim != 0: + perm[0], perm[dim] = perm[dim], perm[0] + x = x.permute(perm).contiguous() + out_size = list(x.size()) + out_size[0] *= count + batch = x.size(0) + x = ( + x.view(batch, -1) + .transpose(0, 1) + .repeat(count, 1) + .transpose(0, 1) + .contiguous() + .view(*out_size) + ) + if dim != 0: + x = x.permute(perm).contiguous() + return x + + +# +# Optimizer for training. We keep this here in case we want to add +# a finetuning script. +# + +class BertSumOptimizer(object): + """ Specific optimizer for BertSum. + + As described in [1], the authors fine-tune BertSum for abstractive + summarization using two Adam optimizers with different warm-up steps and + learning rates. They also use a custom learning rate scheduler. + + [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders." + arXiv preprint arXiv:1908.08345 (2019). + """ + + def __init__(self, model, lr, warmup_steps, beta_1=0.99, beta_2=0.999, eps=1e-8): + self.encoder = model.encoder + self.decoder = model.decoder + self.lr = lr + self.warmup_steps = warmup_steps + + self.optimizers = { + "encoder": torch.optim.Adam( + model.encoder.parameters(), + lr=lr["encoder"], + betas=(beta_1, beta_2), + eps=eps, + ), + "decoder": torch.optim.Adam( + model.decoder.parameters(), + lr=lr["decoder"], + betas=(beta_1, beta_2), + eps=eps, + ), + } + + self._step = 0 + self.current_learning_rates = {} + + def _update_rate(self, stack): + return self.lr[stack] * min( + self._step ** (-0.5), self._step * self.warmup_steps[stack] ** (-1.5) + ) + + def zero_grad(self): + # Clear the gradients tracked by both the encoder and the decoder optimizers. + for optimizer in self.optimizers.values(): + optimizer.zero_grad() + + def step(self): + self._step += 1 + for stack, optimizer in self.optimizers.items(): + new_rate = self._update_rate(stack) + for param_group in optimizer.param_groups: + param_group["lr"] = new_rate + optimizer.step() + self.current_learning_rates[stack] = new_rate diff --git a/examples/summarization/requirements.txt b/examples/summarization/requirements.txt new file mode 100644 index 00000000000..36d75a5edc5 --- /dev/null +++ b/examples/summarization/requirements.txt @@ -0,0 +1,9 @@ +# progress bars in model download and training scripts +tqdm +# Accessing files from S3 directly. 
+boto3 +# Used for downloading models over HTTP +requests +# For ROUGE +nltk +py-rouge diff --git a/examples/summarization/run_summarization.py b/examples/summarization/run_summarization.py new file mode 100644 index 00000000000..3c339d0c30a --- /dev/null +++ b/examples/summarization/run_summarization.py @@ -0,0 +1,344 @@ +#! /usr/bin/python3 +import argparse +from collections import namedtuple +import logging +import os +import sys + +import torch +from torch.utils.data import DataLoader, SequentialSampler +from tqdm import tqdm + +from transformers import BertTokenizer + +from modeling_bertabs import BertAbs, build_predictor + +from utils_summarization import ( + SummarizationDataset, + encode_for_summarization, + build_mask, + fit_to_block_size, + compute_token_type_ids, +) + +logger = logging.getLogger(__name__) +logging.basicConfig(stream=sys.stdout, level=logging.INFO) + + +Batch = namedtuple( + "Batch", ["document_names", "batch_size", "src", "segs", "mask_src", "tgt_str"] +) + + +def evaluate(args): + tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True) + model = BertAbs.from_pretrained("bertabs-finetuned-cnndm") + model.to(args.device) + model.eval() + + symbols = { + "BOS": tokenizer.vocab["[unused0]"], + "EOS": tokenizer.vocab["[unused1]"], + "PAD": tokenizer.vocab["[PAD]"], + } + + if args.compute_rouge: + reference_summaries = [] + generated_summaries = [] + + import rouge + import nltk + nltk.download('punkt') + rouge_evaluator = rouge.Rouge( + metrics=['rouge-n', 'rouge-l'], + max_n=2, + limit_length=True, + length_limit=args.beam_size, + length_limit_type='words', + apply_avg=True, + apply_best=False, + alpha=0.5, # Default F1_score + weight_factor=1.2, + stemming=True, + ) + + # these (unused) arguments are defined to keep the compatibility + # with the legacy code and will be deleted in a next iteration. + args.result_path = "" + args.temp_dir = "" + + data_iterator = build_data_iterator(args, tokenizer) + predictor = build_predictor(args, tokenizer, symbols, model) + + logger.info("***** Running evaluation *****") + logger.info(" Number examples = %d", len(data_iterator.dataset)) + logger.info(" Batch size = %d", args.batch_size) + logger.info("") + logger.info("***** Beam Search parameters *****") + logger.info(" Beam size = %d", args.beam_size) + logger.info(" Minimum length = %d", args.min_length) + logger.info(" Maximum length = %d", args.max_length) + logger.info(" Alpha (length penalty) = %.2f", args.alpha) + logger.info(" Trigrams %s be blocked", ("will" if args.block_trigram else "will NOT")) + + for batch in tqdm(data_iterator): + batch_data = predictor.translate_batch(batch) + translations = predictor.from_batch(batch_data) + summaries = [format_summary(t) for t in translations] + save_summaries(summaries, args.summaries_output_dir, batch.document_names) + + if args.compute_rouge: + reference_summaries += batch.tgt_str + generated_summaries += summaries + + if args.compute_rouge: + scores = rouge_evaluator.get_scores(generated_summaries, reference_summaries) + str_scores = format_rouge_scores(scores) + save_rouge_scores(str_scores) + print(str_scores) + + +def save_summaries(summaries, path, original_document_name): + """ Write the summaries in fies that are prefixed by the original + files' name with the `_summary` appended. + + Attributes: + original_document_names: List[string] + Name of the document that was summarized. 
+ path: string + Path were the summaries will be written + summaries: List[string] + The summaries that we produced. + """ + for summary, document_name in zip(summaries, original_document_name): + # Prepare the summary file's name + if "." in document_name: + bare_document_name = ".".join(document_name.split(".")[:-1]) + extension = document_name.split(".")[-1] + name = bare_document_name + "_summary." + extension + else: + name = document_name + "_summary" + + file_path = os.path.join(path, name) + with open(file_path, "w") as output: + output.write(summary) + + +def format_summary(translation): + """ Transforms the output of the `from_batch` function + into nicely formatted summaries. + """ + raw_summary, _, _ = translation + summary = ( + raw_summary.replace("[unused0]", "") + .replace("[unused3]", "") + .replace("[PAD]", "") + .replace("[unused1]", "") + .replace(r" +", " ") + .replace(" [unused2] ", ". ") + .replace("[unused2]", "") + .strip() + ) + + return summary + + +def format_rouge_scores(scores): + return """\n +****** ROUGE SCORES ****** + +** ROUGE 1 +F1 >> {:.3f} +Precision >> {:.3f} +Recall >> {:.3f} + +** ROUGE 2 +F1 >> {:.3f} +Precision >> {:.3f} +Recall >> {:.3f} + +** ROUGE L +F1 >> {:.3f} +Precision >> {:.3f} +Recall >> {:.3f}""".format( + scores['rouge-1']['f'], + scores['rouge-1']['p'], + scores['rouge-1']['r'], + scores['rouge-2']['f'], + scores['rouge-2']['p'], + scores['rouge-2']['r'], + scores['rouge-l']['f'], + scores['rouge-l']['p'], + scores['rouge-l']['r'], + ) + + +def save_rouge_scores(str_scores): + with open("rouge_scores.txt", "w") as output: + output.write(str_scores) + + +# +# LOAD the dataset +# + + +def build_data_iterator(args, tokenizer): + dataset = load_and_cache_examples(args, tokenizer) + sampler = SequentialSampler(dataset) + collate_fn = lambda data: collate(data, tokenizer, block_size=512, device=args.device) + iterator = DataLoader( + dataset, sampler=sampler, batch_size=args.batch_size, collate_fn=collate_fn, + ) + + return iterator + + +def load_and_cache_examples(args, tokenizer): + dataset = SummarizationDataset(args.documents_dir) + return dataset + + +def collate(data, tokenizer, block_size, device): + """ Collate formats the data passed to the data loader. + + In particular we tokenize the data batch after batch to avoid keeping them + all in memory. We output the data as a namedtuple to fit the original BertAbs's + API. + """ + data = [x for x in data if not len(x[1]) == 0] # remove empty_files + names = [name for name, _, _ in data] + summaries = [" ".join(summary_list) for _, _, summary_list in data] + + encoded_text = [ + encode_for_summarization(story, summary, tokenizer) for _, story, summary in data + ] + encoded_stories = torch.tensor( + [ + fit_to_block_size(story, block_size, tokenizer.pad_token_id) + for story, _ in encoded_text + ] + ) + encoder_token_type_ids = compute_token_type_ids(encoded_stories, tokenizer.cls_token_id) + encoder_mask = build_mask(encoded_stories, tokenizer.pad_token_id) + + batch = Batch( + document_names=names, + batch_size=len(encoded_stories), + src=encoded_stories.to(device), + segs=encoder_token_type_ids.to(device), + mask_src=encoder_mask.to(device), + tgt_str=summaries, + ) + + return batch + + +def decode_summary(summary_tokens, tokenizer): + """ Decode the summary and return it in a format + suitable for evaluation. + """ + summary_tokens = summary_tokens.to("cpu").numpy() + summary = tokenizer.decode(summary_tokens) + sentences = summary.split(".") + sentences = [s + "." 
for s in sentences] + return sentences + + +def main(): + """ The main function defines the interface with the users. + """ + parser = argparse.ArgumentParser() + parser.add_argument( + "--documents_dir", + default=None, + type=str, + required=True, + help="The folder where the documents to summarize are located.", + ) + parser.add_argument( + "--summaries_output_dir", + default=None, + type=str, + required=False, + help="The folder in wich the summaries should be written. Defaults to the folder where the documents are", + ) + parser.add_argument( + "--compute_rouge", + default=False, + type=bool, + required=False, + help="Compute the ROUGE metrics during evaluation. Only available for the CNN/DailyMail dataset.", + ) + # EVALUATION options + parser.add_argument( + "--no_cuda", + default=False, + type=bool, + help="Whether to force the execution on CPU.", + ) + parser.add_argument( + "--batch_size", default=4, type=int, help="Batch size per GPU/CPU for training.", + ) + # BEAM SEARCH arguments + parser.add_argument( + "--min_length", + default=50, + type=int, + help="Minimum number of tokens for the summaries.", + ) + parser.add_argument( + "--max_length", + default=200, + type=int, + help="Maixmum number of tokens for the summaries.", + ) + parser.add_argument( + "--beam_size", + default=5, + type=int, + help="The number of beams to start with for each example.", + ) + parser.add_argument( + "--alpha", + default=0.95, + type=float, + help="The value of alpha for the length penalty in the beam search.", + ) + parser.add_argument( + "--block_trigram", + default=True, + type=bool, + help="Whether to block the existence of repeating trigrams in the text generated by beam search.", + ) + args = parser.parse_args() + + # Select device (distibuted not available) + args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + + # Check the existence of directories + if not args.summaries_output_dir: + args.summaries_output_dir = args.documents_dir + + if not documents_dir_is_valid(args.documents_dir): + raise FileNotFoundError( + "We could not find the directory you specified for the documents to summarize, or it was empty. Please specify a valid path." + ) + os.makedirs(args.summaries_output_dir, exist_ok=True) + + evaluate(args) + + +def documents_dir_is_valid(path): + if not os.path.exists(path): + return False + + file_list = os.listdir(path) + if len(file_list) == 0: + return False + + return True + + +if __name__ == "__main__": + main() diff --git a/examples/summarization/utils_summarization.py b/examples/summarization/utils_summarization.py new file mode 100644 index 00000000000..1d8c436ac96 --- /dev/null +++ b/examples/summarization/utils_summarization.py @@ -0,0 +1,173 @@ +from collections import deque +import os + +import torch +from torch.utils.data import Dataset + + +# ------------ +# Data loading +# ------------ + + +class SummarizationDataset(Dataset): + """ Abstracts the dataset used to train seq2seq models. + + The class will process the documents that are located in the specified + folder. The preprocessing will work on any document that is reasonably + formatted. On the CNN/DailyMail dataset it will extract both the story + and the summary. + + CNN/Daily News: + + The CNN/Daily News raw datasets are downloaded from [1]. The stories are + stored in different files; the summary appears at the end of the story as + sentences that are prefixed by the special `@highlight` line. 
To process + the data, untar both datasets in the same folder, and pass the path to this + folder as the "data_dir argument. The formatting code was inspired by [2]. + + [1] https://cs.nyu.edu/~kcho/ + [2] https://github.com/abisee/cnn-dailymail/ + """ + + def __init__(self, path="", prefix="train"): + """ We initialize the class by listing all the documents to summarize. + Files are not read in memory due to the size of some datasets (like CNN/DailyMail). + """ + assert os.path.isdir(path) + + self.documents = [] + story_filenames_list = os.listdir(path) + for story_filename in story_filenames_list: + if "summary" in story_filename: + continue + path_to_story = os.path.join(path, story_filename) + if not os.path.isfile(path_to_story): + continue + self.documents.append(path_to_story) + + def __len__(self): + """ Returns the number of documents. """ + return len(self.documents) + + def __getitem__(self, idx): + document_path = self.documents[idx] + document_name = document_path.split("/")[-1] + with open(document_path, encoding="utf-8") as source: + raw_story = source.read() + story_lines, summary_lines = process_story(raw_story) + return document_name, story_lines, summary_lines + + +def process_story(raw_story): + """ Extract the story and summary from a story file. + + Attributes: + raw_story (str): content of the story file as an utf-8 encoded string. + + Raises: + IndexError: If the stoy is empty or contains no highlights. + """ + nonempty_lines = list( + filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]) + ) + + # for some unknown reason some lines miss a period, add it + nonempty_lines = [_add_missing_period(line) for line in nonempty_lines] + + # gather article lines + story_lines = [] + lines = deque(nonempty_lines) + while True: + try: + element = lines.popleft() + if element.startswith("@highlight"): + break + story_lines.append(element) + except IndexError: + # if "@highlight" is absent from the file we pop + # all elements until there is None, raising an exception. + return story_lines, [] + + # gather summary lines + summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines)) + + return story_lines, summary_lines + + +def _add_missing_period(line): + END_TOKENS = [".", "!", "?", "...", "'", "`", '"', u"\u2019", u"\u2019", ")"] + if line.startswith("@highlight"): + return line + if line[-1] in END_TOKENS: + return line + return line + "." + + +# -------------------------- +# Encoding and preprocessing +# -------------------------- + + +def fit_to_block_size(sequence, block_size, pad_token_id): + """ Adapt the source and target sequences' lengths to the block size. + If the sequence is shorter we append padding token to the right of the sequence. + """ + if len(sequence) > block_size: + return sequence[:block_size] + else: + sequence.extend([pad_token_id] * (block_size - len(sequence))) + return sequence + + +def build_mask(sequence, pad_token_id): + """ Builds the mask. The attention mechanism will only attend to positions + with value 1. """ + mask = torch.ones_like(sequence) + idx_pad_tokens = sequence == pad_token_id + mask[idx_pad_tokens] = 0 + return mask + + +def encode_for_summarization(story_lines, summary_lines, tokenizer): + """ Encode the story and summary lines, and join them + as specified in [1] by using `[SEP] [CLS]` tokens to separate + sentences. 
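+
+    As an illustration (the token ids shown are schematic, not real vocabulary
+    ids), each story line is encoded separately with the tokenizer's special
+    tokens and the per-sentence ids are then flattened, so the encoded story
+    looks like `[CLS] <sentence 1 ids> [SEP] [CLS] <sentence 2 ids> [SEP] ...`.
+
+    [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders."
+        arXiv preprint arXiv:1908.08345 (2019).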
+ """ + story_lines_token_ids = [tokenizer.encode(line) for line in story_lines] + story_token_ids = [ + token for sentence in story_lines_token_ids for token in sentence + ] + summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines] + summary_token_ids = [ + token for sentence in summary_lines_token_ids for token in sentence + ] + + return story_token_ids, summary_token_ids + + +def compute_token_type_ids(batch, separator_token_id): + """ Segment embeddings as described in [1] + + The values {0,1} were found in the repository [2]. + + Attributes: + batch: torch.Tensor, size [batch_size, block_size] + Batch of input. + separator_token_id: int + The value of the token that separates the segments. + + [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders." + arXiv preprint arXiv:1908.08345 (2019). + [2] https://github.com/nlpyang/PreSumm (/src/prepro/data_builder.py, commit fac1217) + """ + batch_embeddings = [] + for sequence in batch: + sentence_num = -1 + embeddings = [] + for s in sequence: + if s == separator_token_id: + sentence_num += 1 + embeddings.append(sentence_num % 2) + batch_embeddings.append(embeddings) + return torch.tensor(batch_embeddings) diff --git a/examples/summarization/utils_summarization_test.py b/examples/summarization/utils_summarization_test.py new file mode 100644 index 00000000000..8bfbf6ab231 --- /dev/null +++ b/examples/summarization/utils_summarization_test.py @@ -0,0 +1,121 @@ +# coding=utf-8 +# Copyright 2019 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import unittest + +import numpy as np +import torch + +from utils_summarization import ( + compute_token_type_ids, + fit_to_block_size, + build_mask, + process_story, +) + + +class SummarizationDataProcessingTest(unittest.TestCase): + def setUp(self): + self.block_size = 10 + + def test_fit_to_block_sequence_too_small(self): + """ Pad the sequence with 0 if the sequence is smaller than the block size.""" + sequence = [1, 2, 3, 4] + expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] + self.assertEqual( + fit_to_block_size(sequence, self.block_size, 0), expected_output + ) + + def test_fit_to_block_sequence_fit_exactly(self): + """ Do nothing if the sequence is the right size. """ + sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + self.assertEqual( + fit_to_block_size(sequence, self.block_size, 0), expected_output + ) + + def test_fit_to_block_sequence_too_big(self): + """ Truncate the sequence if it is too long. """ + sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + self.assertEqual( + fit_to_block_size(sequence, self.block_size, 0), expected_output + ) + + def test_process_story_no_highlights(self): + """ Processing a story with no highlights returns an empty list for the summary. 
+ """ + raw_story = """It was the year of Our Lord one thousand seven hundred and + seventy-five.\n\nSpiritual revelations were conceded to England at that + favoured period, as at this.""" + _, summary_lines = process_story(raw_story) + self.assertEqual(summary_lines, []) + + def test_process_empty_story(self): + """ An empty story returns an empty collection of lines. + """ + raw_story = "" + story_lines, summary_lines = process_story(raw_story) + self.assertEqual(story_lines, []) + self.assertEqual(summary_lines, []) + + def test_process_story_with_missing_period(self): + raw_story = ( + "It was the year of Our Lord one thousand seven hundred and " + "seventy-five\n\nSpiritual revelations were conceded to England " + "at that favoured period, as at this.\n@highlight\n\nIt was the best of times" + ) + story_lines, summary_lines = process_story(raw_story) + + expected_story_lines = [ + "It was the year of Our Lord one thousand seven hundred and seventy-five.", + "Spiritual revelations were conceded to England at that favoured period, as at this.", + ] + self.assertEqual(expected_story_lines, story_lines) + + expected_summary_lines = ["It was the best of times."] + self.assertEqual(expected_summary_lines, summary_lines) + + def test_build_mask_no_padding(self): + sequence = torch.tensor([1, 2, 3, 4]) + expected = torch.tensor([1, 1, 1, 1]) + np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy()) + + def test_build_mask(self): + sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23]) + expected = torch.tensor([1, 1, 1, 1, 0, 0, 0]) + np.testing.assert_array_equal( + build_mask(sequence, 23).numpy(), expected.numpy() + ) + + def test_build_mask_with_padding_equal_to_one(self): + sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1]) + expected = torch.tensor([1, 1, 1, 1, 0, 0, 0]) + np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy()) + + def test_compute_token_type_ids(self): + separator = 101 + batch = torch.tensor( + [[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] + ) + expected = torch.tensor( + [[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] + ) + + result = compute_token_type_ids(batch, separator) + np.testing.assert_array_equal(result, expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/examples/test_examples.py b/examples/test_examples.py index b04d722b7b0..632d2f728e6 100644 --- a/examples/test_examples.py +++ b/examples/test_examples.py @@ -72,8 +72,7 @@ class ExamplesTests(unittest.TestCase): logger.addHandler(stream_handler) testargs = ["run_squad.py", - "--train_file=./examples/tests_samples/SQUAD/dev-v2.0-small.json", - "--predict_file=./examples/tests_samples/SQUAD/dev-v2.0-small.json", + "--data_dir=./examples/tests_samples/SQUAD", "--model_name=bert-base-uncased", "--output_dir=./examples/tests_samples/temp_dir", "--max_steps=10", diff --git a/examples/tests_samples/SQUAD/dev-v2.0-small.json b/examples/tests_samples/SQUAD/dev-v2.0.json similarity index 100% rename from examples/tests_samples/SQUAD/dev-v2.0-small.json rename to examples/tests_samples/SQUAD/dev-v2.0.json diff --git a/examples/tests_samples/SQUAD/train-v2.0.json b/examples/tests_samples/SQUAD/train-v2.0.json new file mode 100644 index 00000000000..834d9ee6602 --- /dev/null +++ b/examples/tests_samples/SQUAD/train-v2.0.json @@ -0,0 +1,140 @@ +{ + "version": "v2.0", + "data": [{ + "title": "Normans", + "paragraphs": [{ + "qas": [{ + "question": "In what country is Normandy located?", + "id": "56ddde6b9a695914005b9628", + 
"answers": [{ + "text": "France", + "answer_start": 159 + }], + "is_impossible": false + }, { + "question": "When were the Normans in Normandy?", + "id": "56ddde6b9a695914005b9629", + "answers": [{ + "text": "10th and 11th centuries", + "answer_start": 94 + }], + "is_impossible": false + }, { + "question": "From which countries did the Norse originate?", + "id": "56ddde6b9a695914005b962a", + "answers": [{ + "text": "Denmark, Iceland and Norway", + "answer_start": 256 + }], + "is_impossible": false + }, { + "plausible_answers": [{ + "text": "Rollo", + "answer_start": 308 + }], + "question": "Who did King Charles III swear fealty to?", + "id": "5ad39d53604f3c001a3fe8d3", + "answers": [], + "is_impossible": true + }, { + "plausible_answers": [{ + "text": "10th century", + "answer_start": 671 + }], + "question": "When did the Frankish identity emerge?", + "id": "5ad39d53604f3c001a3fe8d4", + "answers": [], + "is_impossible": true + }], + "context": "The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave their name to Normandy, a region in France. They were descended from Norse (\"Norman\" comes from \"Norseman\") raiders and pirates from Denmark, Iceland and Norway who, under their leader Rollo, agreed to swear fealty to King Charles III of West Francia. Through generations of assimilation and mixing with the native Frankish and Roman-Gaulish populations, their descendants would gradually merge with the Carolingian-based cultures of West Francia. The distinct cultural and ethnic identity of the Normans emerged initially in the first half of the 10th century, and it continued to evolve over the succeeding centuries." + }, { + "qas": [{ + "question": "Who was the duke in the battle of Hastings?", + "id": "56dddf4066d3e219004dad5f", + "answers": [{ + "text": "William the Conqueror", + "answer_start": 1022 + }], + "is_impossible": false + }, { + "plausible_answers": [{ + "text": "Antioch", + "answer_start": 1295 + }], + "question": "What principality did William the conquerer found?", + "id": "5ad3a266604f3c001a3fea2b", + "answers": [], + "is_impossible": true + }], + "context": "The Norman dynasty had a major political, cultural and military impact on medieval Europe and even the Near East. The Normans were famed for their martial spirit and eventually for their Christian piety, becoming exponents of the Catholic orthodoxy into which they assimilated. They adopted the Gallo-Romance language of the Frankish land they settled, their dialect becoming known as Norman, Normaund or Norman French, an important literary language. The Duchy of Normandy, which they formed by treaty with the French crown, was a great fief of medieval France, and under Richard I of Normandy was forged into a cohesive and formidable principality in feudal tenure. The Normans are noted both for their culture, such as their unique Romanesque architecture and musical traditions, and for their significant military accomplishments and innovations. Norman adventurers founded the Kingdom of Sicily under Roger II after conquering southern Italy on the Saracens and Byzantines, and an expedition on behalf of their duke, William the Conqueror, led to the Norman conquest of England at the Battle of Hastings in 1066. 
Norman cultural and military influence spread from these new European centres to the Crusader states of the Near East, where their prince Bohemond I founded the Principality of Antioch in the Levant, to Scotland and Wales in Great Britain, to Ireland, and to the coasts of north Africa and the Canary Islands." + }] + }, { + "title": "Computational_complexity_theory", + "paragraphs": [{ + "qas": [{ + "question": "What branch of theoretical computer science deals with broadly classifying computational problems by difficulty and class of relationship?", + "id": "56e16182e3433e1400422e28", + "answers": [{ + "text": "Computational complexity theory", + "answer_start": 0 + }], + "is_impossible": false + }, { + "plausible_answers": [{ + "text": "algorithm", + "answer_start": 472 + }], + "question": "What is a manual application of mathematical steps?", + "id": "5ad5316b5b96ef001a10ab76", + "answers": [], + "is_impossible": true + }], + "context": "Computational complexity theory is a branch of the theory of computation in theoretical computer science that focuses on classifying computational problems according to their inherent difficulty, and relating those classes to each other. A computational problem is understood to be a task that is in principle amenable to being solved by a computer, which is equivalent to stating that the problem may be solved by mechanical application of mathematical steps, such as an algorithm." + }, { + "qas": [{ + "question": "What measure of a computational problem broadly defines the inherent difficulty of the solution?", + "id": "56e16839cd28a01900c67887", + "answers": [{ + "text": "if its solution requires significant resources", + "answer_start": 46 + }], + "is_impossible": false + }, { + "question": "What method is used to intuitively assess or quantify the amount of resources required to solve a computational problem?", + "id": "56e16839cd28a01900c67888", + "answers": [{ + "text": "mathematical models of computation", + "answer_start": 176 + }], + "is_impossible": false + }, { + "question": "What are two basic primary resources used to guage complexity?", + "id": "56e16839cd28a01900c67889", + "answers": [{ + "text": "time and storage", + "answer_start": 305 + }], + "is_impossible": false + }, { + "plausible_answers": [{ + "text": "the number of gates in a circuit", + "answer_start": 436 + }], + "question": "What unit is measured to determine circuit simplicity?", + "id": "5ad532575b96ef001a10ab7f", + "answers": [], + "is_impossible": true + }, { + "plausible_answers": [{ + "text": "the number of processors", + "answer_start": 502 + }], + "question": "What number is used in perpendicular computing?", + "id": "5ad532575b96ef001a10ab80", + "answers": [], + "is_impossible": true + }], + "context": "A problem is regarded as inherently difficult if its solution requires significant resources, whatever the algorithm used. The theory formalizes this intuition, by introducing mathematical models of computation to study these problems and quantifying the amount of resources needed to solve them, such as time and storage. Other complexity measures are also used, such as the amount of communication (used in communication complexity), the number of gates in a circuit (used in circuit complexity) and the number of processors (used in parallel computing). One of the roles of computational complexity theory is to determine the practical limits on what computers can and cannot do." 
+ }] + }] +} \ No newline at end of file diff --git a/examples/utils_squad_evaluate.py b/examples/utils_squad_evaluate.py deleted file mode 100644 index ed162e6fe60..00000000000 --- a/examples/utils_squad_evaluate.py +++ /dev/null @@ -1,330 +0,0 @@ -""" Official evaluation script for SQuAD version 2.0. - Modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0 - -In addition to basic functionality, we also compute additional statistics and -plot precision-recall curves if an additional na_prob.json file is provided. -This file is expected to map question ID's to the model's predicted probability -that a question is unanswerable. -""" -import argparse -import collections -import json -import numpy as np -import os -import re -import string -import sys - -class EVAL_OPTS(): - def __init__(self, data_file, pred_file, out_file="", - na_prob_file="na_prob.json", na_prob_thresh=1.0, - out_image_dir=None, verbose=False): - self.data_file = data_file - self.pred_file = pred_file - self.out_file = out_file - self.na_prob_file = na_prob_file - self.na_prob_thresh = na_prob_thresh - self.out_image_dir = out_image_dir - self.verbose = verbose - -OPTS = None - -def parse_args(): - parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.') - parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.') - parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.') - parser.add_argument('--out-file', '-o', metavar='eval.json', - help='Write accuracy metrics to file (default is stdout).') - parser.add_argument('--na-prob-file', '-n', metavar='na_prob.json', - help='Model estimates of probability of no answer.') - parser.add_argument('--na-prob-thresh', '-t', type=float, default=1.0, - help='Predict "" if no-answer probability exceeds this (default = 1.0).') - parser.add_argument('--out-image-dir', '-p', metavar='out_images', default=None, - help='Save precision-recall curves to directory.') - parser.add_argument('--verbose', '-v', action='store_true') - if len(sys.argv) == 1: - parser.print_help() - sys.exit(1) - return parser.parse_args() - -def make_qid_to_has_ans(dataset): - qid_to_has_ans = {} - for article in dataset: - for p in article['paragraphs']: - for qa in p['qas']: - qid_to_has_ans[qa['id']] = bool(qa['answers']) - return qid_to_has_ans - -def normalize_answer(s): - """Lower text and remove punctuation, articles and extra whitespace.""" - def remove_articles(text): - regex = re.compile(r'\b(a|an|the)\b', re.UNICODE) - return re.sub(regex, ' ', text) - def white_space_fix(text): - return ' '.join(text.split()) - def remove_punc(text): - exclude = set(string.punctuation) - return ''.join(ch for ch in text if ch not in exclude) - def lower(text): - return text.lower() - return white_space_fix(remove_articles(remove_punc(lower(s)))) - -def get_tokens(s): - if not s: return [] - return normalize_answer(s).split() - -def compute_exact(a_gold, a_pred): - return int(normalize_answer(a_gold) == normalize_answer(a_pred)) - -def compute_f1(a_gold, a_pred): - gold_toks = get_tokens(a_gold) - pred_toks = get_tokens(a_pred) - common = collections.Counter(gold_toks) & collections.Counter(pred_toks) - num_same = sum(common.values()) - if len(gold_toks) == 0 or len(pred_toks) == 0: - # If either is no-answer, then F1 is 1 if they agree, 0 otherwise - return int(gold_toks == pred_toks) - if num_same == 0: - return 0 - precision = 1.0 * num_same / len(pred_toks) - recall = 1.0 * num_same / len(gold_toks) - f1 = (2 * 
precision * recall) / (precision + recall) - return f1 - -def get_raw_scores(dataset, preds): - exact_scores = {} - f1_scores = {} - for article in dataset: - for p in article['paragraphs']: - for qa in p['qas']: - qid = qa['id'] - gold_answers = [a['text'] for a in qa['answers'] - if normalize_answer(a['text'])] - if not gold_answers: - # For unanswerable questions, only correct answer is empty string - gold_answers = [''] - if qid not in preds: - print('Missing prediction for %s' % qid) - continue - a_pred = preds[qid] - # Take max over all gold answers - exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers) - f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers) - return exact_scores, f1_scores - -def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh): - new_scores = {} - for qid, s in scores.items(): - pred_na = na_probs[qid] > na_prob_thresh - if pred_na: - new_scores[qid] = float(not qid_to_has_ans[qid]) - else: - new_scores[qid] = s - return new_scores - -def make_eval_dict(exact_scores, f1_scores, qid_list=None): - if not qid_list: - total = len(exact_scores) - return collections.OrderedDict([ - ('exact', 100.0 * sum(exact_scores.values()) / total), - ('f1', 100.0 * sum(f1_scores.values()) / total), - ('total', total), - ]) - else: - total = len(qid_list) - return collections.OrderedDict([ - ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total), - ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total), - ('total', total), - ]) - -def merge_eval(main_eval, new_eval, prefix): - for k in new_eval: - main_eval['%s_%s' % (prefix, k)] = new_eval[k] - -def plot_pr_curve(precisions, recalls, out_image, title): - plt.step(recalls, precisions, color='b', alpha=0.2, where='post') - plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b') - plt.xlabel('Recall') - plt.ylabel('Precision') - plt.xlim([0.0, 1.05]) - plt.ylim([0.0, 1.05]) - plt.title(title) - plt.savefig(out_image) - plt.clf() - -def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, - out_image=None, title=None): - qid_list = sorted(na_probs, key=lambda k: na_probs[k]) - true_pos = 0.0 - cur_p = 1.0 - cur_r = 0.0 - precisions = [1.0] - recalls = [0.0] - avg_prec = 0.0 - for i, qid in enumerate(qid_list): - if qid_to_has_ans[qid]: - true_pos += scores[qid] - cur_p = true_pos / float(i+1) - cur_r = true_pos / float(num_true_pos) - if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i+1]]: - # i.e., if we can put a threshold after this point - avg_prec += cur_p * (cur_r - recalls[-1]) - precisions.append(cur_p) - recalls.append(cur_r) - if out_image: - plot_pr_curve(precisions, recalls, out_image, title) - return {'ap': 100.0 * avg_prec} - -def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, - qid_to_has_ans, out_image_dir): - if out_image_dir and not os.path.exists(out_image_dir): - os.makedirs(out_image_dir) - num_true_pos = sum(1 for v in qid_to_has_ans.values() if v) - if num_true_pos == 0: - return - pr_exact = make_precision_recall_eval( - exact_raw, na_probs, num_true_pos, qid_to_has_ans, - out_image=os.path.join(out_image_dir, 'pr_exact.png'), - title='Precision-Recall curve for Exact Match score') - pr_f1 = make_precision_recall_eval( - f1_raw, na_probs, num_true_pos, qid_to_has_ans, - out_image=os.path.join(out_image_dir, 'pr_f1.png'), - title='Precision-Recall curve for F1 score') - oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()} - pr_oracle = 
make_precision_recall_eval( - oracle_scores, na_probs, num_true_pos, qid_to_has_ans, - out_image=os.path.join(out_image_dir, 'pr_oracle.png'), - title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)') - merge_eval(main_eval, pr_exact, 'pr_exact') - merge_eval(main_eval, pr_f1, 'pr_f1') - merge_eval(main_eval, pr_oracle, 'pr_oracle') - -def histogram_na_prob(na_probs, qid_list, image_dir, name): - if not qid_list: - return - x = [na_probs[k] for k in qid_list] - weights = np.ones_like(x) / float(len(x)) - plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0)) - plt.xlabel('Model probability of no-answer') - plt.ylabel('Proportion of dataset') - plt.title('Histogram of no-answer probability: %s' % name) - plt.savefig(os.path.join(image_dir, 'na_prob_hist_%s.png' % name)) - plt.clf() - -def find_best_thresh(preds, scores, na_probs, qid_to_has_ans): - num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) - cur_score = num_no_ans - best_score = cur_score - best_thresh = 0.0 - qid_list = sorted(na_probs, key=lambda k: na_probs[k]) - for i, qid in enumerate(qid_list): - if qid not in scores: continue - if qid_to_has_ans[qid]: - diff = scores[qid] - else: - if preds[qid]: - diff = -1 - else: - diff = 0 - cur_score += diff - if cur_score > best_score: - best_score = cur_score - best_thresh = na_probs[qid] - return 100.0 * best_score / len(scores), best_thresh - -def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans): - num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) - cur_score = num_no_ans - best_score = cur_score - best_thresh = 0.0 - qid_list = sorted(na_probs, key=lambda k: na_probs[k]) - for i, qid in enumerate(qid_list): - if qid not in scores: continue - if qid_to_has_ans[qid]: - diff = scores[qid] - else: - if preds[qid]: - diff = -1 - else: - diff = 0 - cur_score += diff - if cur_score > best_score: - best_score = cur_score - best_thresh = na_probs[qid] - - has_ans_score, has_ans_cnt = 0, 0 - for qid in qid_list: - if not qid_to_has_ans[qid]: continue - has_ans_cnt += 1 - - if qid not in scores: continue - has_ans_score += scores[qid] - - return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt - -def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): - best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans) - best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans) - main_eval['best_exact'] = best_exact - main_eval['best_exact_thresh'] = exact_thresh - main_eval['best_f1'] = best_f1 - main_eval['best_f1_thresh'] = f1_thresh - -def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): - best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans) - best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans) - main_eval['best_exact'] = best_exact - main_eval['best_exact_thresh'] = exact_thresh - main_eval['best_f1'] = best_f1 - main_eval['best_f1_thresh'] = f1_thresh - main_eval['has_ans_exact'] = has_ans_exact - main_eval['has_ans_f1'] = has_ans_f1 - -def main(OPTS): - with open(OPTS.data_file) as f: - dataset_json = json.load(f) - dataset = dataset_json['data'] - with open(OPTS.pred_file) as f: - preds = json.load(f) - if OPTS.na_prob_file: - with open(OPTS.na_prob_file) as f: - na_probs = json.load(f) - else: - na_probs = {k: 0.0 for k in preds} - qid_to_has_ans = make_qid_to_has_ans(dataset) # 
maps qid to True/False - has_ans_qids = [k for k, v in qid_to_has_ans.items() if v] - no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v] - exact_raw, f1_raw = get_raw_scores(dataset, preds) - exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, - OPTS.na_prob_thresh) - f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, - OPTS.na_prob_thresh) - out_eval = make_eval_dict(exact_thresh, f1_thresh) - if has_ans_qids: - has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids) - merge_eval(out_eval, has_ans_eval, 'HasAns') - if no_ans_qids: - no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids) - merge_eval(out_eval, no_ans_eval, 'NoAns') - if OPTS.na_prob_file: - find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans) - if OPTS.na_prob_file and OPTS.out_image_dir: - run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, - qid_to_has_ans, OPTS.out_image_dir) - histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns') - histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns') - if OPTS.out_file: - with open(OPTS.out_file, 'w') as f: - json.dump(out_eval, f) - else: - print(json.dumps(out_eval, indent=2)) - return out_eval - -if __name__ == '__main__': - OPTS = parse_args() - if OPTS.out_image_dir: - import matplotlib - matplotlib.use('Agg') - import matplotlib.pyplot as plt - main(OPTS) diff --git a/setup.py b/setup.py index f49aee68d4d..c4af32df83a 100644 --- a/setup.py +++ b/setup.py @@ -36,9 +36,15 @@ To create the package for pypi. from io import open from setuptools import find_packages, setup + +extras = { + 'serving': ['uvicorn', 'fastapi'] +} +extras['all'] = [package for package in extras.values()] + setup( name="transformers", - version="2.1.1", + version="2.2.1", author="Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors", author_email="thomas@huggingface.co", description="State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch", @@ -61,8 +67,11 @@ setup( "transformers=transformers.__main__:main", ] }, + extras_require=extras, + scripts=[ + 'transformers-cli' + ], # python_requires='>=3.5.0', - tests_require=['pytest'], classifiers=[ 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', diff --git a/templates/adding_a_new_example_script/README.md b/templates/adding_a_new_example_script/README.md new file mode 100644 index 00000000000..2afca08bf84 --- /dev/null +++ b/templates/adding_a_new_example_script/README.md @@ -0,0 +1,5 @@ +# How to add a new example script in 🤗Transformers + +This folder provide a template for adding a new example script implementing a training or inference task with the models in the 🤗Transformers library. + +Currently only examples for PyTorch are provided which are adaptations of the library's SQuAD examples which implement single-GPU and distributed training with gradient accumulation and mixed-precision (using NVIDIA's apex library) to cover a reasonable range of use cases. diff --git a/templates/adding_a_new_example_script/run_xxx.py b/templates/adding_a_new_example_script/run_xxx.py new file mode 100644 index 00000000000..77ce587a548 --- /dev/null +++ b/templates/adding_a_new_example_script/run_xxx.py @@ -0,0 +1,559 @@ +# coding=utf-8 +# Copyright 2018 XXX. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Finetuning the library models for task XXX.""" + +from __future__ import absolute_import, division, print_function + +import argparse +import logging +import os +import random +import glob + +import numpy as np +import torch +from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, + TensorDataset) +from torch.utils.data.distributed import DistributedSampler + +try: + from torch.utils.tensorboard import SummaryWriter +except: + from tensorboardX import SummaryWriter + +from tqdm import tqdm, trange + +from transformers import (WEIGHTS_NAME, BertConfig, + BertForQuestionAnswering, BertTokenizer, + XLMConfig, XLMForQuestionAnswering, + XLMTokenizer, XLNetConfig, + XLNetForQuestionAnswering, + XLNetTokenizer, + DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer) + +from transformers import AdamW, get_linear_schedule_with_warmup + +from utils_squad import (read_squad_examples, convert_examples_to_features, + RawResult, write_predictions, + RawResultExtended, write_predictions_extended) + +# The follwing import is the official SQuAD evaluation script (2.0). +# You can remove it from the dependencies if you are using this script outside of the library +# We've added it here for automated tests (see examples/test_examples.py file) +from utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad + +logger = logging.getLogger(__name__) + +ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \ + for conf in (BertConfig, XLNetConfig, XLMConfig)), ()) + +MODEL_CLASSES = { + 'bert': (BertConfig, BertForQuestionAnswering, BertTokenizer), + 'xlnet': (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer), + 'xlm': (XLMConfig, XLMForQuestionAnswering, XLMTokenizer), + 'distilbert': (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer) +} + +def set_seed(args): + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + if args.n_gpu > 0: + torch.cuda.manual_seed_all(args.seed) + +def to_list(tensor): + return tensor.detach().cpu().tolist() + +def train(args, train_dataset, model, tokenizer): + """ Train the model """ + if args.local_rank in [-1, 0]: + tb_writer = SummaryWriter() + + args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) + train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) + + if args.max_steps > 0: + t_total = args.max_steps + args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 + else: + t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs + + # Prepare optimizer and schedule (linear warmup and decay) + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 
'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) + if args.fp16: + try: + from apex import amp + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) + + # multi-gpu training (should be after apex fp16 initialization) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Distributed training (should be after apex fp16 initialization) + if args.local_rank != -1: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], + output_device=args.local_rank, + find_unused_parameters=True) + + # Train! + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_dataset)) + logger.info(" Num Epochs = %d", args.num_train_epochs) + logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", + args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) + logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) + logger.info(" Total optimization steps = %d", t_total) + + global_step = 0 + tr_loss, logging_loss = 0.0, 0.0 + model.zero_grad() + train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) + set_seed(args) # Added here for reproductibility (even between python 2 and 3) + for _ in train_iterator: + epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) + for step, batch in enumerate(epoch_iterator): + model.train() + batch = tuple(t.to(args.device) for t in batch) + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'start_positions': batch[3], + 'end_positions': batch[4]} + if args.model_type != 'distilbert': + inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] + if args.model_type in ['xlnet', 'xlm']: + inputs.update({'cls_index': batch[5], + 'p_mask': batch[6]}) + outputs = model(**inputs) + loss = outputs[0] # model outputs are always tuple in transformers (see doc) + + if args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + + if args.fp16: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + else: + loss.backward() + + tr_loss += loss.item() + if (step + 1) % args.gradient_accumulation_steps == 0: + if args.fp16: + torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) + else: + torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + + optimizer.step() + scheduler.step() # Update learning rate schedule + model.zero_grad() + global_step += 1 + + if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: + # Log metrics + if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well + results = 
evaluate(args, model, tokenizer) + for key, value in results.items(): + tb_writer.add_scalar('eval_{}'.format(key), value, global_step) + tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step) + tb_writer.add_scalar('loss', (tr_loss - logging_loss)/args.logging_steps, global_step) + logging_loss = tr_loss + + if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: + # Save model checkpoint + output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training + model_to_save.save_pretrained(output_dir) + torch.save(args, os.path.join(output_dir, 'training_args.bin')) + logger.info("Saving model checkpoint to %s", output_dir) + + if args.max_steps > 0 and global_step > args.max_steps: + epoch_iterator.close() + break + if args.max_steps > 0 and global_step > args.max_steps: + train_iterator.close() + break + + if args.local_rank in [-1, 0]: + tb_writer.close() + + return global_step, tr_loss / global_step + + +def evaluate(args, model, tokenizer, prefix=""): + dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) + + if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: + os.makedirs(args.output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) + eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # Eval! + logger.info("***** Running evaluation {} *****".format(prefix)) + logger.info(" Num examples = %d", len(dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + all_results = [] + for batch in tqdm(eval_dataloader, desc="Evaluating"): + model.eval() + batch = tuple(t.to(args.device) for t in batch) + with torch.no_grad(): + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1] + } + if args.model_type != 'distilbert': + inputs['token_type_ids'] = None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids + example_indices = batch[3] + if args.model_type in ['xlnet', 'xlm']: + inputs.update({'cls_index': batch[4], + 'p_mask': batch[5]}) + outputs = model(**inputs) + + for i, example_index in enumerate(example_indices): + eval_feature = features[example_index.item()] + unique_id = int(eval_feature.unique_id) + if args.model_type in ['xlnet', 'xlm']: + # XLNet uses a more complex post-processing procedure + result = RawResultExtended(unique_id = unique_id, + start_top_log_probs = to_list(outputs[0][i]), + start_top_index = to_list(outputs[1][i]), + end_top_log_probs = to_list(outputs[2][i]), + end_top_index = to_list(outputs[3][i]), + cls_logits = to_list(outputs[4][i])) + else: + result = RawResult(unique_id = unique_id, + start_logits = to_list(outputs[0][i]), + end_logits = to_list(outputs[1][i])) + all_results.append(result) + + # Compute predictions + output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) + output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) + if args.version_2_with_negative: + output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) + else: + output_null_log_odds_file = None + + if 
args.model_type in ['xlnet', 'xlm']: + # XLNet uses a more complex post-processing procedure + write_predictions_extended(examples, features, all_results, args.n_best_size, + args.max_answer_length, output_prediction_file, + output_nbest_file, output_null_log_odds_file, args.predict_file, + model.config.start_n_top, model.config.end_n_top, + args.version_2_with_negative, tokenizer, args.verbose_logging) + else: + write_predictions(examples, features, all_results, args.n_best_size, + args.max_answer_length, args.do_lower_case, output_prediction_file, + output_nbest_file, output_null_log_odds_file, args.verbose_logging, + args.version_2_with_negative, args.null_score_diff_threshold) + + # Evaluate with the official SQuAD script + evaluate_options = EVAL_OPTS(data_file=args.predict_file, + pred_file=output_prediction_file, + na_prob_file=output_null_log_odds_file) + results = evaluate_on_squad(evaluate_options) + return results + + +def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): + if args.local_rank not in [-1, 0] and not evaluate: + torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache + + # Load data features from cache or dataset file + input_file = args.predict_file if evaluate else args.train_file + cached_features_file = os.path.join(os.path.dirname(input_file), 'cached_{}_{}_{}'.format( + 'dev' if evaluate else 'train', + list(filter(None, args.model_name_or_path.split('/'))).pop(), + str(args.max_seq_length))) + if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples: + logger.info("Loading features from cached file %s", cached_features_file) + features = torch.load(cached_features_file) + else: + logger.info("Creating features from dataset file at %s", input_file) + examples = read_squad_examples(input_file=input_file, + is_training=not evaluate, + version_2_with_negative=args.version_2_with_negative) + features = convert_examples_to_features(examples=examples, + tokenizer=tokenizer, + max_seq_length=args.max_seq_length, + doc_stride=args.doc_stride, + max_query_length=args.max_query_length, + is_training=not evaluate) + if args.local_rank in [-1, 0]: + logger.info("Saving features into cached file %s", cached_features_file) + torch.save(features, cached_features_file) + + if args.local_rank == 0 and not evaluate: + torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache + + # Convert to Tensors and build dataset + all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) + all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) + all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) + all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long) + all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float) + if evaluate: + all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) + dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, + all_example_index, all_cls_index, all_p_mask) + else: + all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) + all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) + dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, + all_start_positions, all_end_positions, + 
all_cls_index, all_p_mask) + + if output_examples: + return dataset, examples, features + return dataset + + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--train_file", default=None, type=str, required=True, + help="SQuAD json for training. E.g., train-v1.1.json") + parser.add_argument("--predict_file", default=None, type=str, required=True, + help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json") + parser.add_argument("--model_type", default=None, type=str, required=True, + help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) + parser.add_argument("--model_name_or_path", default=None, type=str, required=True, + help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model checkpoints and predictions will be written.") + + ## Other parameters + parser.add_argument("--config_name", default="", type=str, + help="Pretrained config name or path if not the same as model_name") + parser.add_argument("--tokenizer_name", default="", type=str, + help="Pretrained tokenizer name or path if not the same as model_name") + parser.add_argument("--cache_dir", default="", type=str, + help="Where do you want to store the pre-trained models downloaded from s3") + + parser.add_argument('--version_2_with_negative', action='store_true', + help='If true, the SQuAD examples contain some that do not have an answer.') + parser.add_argument('--null_score_diff_threshold', type=float, default=0.0, + help="If null_score - best_non_null is greater than the threshold predict null.") + + parser.add_argument("--max_seq_length", default=384, type=int, + help="The maximum total input sequence length after WordPiece tokenization. Sequences " + "longer than this will be truncated, and sequences shorter than this will be padded.") + parser.add_argument("--doc_stride", default=128, type=int, + help="When splitting up a long document into chunks, how much stride to take between chunks.") + parser.add_argument("--max_query_length", default=64, type=int, + help="The maximum number of tokens for the question. 
Questions longer than this will "
+                             "be truncated to this length.")
+    parser.add_argument("--do_train", action='store_true',
+                        help="Whether to run training.")
+    parser.add_argument("--do_eval", action='store_true',
+                        help="Whether to run eval on the dev set.")
+    parser.add_argument("--evaluate_during_training", action='store_true',
+                        help="Run evaluation during training at each logging step.")
+    parser.add_argument("--do_lower_case", action='store_true',
+                        help="Set this flag if you are using an uncased model.")
+
+    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
+                        help="Batch size per GPU/CPU for training.")
+    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
+                        help="Batch size per GPU/CPU for evaluation.")
+    parser.add_argument("--learning_rate", default=5e-5, type=float,
+                        help="The initial learning rate for Adam.")
+    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
+                        help="Number of update steps to accumulate before performing a backward/update pass.")
+    parser.add_argument("--weight_decay", default=0.0, type=float,
+                        help="Weight decay if we apply some.")
+    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
+                        help="Epsilon for Adam optimizer.")
+    parser.add_argument("--max_grad_norm", default=1.0, type=float,
+                        help="Max gradient norm.")
+    parser.add_argument("--num_train_epochs", default=3.0, type=float,
+                        help="Total number of training epochs to perform.")
+    parser.add_argument("--max_steps", default=-1, type=int,
+                        help="If > 0: set total number of training steps to perform. Overrides num_train_epochs.")
+    parser.add_argument("--warmup_steps", default=0, type=int,
+                        help="Linear warmup over warmup_steps.")
+    parser.add_argument("--n_best_size", default=20, type=int,
+                        help="The total number of n-best predictions to generate in the nbest_predictions.json output file.")
+    parser.add_argument("--max_answer_length", default=30, type=int,
+                        help="The maximum length of an answer that can be generated. This is needed because the start "
+                             "and end predictions are not conditioned on one another.")
+    parser.add_argument("--verbose_logging", action='store_true',
+                        help="If true, all of the warnings related to data processing will be printed. 
" + "A number of warnings are expected for a normal SQuAD evaluation.") + + parser.add_argument('--logging_steps', type=int, default=50, + help="Log every X updates steps.") + parser.add_argument('--save_steps', type=int, default=50, + help="Save checkpoint every X updates steps.") + parser.add_argument("--eval_all_checkpoints", action='store_true', + help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") + parser.add_argument("--no_cuda", action='store_true', + help="Whether not to use CUDA when available") + parser.add_argument('--overwrite_output_dir', action='store_true', + help="Overwrite the content of the output directory") + parser.add_argument('--overwrite_cache', action='store_true', + help="Overwrite the cached training and evaluation sets") + parser.add_argument('--seed', type=int, default=42, + help="random seed for initialization") + + parser.add_argument("--local_rank", type=int, default=-1, + help="local_rank for distributed training on gpus") + parser.add_argument('--fp16', action='store_true', + help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") + parser.add_argument('--fp16_opt_level', type=str, default='O1', + help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." + "See details at https://nvidia.github.io/apex/amp.html") + parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.") + parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.") + args = parser.parse_args() + + if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: + raise ValueError("Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format(args.output_dir)) + + # Setup distant debugging if needed + if args.server_ip and args.server_port: + # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script + import ptvsd + print("Waiting for debugger attach") + ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) + ptvsd.wait_for_attach() + + # Setup CUDA, GPU & distributed training + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + args.n_gpu = torch.cuda.device_count() + else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + torch.distributed.init_process_group(backend='nccl') + args.n_gpu = 1 + args.device = device + + # Setup logging + logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt = '%m/%d/%Y %H:%M:%S', + level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) + logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", + args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) + + # Set seed + set_seed(args) + + # Load pretrained model and tokenizer + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab + + args.model_type = args.model_type.lower() + config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] + config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, + cache_dir=args.cache_dir if args.cache_dir else None) + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, + do_lower_case=args.do_lower_case, + cache_dir=args.cache_dir if args.cache_dir else None) + model = model_class.from_pretrained(args.model_name_or_path, + from_tf=bool('.ckpt' in args.model_name_or_path), + config=config, + cache_dir=args.cache_dir if args.cache_dir else None) + + if args.local_rank == 0: + torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab + + model.to(args.device) + + logger.info("Training/evaluation parameters %s", args) + + # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. + # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will + # remove the need for this code, but it is still valid. 
+ if args.fp16: + try: + import apex + apex.amp.register_half_function(torch, 'einsum') + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + + # Training + if args.do_train: + train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) + global_step, tr_loss = train(args, train_dataset, model, tokenizer) + logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) + + + # Save the trained model and the tokenizer + if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): + # Create output directory if needed + if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: + os.makedirs(args.output_dir) + + logger.info("Saving model checkpoint to %s", args.output_dir) + # Save a trained model, configuration and tokenizer using `save_pretrained()`. + # They can then be reloaded using `from_pretrained()` + model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training + model_to_save.save_pretrained(args.output_dir) + tokenizer.save_pretrained(args.output_dir) + + # Good practice: save your training arguments together with the trained model + torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) + + # Load a trained model and vocabulary that you have fine-tuned + model = model_class.from_pretrained(args.output_dir) + tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) + model.to(args.device) + + + # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory + results = {} + if args.do_eval and args.local_rank in [-1, 0]: + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs + + logger.info("Evaluate the following checkpoints: %s", checkpoints) + + for checkpoint in checkpoints: + # Reload the model + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + model = model_class.from_pretrained(checkpoint) + model.to(args.device) + + # Evaluate + result = evaluate(args, model, tokenizer, prefix=global_step) + + result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items()) + results.update(result) + + logger.info("Results: {}".format(results)) + + return results + + +if __name__ == "__main__": + main() diff --git a/examples/utils_squad.py b/templates/adding_a_new_example_script/utils_xxx.py similarity index 99% rename from examples/utils_squad.py rename to templates/adding_a_new_example_script/utils_xxx.py index b990ecc8420..3f4145e028c 100644 --- a/examples/utils_squad.py +++ b/templates/adding_a_new_example_script/utils_xxx.py @@ -1,7 +1,6 @@ # coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# Copyright 2018 XXX. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -""" Load SQuAD dataset. 
""" +""" Load XXX dataset. """ from __future__ import absolute_import, division, print_function diff --git a/templates/adding_a_new_model/README.md b/templates/adding_a_new_model/README.md new file mode 100644 index 00000000000..b546555bd6e --- /dev/null +++ b/templates/adding_a_new_model/README.md @@ -0,0 +1,62 @@ +# How to add a new model in 🤗Transformers + +This folder describes the process to add a new model in 🤗Transformers and provide templates for the required files. + +The library is designed to incorporate a variety of models and code bases. As such the process for adding a new model usually mostly consists in copy-pasting to relevant original code in the various sections of the templates included in the present repository. + +One important point though is that the library has the following goals impacting the way models are incorporated: + +- one specific feature of the API is the capability to run the model and tokenizer inline. The tokenization code thus often have to be slightly adapted to allow for running in the python interpreter. +- the package is also designed to be as self-consistent and with a small and reliable set of packages dependencies. In consequence, additional dependencies are usually not allowed when adding a model but can be allowed for the inclusion of a new tokenizer (recent examples of dependencies added for tokenizer specificities include `sentencepiece` and `sacremoses`). Please make sure to check the existing dependencies when possible before adding a new one. + +For a quick overview of the library organization, please check the [QuickStart section of the documentation](https://huggingface.co/transformers/quickstart.html). + +# Typical workflow for including a model + +Here an overview of the general workflow: + +- [ ] add model/configuration/tokenization classes +- [ ] add conversion scripts +- [ ] add tests +- [ ] finalize + +Let's detail what should be done at each step + +## Adding model/configuration/tokenization classes + +Here is the workflow for adding model/configuration/tokenization classes: + +- [ ] copy the python files from the present folder to the main folder and rename them, replacing `xxx` with your model name, +- [ ] edit the files to replace `XXX` (with various casing) with your model name +- [ ] copy-paste or create a simple configuration class for your model in the `configuration_...` file +- [ ] copy-paste or create the code for your model in the `modeling_...` files (PyTorch and TF 2.0) +- [ ] copy-paste or create a tokenizer class for your model in the `tokenization_...` file + +# Adding conversion scripts + +Here is the workflow for the conversion scripts: + +- [ ] copy the conversion script (`convert_...`) from the present folder to the main folder. +- [ ] edit this script to convert your original checkpoint weights to the current pytorch ones. 
+
+## Adding tests
+
+Here is the workflow for adding tests:
+
+- [ ] copy the Python files from the `tests` sub-folder of the present folder to the `tests` sub-folder of the main folder and rename them, replacing `xxx` with your model name,
+- [ ] edit the test files to replace `XXX` (with various casing) with your model name
+- [ ] edit the test code as needed
+
+## Final steps
+
+You can then finish the addition by adding imports for your classes in the common files:
+
+- [ ] add imports for all the relevant classes in `__init__.py`
+- [ ] add your configuration in `configuration_auto.py`
+- [ ] add your PyTorch and TF 2.0 models respectively in `modeling_auto.py` and `modeling_tf_auto.py`
+- [ ] add your tokenizer in `tokenization_auto.py`
+- [ ] add your models and tokenizer to `pipeline.py`
+- [ ] add a link to your conversion script in the main conversion utility (currently in `__main__` but will be moved to the `commands` subfolder in the near future)
+- [ ] edit the PyTorch to TF 2.0 conversion script to add your model in the `convert_pytorch_checkpoint_to_tf2.py` file
+- [ ] add a mention of your model in the doc: `README.md` and the documentation itself at `docs/source/pretrained_models.rst`.
+- [ ] upload the pretrained weights, configuration and vocabulary files.
diff --git a/templates/adding_a_new_model/configuration_xxx.py b/templates/adding_a_new_model/configuration_xxx.py
new file mode 100644
index 00000000000..b1614e71af0
--- /dev/null
+++ b/templates/adding_a_new_model/configuration_xxx.py
@@ -0,0 +1,130 @@
+# coding=utf-8
+# Copyright 2010, XXX authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" XXX model configuration """
+
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import json
+import logging
+import sys
+import six
+from io import open
+
+from .configuration_utils import PretrainedConfig
+
+logger = logging.getLogger(__name__)
+
+XXX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+    'xxx-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-base-uncased-config.json",
+    'xxx-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-large-uncased-config.json",
+}
+
+
+class XxxConfig(PretrainedConfig):
+    r"""
+        :class:`~transformers.XxxConfig` is the configuration class to store the configuration of a
+        `XxxModel`.
+
+
+        Arguments:
+            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `XxxModel`.
+            hidden_size: Size of the encoder layers and the pooler layer.
+            num_hidden_layers: Number of hidden layers in the Transformer encoder.
+            num_attention_heads: Number of attention heads for each attention layer in
+                the Transformer encoder.
+            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
+                layer in the Transformer encoder.
+            hidden_act: The non-linear activation function (function or string) in the
+                encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
+ hidden_dropout_prob: The dropout probabilitiy for all fully connected + layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob: The dropout ratio for the attention + probabilities. + max_position_embeddings: The maximum sequence length that this model might + ever be used with. Typically set this to something large just in case + (e.g., 512 or 1024 or 2048). + type_vocab_size: The vocabulary size of the `token_type_ids` passed into + `XxxModel`. + initializer_range: The sttdev of the truncated_normal_initializer for + initializing all weight matrices. + layer_norm_eps: The epsilon used by LayerNorm. + """ + pretrained_config_archive_map = XXX_PRETRAINED_CONFIG_ARCHIVE_MAP + + def __init__(self, + vocab_size_or_config_json_file=50257, + n_positions=1024, + n_ctx=1024, + n_embd=768, + n_layer=12, + n_head=12, + resid_pdrop=0.1, + embd_pdrop=0.1, + attn_pdrop=0.1, + layer_norm_epsilon=1e-5, + initializer_range=0.02, + + num_labels=1, + summary_type='cls_index', + summary_use_proj=True, + summary_activation=None, + summary_proj_to_labels=True, + summary_first_dropout=0.1, + **kwargs): + super(XxxConfig, self).__init__(**kwargs) + self.vocab_size = vocab_size_or_config_json_file if isinstance(vocab_size_or_config_json_file, six.string_types) else -1 + self.n_ctx = n_ctx + self.n_positions = n_positions + self.n_embd = n_embd + self.n_layer = n_layer + self.n_head = n_head + self.resid_pdrop = resid_pdrop + self.embd_pdrop = embd_pdrop + self.attn_pdrop = attn_pdrop + self.layer_norm_epsilon = layer_norm_epsilon + self.initializer_range = initializer_range + + self.num_labels = num_labels + self.summary_type = summary_type + self.summary_use_proj = summary_use_proj + self.summary_activation = summary_activation + self.summary_first_dropout = summary_first_dropout + self.summary_proj_to_labels = summary_proj_to_labels + if isinstance(vocab_size_or_config_json_file, six.string_types): + with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader: + json_config = json.loads(reader.read()) + for key, value in json_config.items(): + self.__dict__[key] = value + elif not isinstance(vocab_size_or_config_json_file, int): + raise ValueError( + "First argument must be either a vocabulary size (int)" + "or the path to a pretrained model config file (str)" + ) + + @property + def max_position_embeddings(self): + return self.n_positions + + @property + def hidden_size(self): + return self.n_embd + + @property + def num_attention_heads(self): + return self.n_head + + @property + def num_hidden_layers(self): + return self.n_layer diff --git a/templates/adding_a_new_model/convert_xxx_original_tf_checkpoint_to_pytorch.py b/templates/adding_a_new_model/convert_xxx_original_tf_checkpoint_to_pytorch.py new file mode 100755 index 00000000000..d50d129cba3 --- /dev/null +++ b/templates/adding_a_new_model/convert_xxx_original_tf_checkpoint_to_pytorch.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Convert XXX checkpoint.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import torch + +from transformers import XxxConfig, XxxForPreTraining, load_tf_weights_in_xxx + +import logging +logging.basicConfig(level=logging.INFO) + +def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, xxx_config_file, pytorch_dump_path): + # Initialise PyTorch model + config = XxxConfig.from_json_file(xxx_config_file) + print("Building PyTorch model from configuration: {}".format(str(config))) + model = XxxForPreTraining(config) + + # Load weights from tf checkpoint + load_tf_weights_in_xxx(model, config, tf_checkpoint_path) + + # Save pytorch-model + print("Save PyTorch model to {}".format(pytorch_dump_path)) + torch.save(model.state_dict(), pytorch_dump_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + ## Required parameters + parser.add_argument("--tf_checkpoint_path", + default = None, + type = str, + required = True, + help = "Path to the TensorFlow checkpoint path.") + parser.add_argument("--xxx_config_file", + default = None, + type = str, + required = True, + help = "The config json file corresponding to the pre-trained XXX model. \n" + "This specifies the model architecture.") + parser.add_argument("--pytorch_dump_path", + default = None, + type = str, + required = True, + help = "Path to the output PyTorch model.") + args = parser.parse_args() + convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, + args.xxx_config_file, + args.pytorch_dump_path) diff --git a/templates/adding_a_new_model/modeling_tf_xxx.py b/templates/adding_a_new_model/modeling_tf_xxx.py new file mode 100644 index 00000000000..59f798bdbfb --- /dev/null +++ b/templates/adding_a_new_model/modeling_tf_xxx.py @@ -0,0 +1,504 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" TF 2.0 XXX model. 
""" + +#################################################### +# In this template, replace all the XXX (various casings) with your model name +#################################################### + +from __future__ import absolute_import, division, print_function, unicode_literals + +import json +import logging +import math +import os +import sys +from io import open + +import numpy as np +import tensorflow as tf + +from .configuration_xxx import XxxConfig +from .modeling_tf_utils import TFPreTrainedModel, get_initializer, shape_list +from .file_utils import add_start_docstrings + +logger = logging.getLogger(__name__) + +#################################################### +# This dict contrains shortcut names and associated url +# for the pretrained weights provided with the models +#################################################### +TF_XXX_PRETRAINED_MODEL_ARCHIVE_MAP = { + 'xxx-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-base-uncased-tf_model.h5", + 'xxx-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-large-uncased-tf_model.h5", +} + +#################################################### +# TF 2.0 Models are constructed using Keras imperative API by sub-classing +# - tf.keras.layers.Layer for the layers and +# - TFPreTrainedModel for the models (itself a sub-class of tf.keras.Model) +#################################################### + +#################################################### +# Here is an example of typical layer in a TF 2.0 model of the library +# The classes are usually identical to the PyTorch ones and prefixed with 'TF'. +# +# Note that class __init__ parameters includes **kwargs (send to 'super'). +# This let us have a control on class scope and variable names: +# More precisely, we set the names of the class attributes (lower level layers) to +# to the equivalent attributes names in the PyTorch model so we can have equivalent +# class and scope structure between PyTorch and TF 2.0 models and easily load one in the other. 
+# +# See the conversion methods in modeling_tf_pytorch_utils.py for more details +#################################################### +class TFXxxLayer(tf.keras.layers.Layer): + def __init__(self, config, **kwargs): + super(TFXxxLayer, self).__init__(**kwargs) + self.attention = TFXxxAttention(config, name='attention') + self.intermediate = TFXxxIntermediate(config, name='intermediate') + self.transformer_output = TFXxxOutput(config, name='output') + + def call(self, inputs, training=False): + hidden_states, attention_mask, head_mask = inputs + + attention_outputs = self.attention([hidden_states, attention_mask, head_mask], training=training) + attention_output = attention_outputs[0] + intermediate_output = self.intermediate(attention_output) + layer_output = self.transformer_output([intermediate_output, attention_output], training=training) + outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them + return outputs + + +#################################################### +# The full model without a specific pretrained or finetuning head is +# provided as a tf.keras.layers.Layer usually called "TFXxxMainLayer" +#################################################### +class TFXxxMainLayer(tf.keras.layers.Layer): + def __init__(self, config, **kwargs): + super(TFXxxMainLayer, self).__init__(**kwargs) + + def _resize_token_embeddings(self, new_num_tokens): + raise NotImplementedError # Not implemented yet in the library fr TF 2.0 models + + def _prune_heads(self, heads_to_prune): + raise NotImplementedError # Not implemented yet in the library fr TF 2.0 models + + def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, training=False): + # We allow three types of multi-inputs: + # - traditional keyword arguments in the call method + # - all the arguments provided as a dict in the first positional argument of call + # - all the arguments provided as a list/tuple (ordered) in the first positional argument of call + # The last two options are useful to use the tf.keras fit() method. + + if isinstance(inputs, (tuple, list)): + input_ids = inputs[0] + attention_mask = inputs[1] if len(inputs) > 1 else attention_mask + token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids + position_ids = inputs[3] if len(inputs) > 3 else position_ids + head_mask = inputs[4] if len(inputs) > 4 else head_mask + assert len(inputs) <= 5, "Too many inputs." + elif isinstance(inputs, dict): + input_ids = inputs.get('input_ids') + attention_mask = inputs.get('attention_mask', attention_mask) + token_type_ids = inputs.get('token_type_ids', token_type_ids) + position_ids = inputs.get('position_ids', position_ids) + head_mask = inputs.get('head_mask', head_mask) + assert len(inputs) <= 5, "Too many inputs." + else: + input_ids = inputs + + if attention_mask is None: + attention_mask = tf.fill(shape_list(input_ids), 1) + if token_type_ids is None: + token_type_ids = tf.fill(shape_list(input_ids), 0) + + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. 
+ extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + + extended_attention_mask = tf.cast(extended_attention_mask, tf.float32) + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + if not head_mask is None: + raise NotImplementedError + else: + head_mask = [None] * self.num_hidden_layers + # head_mask = tf.constant([0] * self.num_hidden_layers) + + ################################## + # Replace this with your model code + embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids) + encoder_outputs = self.encoder([embedding_output, extended_attention_mask, head_mask], training=training) + sequence_output = encoder_outputs[0] + outputs = (sequence_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here + + return outputs # sequence_output, (hidden_states), (attentions) + + +#################################################### +# TFXxxPreTrainedModel is a sub-class of tf.keras.Model +# which take care of loading and saving pretrained weights +# and various common utilities. +# Here you just need to specify a few (self-explanatory) +# pointers for your model. +#################################################### +class TFXxxPreTrainedModel(TFPreTrainedModel): + """ An abstract class to handle weights initialization and + a simple interface for dowloading and loading pretrained models. + """ + config_class = XxxConfig + pretrained_model_archive_map = TF_XXX_PRETRAINED_MODEL_ARCHIVE_MAP + base_model_prefix = "transformer" + + +XXX_START_DOCSTRING = r""" The XXX model was proposed in + `XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ + by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer + pre-trained using a combination of masked language modeling objective and next sentence prediction + on a large corpus comprising the Toronto Book Corpus and Wikipedia. + + This model is a tf.keras.Model `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and + refer to the TF 2.0 documentation for all matter related to general usage and behavior. + + .. _`XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`: + https://arxiv.org/abs/1810.04805 + + .. _`tf.keras.Model`: + https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model + + Note on the model inputs: + TF 2.0 models accepts two formats as inputs: + + - having all inputs as keyword arguments (like PyTorch models), or + - having all inputs as a list, tuple or dict in the first positional arguments. + + This second option is usefull when using `tf.keras.Model.fit()` method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. 
+ + If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : + + - a single Tensor with input_ids only and nothing else: `model(inputs_ids) + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associaed to the input names given in the docstring: + `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})` + + Parameters: + config (:class:`~transformers.XxxConfig`): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the configuration. + Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. +""" + +XXX_INPUTS_DOCSTRING = r""" + Inputs: + **input_ids**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: + Indices of input sequence tokens in the vocabulary. + To match pre-training, XXX input sequence should be formatted with [CLS] and [SEP] tokens as follows: + + (a) For sequence pairs: + + ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` + + ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` + + (b) For single sequences: + + ``tokens: [CLS] the dog is hairy . [SEP]`` + + ``token_type_ids: 0 0 0 0 0 0 0`` + + Xxx is a model with absolute position embeddings so it's usually advised to pad the inputs on + the right rather than the left. + + Indices can be obtained using :class:`transformers.XxxTokenizer`. + See :func:`transformers.PreTrainedTokenizer.encode` and + :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. + **attention_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: + Mask to avoid performing attention on padding token indices. + Mask values selected in ``[0, 1]``: + ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. + **token_type_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: + Segment token indices to indicate first and second portions of the inputs. + Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` + corresponds to a `sentence B` token + (see `XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details). + **position_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: + Indices of positions of each input sequence tokens in the position embeddings. + Selected in the range ``[0, config.max_position_embeddings - 1]``. + **head_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: + Mask to nullify selected heads of the self-attention modules. + Mask values selected in ``[0, 1]``: + ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. 
+""" + +@add_start_docstrings("The bare Xxx Model transformer outputing raw hidden-states without any specific head on top.", + XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) +class TFXxxModel(TFXxxPreTrainedModel): + r""" + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **last_hidden_state**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, hidden_size)`` + Sequence of hidden-states at the output of the last layer of the model. + **pooler_output**: ``tf.Tensor`` of shape ``(batch_size, hidden_size)`` + Last layer hidden-state of the first token of the sequence (classification token) + further processed by a Linear layer and a Tanh activation function. The Linear + layer weights are trained from the next sentence prediction (classification) + objective during Xxx pretraining. This output is usually *not* a good summary + of the semantic content of the input, you're often better with averaging or pooling + the sequence of hidden-states for the whole input sequence. + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + + Examples:: + + import tensorflow as tf + from transformers import XxxTokenizer, TFXxxModel + + tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') + model = TFXxxModel.from_pretrained('xxx-base-uncased') + input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 + outputs = model(input_ids) + last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple + + """ + def __init__(self, config, *inputs, **kwargs): + super(TFXxxModel, self).__init__(config, *inputs, **kwargs) + self.transformer = TFXxxMainLayer(config, name='transformer') + + def call(self, inputs, **kwargs): + outputs = self.transformer(inputs, **kwargs) + return outputs + + +@add_start_docstrings("""Xxx Model with a `language modeling` head on top. """, + XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) +class TFXxxForMaskedLM(TFXxxPreTrainedModel): + r""" + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **prediction_scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
+ **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + + Examples:: + + import tensorflow as tf + from transformers import XxxTokenizer, TFXxxForMaskedLM + + tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') + model = TFXxxForMaskedLM.from_pretrained('xxx-base-uncased') + input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 + outputs = model(input_ids) + prediction_scores = outputs[0] + + """ + def __init__(self, config, *inputs, **kwargs): + super(TFXxxForMaskedLM, self).__init__(config, *inputs, **kwargs) + + self.transformer = TFXxxMainLayer(config, name='transformer') + self.mlm = TFXxxMLMHead(config, self.transformer.embeddings, name='mlm') + + def call(self, inputs, **kwargs): + outputs = self.transformer(inputs, **kwargs) + + sequence_output = outputs[0] + prediction_scores = self.mlm(sequence_output, training=kwargs.get('training', False)) + + outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here + + return outputs # prediction_scores, (hidden_states), (attentions) + + +@add_start_docstrings("""Xxx Model transformer with a sequence classification/regression head on top (a linear layer on top of + the pooled output) e.g. for GLUE tasks. """, + XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) +class TFXxxForSequenceClassification(TFXxxPreTrainedModel): + r""" + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **logits**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, config.num_labels)`` + Classification (or regression if config.num_labels==1) scores (before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
+ + Examples:: + + import tensorflow as tf + from transformers import XxxTokenizer, TFXxxForSequenceClassification + + tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') + model = TFXxxForSequenceClassification.from_pretrained('xxx-base-uncased') + input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 + outputs = model(input_ids) + logits = outputs[0] + + """ + def __init__(self, config, *inputs, **kwargs): + super(TFXxxForSequenceClassification, self).__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + + self.transformer = TFXxxMainLayer(config, name='transformer') + self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + self.classifier = tf.keras.layers.Dense(config.num_labels, + kernel_initializer=get_initializer(config.initializer_range), + name='classifier') + + def call(self, inputs, **kwargs): + outputs = self.transformer(inputs, **kwargs) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output, training=kwargs.get('training', False)) + logits = self.classifier(pooled_output) + + outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here + + return outputs # logits, (hidden_states), (attentions) + + +@add_start_docstrings("""Xxx Model with a token classification head on top (a linear layer on top of + the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, + XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) +class TFXxxForTokenClassification(TFXxxPreTrainedModel): + r""" + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` + Classification scores (before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
+ + Examples:: + + import tensorflow as tf + from transformers import XxxTokenizer, TFXxxForTokenClassification + + tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') + model = TFXxxForTokenClassification.from_pretrained('xxx-base-uncased') + input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 + outputs = model(input_ids) + scores = outputs[0] + + """ + def __init__(self, config, *inputs, **kwargs): + super(TFXxxForTokenClassification, self).__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + + self.transformer = TFXxxMainLayer(config, name='transformer') + self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + self.classifier = tf.keras.layers.Dense(config.num_labels, + kernel_initializer=get_initializer(config.initializer_range), + name='classifier') + + def call(self, inputs, **kwargs): + outputs = self.transformer(inputs, **kwargs) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output, training=kwargs.get('training', False)) + logits = self.classifier(sequence_output) + + outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here + + return outputs # scores, (hidden_states), (attentions) + + +@add_start_docstrings("""Xxx Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of + the hidden-states output to compute `span start logits` and `span end logits`). """, + XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) +class TFXxxForQuestionAnswering(TFXxxPreTrainedModel): + r""" + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **start_scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length,)`` + Span-start scores (before SoftMax). + **end_scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length,)`` + Span-end scores (before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
+ + Examples:: + + import tensorflow as tf + from transformers import XxxTokenizer, TFXxxForQuestionAnswering + + tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') + model = TFXxxForQuestionAnswering.from_pretrained('xxx-base-uncased') + input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 + outputs = model(input_ids) + start_scores, end_scores = outputs[:2] + + """ + def __init__(self, config, *inputs, **kwargs): + super(TFXxxForQuestionAnswering, self).__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + + self.transformer = TFXxxMainLayer(config, name='transformer') + self.qa_outputs = tf.keras.layers.Dense(config.num_labels, + kernel_initializer=get_initializer(config.initializer_range), + name='qa_outputs') + + def call(self, inputs, **kwargs): + outputs = self.transformer(inputs, **kwargs) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = tf.split(logits, 2, axis=-1) + start_logits = tf.squeeze(start_logits, axis=-1) + end_logits = tf.squeeze(end_logits, axis=-1) + + outputs = (start_logits, end_logits,) + outputs[2:] + + return outputs # start_logits, end_logits, (hidden_states), (attentions) diff --git a/templates/adding_a_new_model/modeling_xxx.py b/templates/adding_a_new_model/modeling_xxx.py new file mode 100644 index 00000000000..94c4b0db9a1 --- /dev/null +++ b/templates/adding_a_new_model/modeling_xxx.py @@ -0,0 +1,658 @@ +# coding=utf-8 +# Copyright 2018 XXX Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch XXX model. 
""" + +#################################################### +# In this template, replace all the XXX (various casings) with your model name +#################################################### + +from __future__ import absolute_import, division, print_function, unicode_literals + +import json +import logging +import math +import os +import sys +from io import open + +import torch +from torch import nn +from torch.nn import CrossEntropyLoss, MSELoss + +from .modeling_utils import PreTrainedModel, prune_linear_layer +from .configuration_xxx import XxxConfig +from .file_utils import add_start_docstrings + +logger = logging.getLogger(__name__) + +#################################################### +# This dict contrains shortcut names and associated url +# for the pretrained weights provided with the models +#################################################### +XXX_PRETRAINED_MODEL_ARCHIVE_MAP = { + 'xxx-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-base-uncased-pytorch_model.bin", + 'xxx-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-large-uncased-pytorch_model.bin", +} + +#################################################### +# This is a conversion method from TF 1.0 to PyTorch +# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28 +#################################################### +def load_tf_weights_in_xxx(model, config, tf_checkpoint_path): + """ Load tf checkpoints in a pytorch model. + """ + try: + import re + import numpy as np + import tensorflow as tf + except ImportError: + logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " + "https://www.tensorflow.org/install/ for installation instructions.") + raise + tf_path = os.path.abspath(tf_checkpoint_path) + logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + logger.info("Loading TF weight {} with shape {}".format(name, shape)) + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array) + + for name, array in zip(names, arrays): + name = name.split('/') + # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v + # which are not required for using pretrained model + if any(n in ["adam_v", "adam_m", "global_step"] for n in name): + logger.info("Skipping {}".format("/".join(name))) + continue + pointer = model + for m_name in name: + if re.fullmatch(r'[A-Za-z]+_\d+', m_name): + l = re.split(r'_(\d+)', m_name) + else: + l = [m_name] + if l[0] == 'kernel' or l[0] == 'gamma': + pointer = getattr(pointer, 'weight') + elif l[0] == 'output_bias' or l[0] == 'beta': + pointer = getattr(pointer, 'bias') + elif l[0] == 'output_weights': + pointer = getattr(pointer, 'weight') + elif l[0] == 'squad': + pointer = getattr(pointer, 'classifier') + else: + try: + pointer = getattr(pointer, l[0]) + except AttributeError: + logger.info("Skipping {}".format("/".join(name))) + continue + if len(l) >= 2: + num = int(l[1]) + pointer = pointer[num] + if m_name[-11:] == '_embeddings': + pointer = getattr(pointer, 'weight') + elif m_name == 'kernel': + array = np.transpose(array) + try: + assert pointer.shape == array.shape + except AssertionError as e: + e.args += (pointer.shape, array.shape) + raise + logger.info("Initialize PyTorch weight {}".format(name)) + pointer.data = torch.from_numpy(array) + return 
model + + +#################################################### +# PyTorch Models are constructed by sub-classing +# - torch.nn.Module for the layers and +# - PreTrainedModel for the models (itself a sub-class of torch.nn.Module) +#################################################### + +#################################################### +# Here is an example of typical layer in a PyTorch model of the library +# The classes are usually identical to the TF 2.0 ones without the 'TF' prefix. +# +# See the conversion methods in modeling_tf_pytorch_utils.py for more details +#################################################### +class XxxLayer(nn.Module): + def __init__(self, config): + super(XxxLayer, self).__init__() + self.attention = XxxAttention(config) + self.intermediate = XxxIntermediate(config) + self.output = XxxOutput(config) + + def forward(self, hidden_states, attention_mask=None, head_mask=None): + attention_outputs = self.attention(hidden_states, attention_mask, head_mask) + attention_output = attention_outputs[0] + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them + return outputs + + + +#################################################### +# PreTrainedModel is a sub-class of torch.nn.Module +# which take care of loading and saving pretrained weights +# and various common utilities. +# +# Here you just need to specify a few (self-explanatory) +# pointers for your model and the weights initialization +# method if its not fully covered by PreTrainedModel's default method +#################################################### +class XxxPreTrainedModel(PreTrainedModel): + """ An abstract class to handle weights initialization and + a simple interface for dowloading and loading pretrained models. + """ + config_class = XxxConfig + pretrained_model_archive_map = XXX_PRETRAINED_MODEL_ARCHIVE_MAP + load_tf_weights = load_tf_weights_in_xxx + base_model_prefix = "transformer" + + def _init_weights(self, module): + """ Initialize the weights """ + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + elif isinstance(module, XxxLayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +XXX_START_DOCSTRING = r""" The XXX model was proposed in + `XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ + by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer + pre-trained using a combination of masked language modeling objective and next sentence prediction + on a large corpus comprising the Toronto Book Corpus and Wikipedia. + + This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and + refer to the PyTorch documentation for all matter related to general usage and behavior. + + .. _`XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`: + https://arxiv.org/abs/1810.04805 + + .. _`torch.nn.Module`: + https://pytorch.org/docs/stable/nn.html#module + + Parameters: + config (:class:`~transformers.XxxConfig`): Model configuration class with all the parameters of the model. 
+ Initializing with a config file does not load the weights associated with the model, only the configuration. + Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. +""" + +XXX_INPUTS_DOCSTRING = r""" + Inputs: + **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Indices of input sequence tokens in the vocabulary. + To match pre-training, XXX input sequence should be formatted with [CLS] and [SEP] tokens as follows: + + (a) For sequence pairs: + + ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` + + ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` + + (b) For single sequences: + + ``tokens: [CLS] the dog is hairy . [SEP]`` + + ``token_type_ids: 0 0 0 0 0 0 0`` + + Xxx is a model with absolute position embeddings so it's usually advised to pad the inputs on + the right rather than the left. + + Indices can be obtained using :class:`transformers.XxxTokenizer`. + See :func:`transformers.PreTrainedTokenizer.encode` and + :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. + **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: + Mask to avoid performing attention on padding token indices. + Mask values selected in ``[0, 1]``: + ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. + **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Segment token indices to indicate first and second portions of the inputs. + Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` + corresponds to a `sentence B` token + (see `XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details). + **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Indices of positions of each input sequence tokens in the position embeddings. + Selected in the range ``[0, config.max_position_embeddings - 1]``. + **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: + Mask to nullify selected heads of the self-attention modules. + Mask values selected in ``[0, 1]``: + ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. +""" + +@add_start_docstrings("The bare Xxx Model transformer outputting raw hidden-states without any specific head on top.", + XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) +class XxxModel(XxxPreTrainedModel): + r""" + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` + Sequence of hidden-states at the output of the last layer of the model. + **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)`` + Last layer hidden-state of the first token of the sequence (classification token) + further processed by a Linear layer and a Tanh activation function. 
The Linear + layer weights are trained from the next sentence prediction (classification) + objective during Xxx pretraining. This output is usually *not* a good summary + of the semantic content of the input, you're often better with averaging or pooling + the sequence of hidden-states for the whole input sequence. + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + + Examples:: + + tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') + model = XxxModel.from_pretrained('xxx-base-uncased') + input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + outputs = model(input_ids) + last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple + + """ + def __init__(self, config): + super(XxxModel, self).__init__(config) + + self.embeddings = XxxEmbeddings(config) + self.encoder = XxxEncoder(config) + self.pooler = XxxPooler(config) + + self.init_weights() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, new_embeddings): + self.embeddings.word_embeddings = new_embeddings + + def _prune_heads(self, heads_to_prune): + """ Prunes heads of the model. + heads_to_prune: dict of {layer_num: list of heads to prune in this layer} + See base class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None): + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if attention_mask is None: + attention_mask = torch.ones(input_shape, device=device) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. 
+ # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + if head_mask is not None: + if head_mask.dim() == 1: + head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) + head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) + elif head_mask.dim() == 2: + head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer + head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility + else: + head_mask = [None] * self.config.num_hidden_layers + + ################################## + # Replace this with your model code + embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) + encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask) + sequence_output = encoder_outputs[0] + outputs = (sequence_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here + + return outputs # sequence_output, (hidden_states), (attentions) + + +@add_start_docstrings("""Xxx Model with a `language modeling` head on top. """, + XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) +class XxxForMaskedLM(XxxPreTrainedModel): + r""" + **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Labels for computing the masked language modeling loss. + Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) + Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels + in ``[0, ..., config.vocab_size]`` + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Masked language modeling loss. + **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
+ + Examples:: + + tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') + model = XxxForMaskedLM.from_pretrained('xxx-base-uncased') + input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + outputs = model(input_ids, masked_lm_labels=input_ids) + loss, prediction_scores = outputs[:2] + + """ + def __init__(self, config): + super(XxxForMaskedLM, self).__init__(config) + + self.transformer = XxxModel(config) + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size) + + self.init_weights() + + def get_output_embeddings(self): + return self.lm_head + + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, + masked_lm_labels=None): + + outputs = self.transformer(input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds) + + sequence_output = outputs[0] + prediction_scores = self.lm_head(sequence_output) + + outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here + if masked_lm_labels is not None: + loss_fct = CrossEntropyLoss(ignore_index=-1) + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) + outputs = (masked_lm_loss,) + outputs + + return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions) + + +@add_start_docstrings("""Xxx Model transformer with a sequence classification/regression head on top (a linear layer on top of + the pooled output) e.g. for GLUE tasks. """, + XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) +class XxxForSequenceClassification(XxxPreTrainedModel): + r""" + **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: + Labels for computing the sequence classification/regression loss. + Indices should be in ``[0, ..., config.num_labels - 1]``. + If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss); + if ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Classification (or regression if config.num_labels==1) loss. + **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` + Classification (or regression if config.num_labels==1) scores (before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
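+ + When ``config.num_labels == 1`` the head acts as a regressor: for example (illustrative values), ``labels = torch.tensor([[0.7]])`` + yields a mean-square loss, while with ``config.num_labels == 3`` and ``labels = torch.tensor([1])`` a cross-entropy loss is returned.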
+ + Examples:: + + tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') + model = XxxForSequenceClassification.from_pretrained('xxx-base-uncased') + input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 + outputs = model(input_ids, labels=labels) + loss, logits = outputs[:2] + + """ + def __init__(self, config): + super(XxxForSequenceClassification, self).__init__(config) + self.num_labels = config.num_labels + + self.transformer = XxxModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) + + self.init_weights() + + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, + position_ids=None, head_mask=None, inputs_embeds=None, labels=None): + + outputs = self.transformer(input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + + outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here + + if labels is not None: + if self.num_labels == 1: + # We are doing regression + loss_fct = MSELoss() + loss = loss_fct(logits.view(-1), labels.view(-1)) + else: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + outputs = (loss,) + outputs + + return outputs # (loss), logits, (hidden_states), (attentions) + + +@add_start_docstrings("""Xxx Model with a token classification head on top (a linear layer on top of + the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, + XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) +class XxxForTokenClassification(XxxPreTrainedModel): + r""" + **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Labels for computing the token classification loss. + Indices should be in ``[0, ..., config.num_labels - 1]``. + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Classification loss. + **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` + Classification scores (before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
+ + Examples:: + + tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') + model = XxxForTokenClassification.from_pretrained('xxx-base-uncased') + input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1 + outputs = model(input_ids, labels=labels) + loss, scores = outputs[:2] + + """ + def __init__(self, config): + super(XxxForTokenClassification, self).__init__(config) + self.num_labels = config.num_labels + + self.transformer = XxxModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + self.init_weights() + + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, + position_ids=None, head_mask=None, inputs_embeds=None, labels=None): + + outputs = self.transformer(input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here + if labels is not None: + loss_fct = CrossEntropyLoss() + # Only keep active parts of the loss + if attention_mask is not None: + active_loss = attention_mask.view(-1) == 1 + active_logits = logits.view(-1, self.num_labels)[active_loss] + active_labels = labels.view(-1)[active_loss] + loss = loss_fct(active_logits, active_labels) + else: + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + outputs = (loss,) + outputs + + return outputs # (loss), scores, (hidden_states), (attentions) + + +@add_start_docstrings("""Xxx Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of + the hidden-states output to compute `span start logits` and `span end logits`). """, + XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING) +class XxxForQuestionAnswering(XxxPreTrainedModel): + r""" + **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). + Position outside of the sequence are not taken into account for computing the loss. + **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). + Position outside of the sequence are not taken into account for computing the loss. + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. + **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` + Span-start scores (before SoftMax). + **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` + Span-end scores (before SoftMax). 
+ **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + + Examples:: + + tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased') + model = XxxForQuestionAnswering.from_pretrained('xxx-large-uncased-whole-word-masking-finetuned-squad') + question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" + input_text = "[CLS] " + question + " [SEP] " + text + " [SEP]" + input_ids = tokenizer.encode(input_text) + token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))] + start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids])) + all_tokens = tokenizer.convert_ids_to_tokens(input_ids) + print(' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])) + # a nice puppet + + + """ + def __init__(self, config): + super(XxxForQuestionAnswering, self).__init__(config) + self.num_labels = config.num_labels + + self.transformer = XxxModel(config) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + self.init_weights() + + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, + start_positions=None, end_positions=None): + + outputs = self.transformer(input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1) + end_logits = end_logits.squeeze(-1) + + outputs = (start_logits, end_logits,) + outputs[2:] + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions.clamp_(0, ignored_index) + end_positions.clamp_(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + outputs = (total_loss,) + outputs + + return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions) diff --git a/templates/adding_a_new_model/tests/modeling_tf_xxx_test.py b/templates/adding_a_new_model/tests/modeling_tf_xxx_test.py new file mode 100644 index 00000000000..d7e576bf8b1 --- /dev/null +++ b/templates/adding_a_new_model/tests/modeling_tf_xxx_test.py @@ -0,0 +1,255 @@ +# coding=utf-8 +# Copyright 2018 XXX Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +import shutil +import sys + +from .modeling_tf_common_test import (TFCommonTestCases, ids_tensor) +from .configuration_common_test import ConfigTester +from .utils import require_tf, slow + +from transformers import XxxConfig, is_tf_available + +if is_tf_available(): + import tensorflow as tf + from transformers.modeling_tf_xxx import (TFXxxModel, TFXxxForMaskedLM, + TFXxxForSequenceClassification, + TFXxxForTokenClassification, + TFXxxForQuestionAnswering, + TF_XXX_PRETRAINED_MODEL_ARCHIVE_MAP) + + +@require_tf +class TFXxxModelTest(TFCommonTestCases.TFCommonModelTester): + + all_model_classes = (TFXxxModel, TFXxxForMaskedLM, TFXxxForQuestionAnswering, + TFXxxForSequenceClassification, + TFXxxForTokenClassification) if is_tf_available() else () + + class TFXxxModelTester(object): + + def __init__(self, + parent, + batch_size=13, + seq_length=7, + is_training=True, + use_input_mask=True, + use_token_type_ids=True, + use_labels=True, + vocab_size=99, + hidden_size=32, + num_hidden_layers=5, + num_attention_heads=4, + intermediate_size=37, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=16, + type_sequence_label_size=2, + initializer_range=0.02, + num_labels=3, + num_choices=4, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_input_mask = use_input_mask + self.use_token_type_ids = use_token_type_ids + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.type_sequence_label_size = type_sequence_label_size + self.initializer_range = initializer_range + self.num_labels = num_labels + self.num_choices = num_choices + self.scope = scope + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + + input_mask = None + if self.use_input_mask: + input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) + + token_type_ids = None + if self.use_token_type_ids: + token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) + + sequence_labels = None + token_labels = None + choice_labels = None + if self.use_labels: + sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) + token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) + choice_labels = ids_tensor([self.batch_size], 
self.num_choices) + + config = XxxConfig( + vocab_size_or_config_json_file=self.vocab_size, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + hidden_act=self.hidden_act, + hidden_dropout_prob=self.hidden_dropout_prob, + attention_probs_dropout_prob=self.attention_probs_dropout_prob, + max_position_embeddings=self.max_position_embeddings, + type_vocab_size=self.type_vocab_size, + initializer_range=self.initializer_range) + + return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + + def create_and_check_xxx_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + model = TFXxxModel(config=config) + inputs = {'input_ids': input_ids, + 'attention_mask': input_mask, + 'token_type_ids': token_type_ids} + sequence_output, pooled_output = model(inputs) + + inputs = [input_ids, input_mask] + sequence_output, pooled_output = model(inputs) + + sequence_output, pooled_output = model(input_ids) + + result = { + "sequence_output": sequence_output.numpy(), + "pooled_output": pooled_output.numpy(), + } + self.parent.assertListEqual( + list(result["sequence_output"].shape), + [self.batch_size, self.seq_length, self.hidden_size]) + self.parent.assertListEqual(list(result["pooled_output"].shape), [self.batch_size, self.hidden_size]) + + + def create_and_check_xxx_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + model = TFXxxForMaskedLM(config=config) + inputs = {'input_ids': input_ids, + 'attention_mask': input_mask, + 'token_type_ids': token_type_ids} + prediction_scores, = model(inputs) + result = { + "prediction_scores": prediction_scores.numpy(), + } + self.parent.assertListEqual( + list(result["prediction_scores"].shape), + [self.batch_size, self.seq_length, self.vocab_size]) + + + def create_and_check_xxx_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + config.num_labels = self.num_labels + model = TFXxxForSequenceClassification(config=config) + inputs = {'input_ids': input_ids, + 'attention_mask': input_mask, + 'token_type_ids': token_type_ids} + logits, = model(inputs) + result = { + "logits": logits.numpy(), + } + self.parent.assertListEqual( + list(result["logits"].shape), + [self.batch_size, self.num_labels]) + + + def create_and_check_xxx_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + config.num_labels = self.num_labels + model = TFXxxForTokenClassification(config=config) + inputs = {'input_ids': input_ids, + 'attention_mask': input_mask, + 'token_type_ids': token_type_ids} + logits, = model(inputs) + result = { + "logits": logits.numpy(), + } + self.parent.assertListEqual( + list(result["logits"].shape), + [self.batch_size, self.seq_length, self.num_labels]) + + + def create_and_check_xxx_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + model = TFXxxForQuestionAnswering(config=config) + inputs = {'input_ids': input_ids, + 'attention_mask': input_mask, + 'token_type_ids': token_type_ids} + start_logits, end_logits = model(inputs) + result = { + "start_logits": start_logits.numpy(), + "end_logits": end_logits.numpy(), + } + self.parent.assertListEqual( + 
list(result["start_logits"].shape), + [self.batch_size, self.seq_length]) + self.parent.assertListEqual( + list(result["end_logits"].shape), + [self.batch_size, self.seq_length]) + + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + (config, input_ids, token_type_ids, input_mask, + sequence_labels, token_labels, choice_labels) = config_and_inputs + inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} + return config, inputs_dict + + def setUp(self): + self.model_tester = TFXxxModelTest.TFXxxModelTester(self) + self.config_tester = ConfigTester(self, config_class=XxxConfig, hidden_size=37) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_xxx_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_xxx_model(*config_and_inputs) + + def test_for_masked_lm(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_xxx_for_masked_lm(*config_and_inputs) + + def test_for_question_answering(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_xxx_for_question_answering(*config_and_inputs) + + def test_for_sequence_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_xxx_for_sequence_classification(*config_and_inputs) + + def test_for_token_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_xxx_for_token_classification(*config_and_inputs) + + @slow + def test_model_from_pretrained(self): + cache_dir = "/tmp/transformers_test/" + for model_name in ['xxx-base-uncased']: + model = TFXxxModel.from_pretrained(model_name, cache_dir=cache_dir) + shutil.rmtree(cache_dir) + self.assertIsNotNone(model) + +if __name__ == "__main__": + unittest.main() diff --git a/templates/adding_a_new_model/tests/modeling_xxx_test.py b/templates/adding_a_new_model/tests/modeling_xxx_test.py new file mode 100644 index 00000000000..bfc70921cd9 --- /dev/null +++ b/templates/adding_a_new_model/tests/modeling_xxx_test.py @@ -0,0 +1,259 @@ +# coding=utf-8 +# Copyright 2018 XXX Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +import shutil + +from transformers import is_torch_available + +from .modeling_common_test import (CommonTestCases, ids_tensor) +from .configuration_common_test import ConfigTester +from .utils import require_torch, slow, torch_device + +if is_torch_available(): + from transformers import (XxxConfig, XxxModel, XxxForMaskedLM, + XxxForNextSentencePrediction, XxxForPreTraining, + XxxForQuestionAnswering, XxxForSequenceClassification, + XxxForTokenClassification, XxxForMultipleChoice) + from transformers.modeling_xxx import XXX_PRETRAINED_MODEL_ARCHIVE_MAP + + +@require_torch +class XxxModelTest(CommonTestCases.CommonModelTester): + + all_model_classes = (XxxModel, XxxForMaskedLM, XxxForQuestionAnswering, + XxxForSequenceClassification, + XxxForTokenClassification) if is_torch_available() else () + + class XxxModelTester(object): + + def __init__(self, + parent, + batch_size=13, + seq_length=7, + is_training=True, + use_input_mask=True, + use_token_type_ids=True, + use_labels=True, + vocab_size=99, + hidden_size=32, + num_hidden_layers=5, + num_attention_heads=4, + intermediate_size=37, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=16, + type_sequence_label_size=2, + initializer_range=0.02, + num_labels=3, + num_choices=4, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_input_mask = use_input_mask + self.use_token_type_ids = use_token_type_ids + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.type_sequence_label_size = type_sequence_label_size + self.initializer_range = initializer_range + self.num_labels = num_labels + self.num_choices = num_choices + self.scope = scope + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + + input_mask = None + if self.use_input_mask: + input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) + + token_type_ids = None + if self.use_token_type_ids: + token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) + + sequence_labels = None + token_labels = None + choice_labels = None + if self.use_labels: + sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) + token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) + choice_labels = ids_tensor([self.batch_size], self.num_choices) + + config = XxxConfig( + vocab_size_or_config_json_file=self.vocab_size, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + hidden_act=self.hidden_act, + hidden_dropout_prob=self.hidden_dropout_prob, + attention_probs_dropout_prob=self.attention_probs_dropout_prob, + max_position_embeddings=self.max_position_embeddings, + type_vocab_size=self.type_vocab_size, + 
initializer_range=self.initializer_range) + + return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + + def check_loss_output(self, result): + self.parent.assertListEqual( + list(result["loss"].size()), + []) + + def create_and_check_xxx_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + model = XxxModel(config=config) + model.to(torch_device) + model.eval() + sequence_output, pooled_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) + sequence_output, pooled_output = model(input_ids, token_type_ids=token_type_ids) + sequence_output, pooled_output = model(input_ids) + + result = { + "sequence_output": sequence_output, + "pooled_output": pooled_output, + } + self.parent.assertListEqual( + list(result["sequence_output"].size()), + [self.batch_size, self.seq_length, self.hidden_size]) + self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, self.hidden_size]) + + + def create_and_check_xxx_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + model = XxxForMaskedLM(config=config) + model.to(torch_device) + model.eval() + loss, prediction_scores = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels) + result = { + "loss": loss, + "prediction_scores": prediction_scores, + } + self.parent.assertListEqual( + list(result["prediction_scores"].size()), + [self.batch_size, self.seq_length, self.vocab_size]) + self.check_loss_output(result) + + + def create_and_check_xxx_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + model = XxxForQuestionAnswering(config=config) + model.to(torch_device) + model.eval() + loss, start_logits, end_logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, + start_positions=sequence_labels, end_positions=sequence_labels) + result = { + "loss": loss, + "start_logits": start_logits, + "end_logits": end_logits, + } + self.parent.assertListEqual( + list(result["start_logits"].size()), + [self.batch_size, self.seq_length]) + self.parent.assertListEqual( + list(result["end_logits"].size()), + [self.batch_size, self.seq_length]) + self.check_loss_output(result) + + + def create_and_check_xxx_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + config.num_labels = self.num_labels + model = XxxForSequenceClassification(config) + model.to(torch_device) + model.eval() + loss, logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) + result = { + "loss": loss, + "logits": logits, + } + self.parent.assertListEqual( + list(result["logits"].size()), + [self.batch_size, self.num_labels]) + self.check_loss_output(result) + + + def create_and_check_xxx_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + config.num_labels = self.num_labels + model = XxxForTokenClassification(config=config) + model.to(torch_device) + model.eval() + loss, logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) + result = { + "loss": loss, + "logits": logits, + } + self.parent.assertListEqual( + list(result["logits"].size()), + [self.batch_size, self.seq_length, self.num_labels]) + 
self.check_loss_output(result) + + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + (config, input_ids, token_type_ids, input_mask, + sequence_labels, token_labels, choice_labels) = config_and_inputs + inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} + return config, inputs_dict + + def setUp(self): + self.model_tester = XxxModelTest.XxxModelTester(self) + self.config_tester = ConfigTester(self, config_class=XxxConfig, hidden_size=37) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_xxx_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_xxx_model(*config_and_inputs) + + def test_for_masked_lm(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_xxx_for_masked_lm(*config_and_inputs) + + def test_for_question_answering(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_xxx_for_question_answering(*config_and_inputs) + + def test_for_sequence_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_xxx_for_sequence_classification(*config_and_inputs) + + def test_for_token_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_xxx_for_token_classification(*config_and_inputs) + + @slow + def test_model_from_pretrained(self): + cache_dir = "/tmp/transformers_test/" + for model_name in list(XXX_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: + model = XxxModel.from_pretrained(model_name, cache_dir=cache_dir) + shutil.rmtree(cache_dir) + self.assertIsNotNone(model) + +if __name__ == "__main__": + unittest.main() diff --git a/templates/adding_a_new_model/tests/tokenization_xxx_test.py b/templates/adding_a_new_model/tests/tokenization_xxx_test.py new file mode 100644 index 00000000000..116083edc8c --- /dev/null +++ b/templates/adding_a_new_model/tests/tokenization_xxx_test.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# Copyright 2018 XXX Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
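+# Note (illustrative usage): once this template is adapted and copied into transformers/tests/, it can be run on its own with, + # for example, `python -m pytest -sv ./transformers/tests/tokenization_xxx_test.py` (the path is a placeholder for your model's test file).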
+from __future__ import absolute_import, division, print_function, unicode_literals + +import os +import unittest +from io import open + +from transformers.tokenization_xxx import (XxxTokenizer, VOCAB_FILES_NAMES) + +from .tokenization_tests_commons import CommonTestCases + +class XxxTokenizationTest(CommonTestCases.CommonTokenizerTester): + + tokenizer_class = XxxTokenizer + + def setUp(self): + super(XxxTokenizationTest, self).setUp() + + vocab_tokens = [ + "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", + "##ing", ",", "low", "lowest", + ] + self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file']) + with open(self.vocab_file, "w", encoding='utf-8') as vocab_writer: + vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) + + def get_tokenizer(self, **kwargs): + return XxxTokenizer.from_pretrained(self.tmpdirname, **kwargs) + + def get_input_output_texts(self): + input_text = u"UNwant\u00E9d,running" + output_text = u"unwanted, running" + return input_text, output_text + + def test_full_tokenizer(self): + tokenizer = self.tokenizer_class(self.vocab_file) + + tokens = tokenizer.tokenize(u"UNwant\u00E9d,running") + self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"]) + self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9]) + + +if __name__ == '__main__': + unittest.main() diff --git a/templates/adding_a_new_model/tokenization_xxx.py b/templates/adding_a_new_model/tokenization_xxx.py new file mode 100644 index 00000000000..3d6b4ad9df9 --- /dev/null +++ b/templates/adding_a_new_model/tokenization_xxx.py @@ -0,0 +1,218 @@ +# coding=utf-8 +# Copyright 2018 XXX Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Tokenization class for model XXX.""" + +from __future__ import absolute_import, division, print_function, unicode_literals + +import collections +import logging +import os +import unicodedata +from io import open + +from .tokenization_utils import PreTrainedTokenizer + +logger = logging.getLogger(__name__) + +#################################################### +# In this template, replace all the XXX (various casings) with your model name +#################################################### + +#################################################### +# Mapping from the keyword arguments names of Tokenizer `__init__` +# to file names for serializing Tokenizer instances +#################################################### +VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'} + +#################################################### +# Mapping from the keyword arguments names of Tokenizer `__init__` +# to pretrained vocabulary URL for all the model shortcut names.
+#################################################### +PRETRAINED_VOCAB_FILES_MAP = { + 'vocab_file': + { + 'xxx-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-base-uncased-vocab.txt", + 'xxx-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/xxx-large-uncased-vocab.txt", + } +} + +#################################################### +# Mapping from model shortcut names to max length of inputs +#################################################### +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + 'xxx-base-uncased': 512, + 'xxx-large-uncased': 512, +} + +#################################################### +# Mapping from model shortcut names to a dictionary of additional +# keyword arguments for Tokenizer `__init__`. +# To be used for checkpoint specific configurations. +#################################################### +PRETRAINED_INIT_CONFIGURATION = { + 'xxx-base-uncased': {'do_lower_case': True}, + 'xxx-large-uncased': {'do_lower_case': True}, +} + + +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + with open(vocab_file, "r", encoding="utf-8") as reader: + tokens = reader.readlines() + for index, token in enumerate(tokens): + token = token.rstrip('\n') + vocab[token] = index + return vocab + + +class XxxTokenizer(PreTrainedTokenizer): + r""" + Constructs a XxxTokenizer. + :class:`~transformers.XxxTokenizer` runs end-to-end tokenization: punctuation splitting + wordpiece + + Args: + vocab_file: Path to a one-wordpiece-per-line vocabulary file + do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + + def __init__(self, vocab_file, do_lower_case=True, + unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", + mask_token="[MASK]", **kwargs): + """Constructs a XxxTokenizer. + + Args: + **vocab_file**: Path to a one-wordpiece-per-line vocabulary file + **do_lower_case**: (`optional`) boolean (default True) + Whether to lower case the input + Only has an effect when do_basic_tokenize=True + """ + super(XxxTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token, + pad_token=pad_token, cls_token=cls_token, + mask_token=mask_token, **kwargs) + self.max_len_single_sentence = self.max_len - 2 # take into account special tokens + self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens + + if not os.path.isfile(vocab_file): + raise ValueError( + "Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained " + "model use `tokenizer = XxxTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) + self.vocab = load_vocab(vocab_file) + # Reverse mapping (id -> token), needed by `_convert_id_to_token` below + self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) + + @property + def vocab_size(self): + return len(self.vocab) + + def _tokenize(self, text): + """ Take as input a string and return a list of strings (tokens) for words/sub-words + """ + # NOTE: this skeleton assumes BERT-like basic + wordpiece tokenization; define `self.do_basic_tokenize`, + # `self.basic_tokenizer` and `self.wordpiece_tokenizer` in `__init__` (as in `BertTokenizer`) or replace + # this method with your model's own tokenization logic. + split_tokens = [] + if self.do_basic_tokenize: + for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): + for sub_token in self.wordpiece_tokenizer.tokenize(token): + split_tokens.append(sub_token) + else: + split_tokens = self.wordpiece_tokenizer.tokenize(text) + return split_tokens + + def _convert_token_to_id(self, token): + """ Converts a token (str/unicode) in an id using the vocab. """ + return self.vocab.get(token, self.vocab.get(self.unk_token)) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (string/unicode) using the vocab.""" + return self.ids_to_tokens.get(index, self.unk_token) + + def convert_tokens_to_string(self, tokens): + """ Converts a sequence of tokens (string) in a single string. """ + out_string = ' '.join(tokens).replace(' ##', '').strip() + return out_string + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks + by concatenating and adding special tokens. + A BERT sequence has the following format: + single sequence: [CLS] X [SEP] + pair of sequences: [CLS] A [SEP] B [SEP] + """ + if token_ids_1 is None: + return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + cls = [self.cls_token_id] + sep = [self.sep_token_id] + return cls + token_ids_0 + sep + token_ids_1 + sep + + def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): + """ + Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods. + + Args: + token_ids_0: list of ids (must not contain special tokens) + token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids + for sequence pairs + already_has_special_tokens: (default False) Set to True if the token list is already formatted with + special tokens for the model + + Returns: + A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + + if already_has_special_tokens: + if token_ids_1 is not None: + raise ValueError("You should not supply a second sequence if the provided sequence of " + "ids is already formatted with special tokens for the model.") + return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) + + if token_ids_1 is not None: + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1] + + def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): + """ + Creates a mask from the two sequences passed to be used in a sequence-pair classification task. + A BERT sequence pair mask has the following format: + 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence + + if token_ids_1 is None, only returns the first portion of the mask (0's).
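+ + For example (illustrative ids), ``create_token_type_ids_from_sequences([5, 6], [7, 8])`` returns ``[0, 0, 0, 0, 1, 1, 1]``: + four 0s covering ``[CLS] 5 6 [SEP]`` and three 1s covering ``7 8 [SEP]``.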
+ """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + def save_vocabulary(self, vocab_path): + """Save the tokenizer vocabulary to a directory or file.""" + index = 0 + if os.path.isdir(vocab_path): + vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file']) + else: + vocab_file = vocab_path + with open(vocab_file, "w", encoding="utf-8") as writer: + for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive." + " Please check that the vocabulary is not corrupted!".format(vocab_file)) + index = token_index + writer.write(token + u'\n') + index += 1 + return (vocab_file,) diff --git a/transformers-cli b/transformers-cli new file mode 100644 index 00000000000..ef00d15aa3a --- /dev/null +++ b/transformers-cli @@ -0,0 +1,23 @@ +#!/usr/bin/env python +from argparse import ArgumentParser + +from transformers.commands.user import UserCommands + + +if __name__ == '__main__': + parser = ArgumentParser(description='Transformers CLI tool', usage='transformers-cli []') + commands_parser = parser.add_subparsers(help='transformers-cli command helpers') + + # Register commands + UserCommands.register_subcommand(commands_parser) + + # Let's go + args = parser.parse_args() + + if not hasattr(args, 'func'): + parser.print_help() + exit(1) + + # Run + service = args.func(args) + service.run() diff --git a/transformers/__init__.py b/transformers/__init__.py index fbc92f078e1..5d7b0b772cb 100644 --- a/transformers/__init__.py +++ b/transformers/__init__.py @@ -1,4 +1,4 @@ -__version__ = "2.1.1" +__version__ = "2.2.1" # Work around to update TensorFlow's absl.logging threshold which alters the # default Python logging output behavior when present. 
@@ -25,15 +25,19 @@ from .file_utils import (TRANSFORMERS_CACHE, PYTORCH_TRANSFORMERS_CACHE, PYTORCH from .data import (is_sklearn_available, InputExample, InputFeatures, DataProcessor, glue_output_modes, glue_convert_examples_to_features, - glue_processors, glue_tasks_num_labels) + glue_processors, glue_tasks_num_labels, + xnli_output_modes, xnli_processors, xnli_tasks_num_labels, + squad_convert_examples_to_features, SquadFeatures, + SquadExample, SquadV1Processor, SquadV2Processor) if is_sklearn_available(): - from .data import glue_compute_metrics + from .data import glue_compute_metrics, xnli_compute_metrics # Tokenizers from .tokenization_utils import (PreTrainedTokenizer) from .tokenization_auto import AutoTokenizer from .tokenization_bert import BertTokenizer, BasicTokenizer, WordpieceTokenizer +from .tokenization_bert_japanese import BertJapaneseTokenizer, MecabTokenizer, CharacterTokenizer from .tokenization_openai import OpenAIGPTTokenizer from .tokenization_transfo_xl import (TransfoXLTokenizer, TransfoXLCorpus) from .tokenization_gpt2 import GPT2Tokenizer @@ -42,6 +46,8 @@ from .tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE from .tokenization_xlm import XLMTokenizer from .tokenization_roberta import RobertaTokenizer from .tokenization_distilbert import DistilBertTokenizer +from .tokenization_albert import AlbertTokenizer +from .tokenization_camembert import CamembertTokenizer # Configurations from .configuration_utils import PretrainedConfig @@ -56,6 +62,8 @@ from .configuration_ctrl import CTRLConfig, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP from .configuration_xlm import XLMConfig, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP from .configuration_roberta import RobertaConfig, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP from .configuration_distilbert import DistilBertConfig, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP +from .configuration_albert import AlbertConfig, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP +from .configuration_camembert import CamembertConfig, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # Modeling if is_torch_available(): @@ -72,6 +80,7 @@ if is_torch_available(): OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, load_tf_weights_in_openai_gpt, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP) from .modeling_transfo_xl import (TransfoXLPreTrainedModel, TransfoXLModel, TransfoXLLMHeadModel, + AdaptiveEmbedding, load_tf_weights_in_transfo_xl, TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP) from .modeling_gpt2 import (GPT2PreTrainedModel, GPT2Model, GPT2LMHeadModel, GPT2DoubleHeadsModel, @@ -80,28 +89,40 @@ if is_torch_available(): CTRLLMHeadModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP) from .modeling_xlnet import (XLNetPreTrainedModel, XLNetModel, XLNetLMHeadModel, - XLNetForSequenceClassification, XLNetForMultipleChoice, - XLNetForQuestionAnsweringSimple, XLNetForQuestionAnswering, - load_tf_weights_in_xlnet, XLNET_PRETRAINED_MODEL_ARCHIVE_MAP) + XLNetForSequenceClassification, XLNetForTokenClassification, + XLNetForMultipleChoice, XLNetForQuestionAnsweringSimple, + XLNetForQuestionAnswering, load_tf_weights_in_xlnet, + XLNET_PRETRAINED_MODEL_ARCHIVE_MAP) from .modeling_xlm import (XLMPreTrainedModel , XLMModel, XLMWithLMHeadModel, XLMForSequenceClassification, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLM_PRETRAINED_MODEL_ARCHIVE_MAP) from .modeling_roberta import (RobertaForMaskedLM, RobertaModel, RobertaForSequenceClassification, RobertaForMultipleChoice, + RobertaForTokenClassification, ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP) - from .modeling_distilbert import (DistilBertForMaskedLM, 
DistilBertModel, + from .modeling_distilbert import (DistilBertPreTrainedModel, DistilBertForMaskedLM, DistilBertModel, DistilBertForSequenceClassification, DistilBertForQuestionAnswering, + DistilBertForTokenClassification, DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP) + from .modeling_camembert import (CamembertForMaskedLM, CamembertModel, + CamembertForSequenceClassification, CamembertForMultipleChoice, + CamembertForTokenClassification, + CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP) + from .modeling_encoder_decoder import PreTrainedEncoderDecoder, Model2Model + + from .modeling_albert import (AlbertPreTrainedModel, AlbertModel, AlbertForMaskedLM, AlbertForSequenceClassification, + AlbertForQuestionAnswering, + load_tf_weights_in_albert, ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP) # Optimization - from .optimization import (AdamW, ConstantLRSchedule, WarmupConstantSchedule, WarmupCosineSchedule, - WarmupCosineWithHardRestartsSchedule, WarmupLinearSchedule) + from .optimization import (AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, + get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup) # TensorFlow if is_tf_available(): - from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, TFSequenceSummary + from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, TFSequenceSummary, shape_list from .modeling_tf_auto import (TFAutoModel, TFAutoModelForSequenceClassification, TFAutoModelForQuestionAnswering, TFAutoModelWithLMHead) @@ -127,6 +148,7 @@ if is_tf_available(): from .modeling_tf_xlnet import (TFXLNetPreTrainedModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, + TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple, TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP) @@ -139,11 +161,13 @@ if is_tf_available(): from .modeling_tf_roberta import (TFRobertaPreTrainedModel, TFRobertaMainLayer, TFRobertaModel, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, + TFRobertaForTokenClassification, TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP) from .modeling_tf_distilbert import (TFDistilBertPreTrainedModel, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForSequenceClassification, + TFDistilBertForTokenClassification, TFDistilBertForQuestionAnswering, TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP) @@ -151,6 +175,12 @@ if is_tf_available(): TFCTRLLMHeadModel, TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP) + from .modeling_tf_albert import (TFAlbertPreTrainedModel, TFAlbertModel, TFAlbertForMaskedLM, + TFAlbertForSequenceClassification, + TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP) + # Optimization + from .optimization_tf import (WarmUp, create_optimizer, AdamWeightDecay, GradientAccumulator) + # TF 2.0 <=> PyTorch conversion utilities from .modeling_tf_pytorch_utils import (convert_tf_weight_name_to_pt_weight_name, load_pytorch_checkpoint_in_tf2_model, diff --git a/transformers/commands/__init__.py b/transformers/commands/__init__.py new file mode 100644 index 00000000000..bbdd5655fc5 --- /dev/null +++ b/transformers/commands/__init__.py @@ -0,0 +1,12 @@ +from abc import ABC, abstractmethod +from argparse import ArgumentParser + +class BaseTransformersCLICommand(ABC): + @staticmethod + @abstractmethod + def register_subcommand(parser: ArgumentParser): + raise NotImplementedError() + + @abstractmethod + def run(self): + raise NotImplementedError() diff --git a/transformers/commands/user.py b/transformers/commands/user.py new file mode 100644 index 
00000000000..d79922ed8ac --- /dev/null +++ b/transformers/commands/user.py @@ -0,0 +1,165 @@ +from argparse import ArgumentParser +from getpass import getpass +import os + +from transformers.commands import BaseTransformersCLICommand +from transformers.hf_api import HfApi, HfFolder, HTTPError + + +class UserCommands(BaseTransformersCLICommand): + @staticmethod + def register_subcommand(parser: ArgumentParser): + login_parser = parser.add_parser('login') + login_parser.set_defaults(func=lambda args: LoginCommand(args)) + whoami_parser = parser.add_parser('whoami') + whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args)) + logout_parser = parser.add_parser('logout') + logout_parser.set_defaults(func=lambda args: LogoutCommand(args)) + list_parser = parser.add_parser('ls') + list_parser.set_defaults(func=lambda args: ListObjsCommand(args)) + # upload + upload_parser = parser.add_parser('upload') + upload_parser.add_argument('file', type=str, help='Local filepath of the file to upload.') + upload_parser.add_argument('--filename', type=str, default=None, help='Optional: override object filename on S3.') + upload_parser.set_defaults(func=lambda args: UploadCommand(args)) + + + +class ANSI: + """ + Helper for en.wikipedia.org/wiki/ANSI_escape_code + """ + _bold = u"\u001b[1m" + _reset = u"\u001b[0m" + @classmethod + def bold(cls, s): + return "{}{}{}".format(cls._bold, s, cls._reset) + + +class BaseUserCommand: + def __init__(self, args): + self.args = args + self._api = HfApi() + + +class LoginCommand(BaseUserCommand): + def run(self): + print(""" + _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_| + _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _| + _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_| + _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _| + _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_| + + """) + username = input("Username: ") + password = getpass() + try: + token = self._api.login(username, password) + except HTTPError as e: + # probably invalid credentials, display error message. 
+ print(e) + exit(1) + HfFolder.save_token(token) + print("Login successful") + print("Your token:", token, "\n") + print("Your token has been saved to", HfFolder.path_token) + + +class WhoamiCommand(BaseUserCommand): + def run(self): + token = HfFolder.get_token() + if token is None: + print("Not logged in") + exit() + try: + user = self._api.whoami(token) + print(user) + except HTTPError as e: + print(e) + + +class LogoutCommand(BaseUserCommand): + def run(self): + token = HfFolder.get_token() + if token is None: + print("Not logged in") + exit() + HfFolder.delete_token() + self._api.logout(token) + print("Successfully logged out.") + + +class ListObjsCommand(BaseUserCommand): + def tabulate(self, rows, headers): + # type: (List[List[Union[str, int]]], List[str]) -> str + """ + Inspired by: + stackoverflow.com/a/8356620/593036 + stackoverflow.com/questions/9535954/printing-lists-as-tabular-data + """ + col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)] + row_format = ("{{:{}}} " * len(headers)).format(*col_widths) + lines = [] + lines.append( + row_format.format(*headers) + ) + lines.append( + row_format.format(*["-" * w for w in col_widths]) + ) + for row in rows: + lines.append( + row_format.format(*row) + ) + return "\n".join(lines) + + def run(self): + token = HfFolder.get_token() + if token is None: + print("Not logged in") + exit(1) + try: + objs = self._api.list_objs(token) + except HTTPError as e: + print(e) + exit(1) + if len(objs) == 0: + print("No shared file yet") + exit() + rows = [ [ + obj.filename, + obj.LastModified, + obj.ETag, + obj.Size + ] for obj in objs ] + print( + self.tabulate(rows, headers=["Filename", "LastModified", "ETag", "Size"]) + ) + + +class UploadCommand(BaseUserCommand): + def run(self): + token = HfFolder.get_token() + if token is None: + print("Not logged in") + exit(1) + filepath = os.path.join(os.getcwd(), self.args.file) + filename = self.args.filename if self.args.filename is not None else os.path.basename(filepath) + print( + "About to upload file {} to S3 under filename {}".format( + ANSI.bold(filepath), ANSI.bold(filename) + ) + ) + + choice = input("Proceed? [Y/n] ").lower() + if not(choice == "" or choice == "y" or choice == "yes"): + print("Abort") + exit() + print( + ANSI.bold("Uploading... This might take a while if file is large") + ) + access_url = self._api.presign_and_upload( + token=token, filename=filename, filepath=filepath + ) + print("Your file now lives at:") + print(access_url) diff --git a/transformers/configuration_albert.py b/transformers/configuration_albert.py new file mode 100644 index 00000000000..de665c9b1c8 --- /dev/null +++ b/transformers/configuration_albert.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" ALBERT model configuration """ + +from .configuration_utils import PretrainedConfig + +ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { + 'albert-base-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-config.json", + 'albert-large-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-config.json", + 'albert-xlarge-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-config.json", + 'albert-xxlarge-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-config.json", + 'albert-base-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-config.json", + 'albert-large-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-config.json", + 'albert-xlarge-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-config.json", + 'albert-xxlarge-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-config.json", +} + +class AlbertConfig(PretrainedConfig): + """Configuration for `AlbertModel`. + + The default settings match the configuration of model `albert_xxlarge`. + """ + + pretrained_config_archive_map = ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP + + def __init__(self, + vocab_size_or_config_json_file=30000, + embedding_size=128, + hidden_size=4096, + num_hidden_layers=12, + num_hidden_groups=1, + num_attention_heads=64, + intermediate_size=16384, + inner_group_num=1, + hidden_act="gelu_new", + hidden_dropout_prob=0, + attention_probs_dropout_prob=0, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02, + layer_norm_eps=1e-12, **kwargs): + """Constructs AlbertConfig. + + Args: + vocab_size: Vocabulary size of `inputs_ids` in `AlbertModel`. + embedding_size: size of voc embeddings. + hidden_size: Size of the encoder layers and the pooler layer. + num_hidden_layers: Number of hidden layers in the Transformer encoder. + num_hidden_groups: Number of group for the hidden layers, parameters in + the same group are shared. + num_attention_heads: Number of attention heads for each attention layer in + the Transformer encoder. + intermediate_size: The size of the "intermediate" (i.e., feed-forward) + layer in the Transformer encoder. + inner_group_num: int, number of inner repetition of attention and ffn. + down_scale_factor: float, the scale to apply + hidden_act: The non-linear activation function (function or string) in the + encoder and pooler. + hidden_dropout_prob: The dropout probability for all fully connected + layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob: The dropout ratio for the attention + probabilities. + max_position_embeddings: The maximum sequence length that this model might + ever be used with. Typically set this to something large just in case + (e.g., 512 or 1024 or 2048). + type_vocab_size: The vocabulary size of the `token_type_ids` passed into + `AlbertModel`. + initializer_range: The stdev of the truncated_normal_initializer for + initializing all weight matrices. 
+ """ + super(AlbertConfig, self).__init__(**kwargs) + + self.vocab_size = vocab_size_or_config_json_file + self.embedding_size = embedding_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_hidden_groups = num_hidden_groups + self.num_attention_heads = num_attention_heads + self.inner_group_num = inner_group_num + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps \ No newline at end of file diff --git a/transformers/configuration_auto.py b/transformers/configuration_auto.py index edd21a670ce..fbc5c59199d 100644 --- a/transformers/configuration_auto.py +++ b/transformers/configuration_auto.py @@ -27,6 +27,8 @@ from .configuration_xlm import XLMConfig from .configuration_roberta import RobertaConfig from .configuration_distilbert import DistilBertConfig from .configuration_ctrl import CTRLConfig +from .configuration_camembert import CamembertConfig +from .configuration_albert import AlbertConfig logger = logging.getLogger(__name__) @@ -43,13 +45,15 @@ class AutoConfig(object): The base model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: DistilBertConfig (DistilBERT model) + - contains `albert`: AlbertConfig (ALBERT model) + - contains `camembert`: CamembertConfig (CamemBERT model) + - contains `roberta`: RobertaConfig (RoBERTa model) - contains `bert`: BertConfig (Bert model) - contains `openai-gpt`: OpenAIGPTConfig (OpenAI GPT model) - contains `gpt2`: GPT2Config (OpenAI GPT-2 model) - contains `transfo-xl`: TransfoXLConfig (Transformer-XL model) - contains `xlnet`: XLNetConfig (XLNet model) - contains `xlm`: XLMConfig (XLM model) - - contains `roberta`: RobertaConfig (RoBERTa model) - contains `ctrl` : CTRLConfig (CTRL model) This class cannot be instantiated using `__init__()` (throw an error). """ @@ -65,18 +69,21 @@ class AutoConfig(object): The configuration class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: DistilBertConfig (DistilBERT model) + - contains `albert`: AlbertConfig (ALBERT model) + - contains `camembert`: CamembertConfig (CamemBERT model) + - contains `roberta`: RobertaConfig (RoBERTa model) - contains `bert`: BertConfig (Bert model) - contains `openai-gpt`: OpenAIGPTConfig (OpenAI GPT model) - contains `gpt2`: GPT2Config (OpenAI GPT-2 model) - contains `transfo-xl`: TransfoXLConfig (Transformer-XL model) - contains `xlnet`: XLNetConfig (XLNet model) - contains `xlm`: XLMConfig (XLM model) - - contains `roberta`: RobertaConfig (RoBERTa model) - contains `ctrl` : CTRLConfig (CTRL model) Params: pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model configuration to load from cache or download, e.g.: ``bert-base-uncased``. + - a string with the `identifier name` of a pre-trained model configuration that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing a configuration file saved using the :func:`~transformers.PretrainedConfig.save_pretrained` method, e.g.: ``./my_model_directory/``. 
- a path or url to a saved configuration JSON `file`, e.g.: ``./my_model_directory/configuration.json``. @@ -92,6 +99,9 @@ class AutoConfig(object): force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. + resume_download: (`optional`) boolean, default False: + Do not delete incompletely recieved file. Attempt to resume the download if such a file exists. + proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. @@ -116,6 +126,10 @@ class AutoConfig(object): """ if 'distilbert' in pretrained_model_name_or_path: return DistilBertConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) + elif 'albert' in pretrained_model_name_or_path: + return AlbertConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) + elif 'camembert' in pretrained_model_name_or_path: + return CamembertConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) elif 'roberta' in pretrained_model_name_or_path: return RobertaConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) elif 'bert' in pretrained_model_name_or_path: @@ -134,4 +148,4 @@ class AutoConfig(object): return CTRLConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) raise ValueError("Unrecognized model identifier in {}. Should contains one of " "'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', " - "'xlm', 'roberta', 'ctrl'".format(pretrained_model_name_or_path)) + "'xlm', 'roberta', 'distilbert', 'camembert', 'ctrl', 'albert'".format(pretrained_model_name_or_path)) diff --git a/transformers/configuration_bert.py b/transformers/configuration_bert.py index d63be963eba..01fcd88cb81 100644 --- a/transformers/configuration_bert.py +++ b/transformers/configuration_bert.py @@ -42,6 +42,10 @@ BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { 'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json", 'bert-base-german-dbmdz-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-config.json", 'bert-base-german-dbmdz-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-config.json", + 'bert-base-japanese': "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-config.json", + 'bert-base-japanese-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking-config.json", + 'bert-base-japanese-char': "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-config.json", + 'bert-base-japanese-char-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking-config.json" } diff --git a/transformers/configuration_camembert.py b/transformers/configuration_camembert.py new file mode 100644 index 00000000000..3ff64454e50 --- /dev/null +++ b/transformers/configuration_camembert.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" CamemBERT configuration """ + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import logging + +from .configuration_roberta import RobertaConfig + +logger = logging.getLogger(__name__) + +CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { + 'camembert-base': "https://s3.amazonaws.com/models.huggingface.co/bert/camembert-base-config.json", +} + + +class CamembertConfig(RobertaConfig): + pretrained_config_archive_map = CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP diff --git a/transformers/configuration_distilbert.py b/transformers/configuration_distilbert.py index 2a8a149acfc..d5d575be295 100644 --- a/transformers/configuration_distilbert.py +++ b/transformers/configuration_distilbert.py @@ -27,7 +27,9 @@ logger = logging.getLogger(__name__) DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { 'distilbert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-config.json", - 'distilbert-base-uncased-distilled-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-distilled-squad-config.json" + 'distilbert-base-uncased-distilled-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-distilled-squad-config.json", + 'distilbert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-config.json", + 'distilbert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-multilingual-cased-config.json", } diff --git a/transformers/configuration_gpt2.py b/transformers/configuration_gpt2.py index e7d853f3171..c2fb4948d3b 100644 --- a/transformers/configuration_gpt2.py +++ b/transformers/configuration_gpt2.py @@ -29,6 +29,7 @@ logger = logging.getLogger(__name__) GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json", "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-config.json", "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-config.json", + "gpt2-xl": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-config.json", "distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-config.json",} class GPT2Config(PretrainedConfig): diff --git a/transformers/configuration_roberta.py b/transformers/configuration_roberta.py index 367a85211d5..842edac56e6 100644 --- a/transformers/configuration_roberta.py +++ b/transformers/configuration_roberta.py @@ -29,6 +29,8 @@ ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = { 'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-config.json", 'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-config.json", 'distilroberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-config.json", + 'roberta-base-openai-detector': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-openai-detector-config.json", + 'roberta-large-openai-detector': 
"https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-openai-detector-config.json", } diff --git a/transformers/configuration_utils.py b/transformers/configuration_utils.py index cfa6502bcdb..82959adb576 100644 --- a/transformers/configuration_utils.py +++ b/transformers/configuration_utils.py @@ -24,7 +24,7 @@ import logging import os from io import open -from .file_utils import cached_path, CONFIG_NAME +from .file_utils import CONFIG_NAME, cached_path, is_remote_url, hf_bucket_url logger = logging.getLogger(__name__) @@ -57,6 +57,7 @@ class PretrainedConfig(object): self.torchscript = kwargs.pop('torchscript', False) # Only used by PyTorch models self.use_bfloat16 = kwargs.pop('use_bfloat16', False) self.pruned_heads = kwargs.pop('pruned_heads', {}) + self.is_decoder = kwargs.pop('is_decoder', False) def save_pretrained(self, save_directory): """ Save a configuration object to the directory `save_directory`, so that it @@ -78,6 +79,7 @@ class PretrainedConfig(object): pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model configuration to load from cache or download, e.g.: ``bert-base-uncased``. + - a string with the `identifier name` of a pre-trained model configuration that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing a configuration file saved using the :func:`~transformers.PretrainedConfig.save_pretrained` method, e.g.: ``./my_model_directory/``. - a path or url to a saved configuration JSON `file`, e.g.: ``./my_model_directory/configuration.json``. @@ -93,6 +95,9 @@ class PretrainedConfig(object): force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. + resume_download: (`optional`) boolean, default False: + Do not delete incompletely recieved file. Attempt to resume the download if such a file exists. + proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. 
@@ -119,6 +124,7 @@ class PretrainedConfig(object): """ cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) + resume_download = kwargs.pop('resume_download', False) proxies = kwargs.pop('proxies', None) return_unused_kwargs = kwargs.pop('return_unused_kwargs', False) @@ -126,11 +132,14 @@ class PretrainedConfig(object): config_file = cls.pretrained_config_archive_map[pretrained_model_name_or_path] elif os.path.isdir(pretrained_model_name_or_path): config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME) - else: + elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): config_file = pretrained_model_name_or_path + else: + config_file = hf_bucket_url(pretrained_model_name_or_path, postfix=CONFIG_NAME) # redirect to the cache, if necessary try: - resolved_config_file = cached_path(config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies) + resolved_config_file = cached_path(config_file, cache_dir=cache_dir, force_download=force_download, + proxies=proxies, resume_download=resume_download) except EnvironmentError: if pretrained_model_name_or_path in cls.pretrained_config_archive_map: msg = "Couldn't reach server at '{}' to download pretrained model configuration file.".format( @@ -181,7 +190,7 @@ class PretrainedConfig(object): @classmethod def from_json_file(cls, json_file): - """Constructs a `BertConfig` from a json file of parameters.""" + """Constructs a `Config` from a json file of parameters.""" with open(json_file, "r", encoding='utf-8') as reader: text = reader.read() return cls.from_dict(json.loads(text)) diff --git a/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py b/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py new file mode 100644 index 00000000000..b6476b4fb6c --- /dev/null +++ b/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
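The configuration-file resolution order introduced in PretrainedConfig.from_pretrained above can be summarized by the rough sketch below; resolve_config_file is a hypothetical helper written only to illustrate the branching, reusing the file_utils helpers this diff imports.

import os
from transformers.file_utils import CONFIG_NAME, is_remote_url, hf_bucket_url

def resolve_config_file(name_or_path, archive_map):
    # Mirrors the branching in PretrainedConfig.from_pretrained
    if name_or_path in archive_map:
        return archive_map[name_or_path]                 # known shortcut name
    if os.path.isdir(name_or_path):
        return os.path.join(name_or_path, CONFIG_NAME)   # local directory holding a config.json
    if os.path.isfile(name_or_path) or is_remote_url(name_or_path):
        return name_or_path                              # explicit file path or URL
    # anything else is treated as a user-uploaded identifier on the S3 bucket
    return hf_bucket_url(name_or_path, postfix=CONFIG_NAME)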
+"""Convert ALBERT checkpoint.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import torch + +from transformers import AlbertConfig, AlbertForMaskedLM, load_tf_weights_in_albert + +import logging +logging.basicConfig(level=logging.INFO) + + +def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path): + # Initialise PyTorch model + config = AlbertConfig.from_json_file(albert_config_file) + print("Building PyTorch model from configuration: {}".format(str(config))) + model = AlbertForMaskedLM(config) + + # Load weights from tf checkpoint + load_tf_weights_in_albert(model, config, tf_checkpoint_path) + + # Save pytorch-model + print("Save PyTorch model to {}".format(pytorch_dump_path)) + torch.save(model.state_dict(), pytorch_dump_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + ## Required parameters + parser.add_argument("--tf_checkpoint_path", + default = None, + type = str, + required = True, + help = "Path to the TensorFlow checkpoint path.") + parser.add_argument("--albert_config_file", + default = None, + type = str, + required = True, + help = "The config json file corresponding to the pre-trained ALBERT model. \n" + "This specifies the model architecture.") + parser.add_argument("--pytorch_dump_path", + default = None, + type = str, + required = True, + help = "Path to the output PyTorch model.") + args = parser.parse_args() + convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, + args.albert_config_file, + args.pytorch_dump_path) + \ No newline at end of file diff --git a/transformers/convert_pytorch_checkpoint_to_tf2.py b/transformers/convert_pytorch_checkpoint_to_tf2.py index e673b77dcca..2c419888e87 100644 --- a/transformers/convert_pytorch_checkpoint_to_tf2.py +++ b/transformers/convert_pytorch_checkpoint_to_tf2.py @@ -33,7 +33,8 @@ from transformers import (load_pytorch_checkpoint_in_tf2_model, OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, - CTRLConfig, TFCTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP) + CTRLConfig, TFCTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, + AlbertConfig, TFAlbertForMaskedLM, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP) if is_torch_available(): import torch @@ -46,7 +47,8 @@ if is_torch_available(): OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP, RobertaForMaskedLM, RobertaForSequenceClassification, ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP, - CTRLLMHeadModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP) + CTRLLMHeadModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP, + AlbertForMaskedLM, ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP) else: (BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BERT_PRETRAINED_MODEL_ARCHIVE_MAP, GPT2LMHeadModel, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP, @@ -56,7 +58,8 @@ else: OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP, RobertaForMaskedLM, RobertaForSequenceClassification, ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP, - CTRLLMHeadModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP) = ( + CTRLLMHeadModel, 
CTRL_PRETRAINED_MODEL_ARCHIVE_MAP, + AlbertForMaskedLM, ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP) = ( None, None, None, None, None, None, None, None, @@ -65,6 +68,7 @@ else: None, None, None, None, None, None, None, None, + None, None, None, None) @@ -85,7 +89,8 @@ MODEL_CLASSES = { 'roberta-large-mnli': (RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP), 'distilbert': (DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP), 'distilbert-base-uncased-distilled-squad': (DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP), - 'ctrl': (CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP) + 'ctrl': (CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP), + 'albert': (AlbertConfig, TFAlbertForMaskedLM, AlbertForMaskedLM, ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP) } def convert_pt_checkpoint_to_tf(model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True): @@ -114,10 +119,11 @@ def convert_pt_checkpoint_to_tf(model_type, pytorch_checkpoint_path, config_file tf_inputs = tf.constant(inputs_list) tfo = tf_model(tf_inputs, training=False) # build the network - pt_model = pt_model_class.from_pretrained(None, + state_dict = torch.load(pytorch_checkpoint_path, map_location='cpu') + pt_model = pt_model_class.from_pretrained(pretrained_model_name_or_path=None, config=config, - state_dict=torch.load(pytorch_checkpoint_path, - map_location='cpu')) + state_dict=state_dict) + pt_inputs = torch.tensor(inputs_list) with torch.no_grad(): pto = pt_model(pt_inputs) @@ -134,7 +140,7 @@ def convert_pt_checkpoint_to_tf(model_type, pytorch_checkpoint_path, config_file def convert_all_pt_checkpoints_to_tf(args_model_type, tf_dump_path, model_shortcut_names_or_path=None, config_shortcut_names_or_path=None, - compare_with_pt_model=False, use_cached_models=False, only_convert_finetuned_models=False): + compare_with_pt_model=False, use_cached_models=False, remove_cached_files=False, only_convert_finetuned_models=False): assert os.path.isdir(args.tf_dump_path), "--tf_dump_path should be a directory" if args_model_type is None: @@ -182,13 +188,15 @@ def convert_all_pt_checkpoints_to_tf(args_model_type, tf_dump_path, model_shortc if os.path.isfile(model_shortcut_name): model_shortcut_name = 'converted_model' + convert_pt_checkpoint_to_tf(model_type=model_type, pytorch_checkpoint_path=model_file, config_file=config_file, tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + '-tf_model.h5'), compare_with_pt_model=compare_with_pt_model) - os.remove(config_file) - os.remove(model_file) + if remove_cached_files: + os.remove(config_file) + os.remove(model_file) if __name__ == "__main__": @@ -221,6 +229,9 @@ if __name__ == "__main__": parser.add_argument("--use_cached_models", action='store_true', help = "Use cached models if possible instead of updating to latest checkpoint versions.") + parser.add_argument("--remove_cached_files", + action='store_true', + help = "Remove pytorch models after conversion (save memory when converting in batches).") parser.add_argument("--only_convert_finetuned_models", 
action='store_true', help = "Only convert finetuned models.") @@ -240,4 +251,5 @@ if __name__ == "__main__": config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, + remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models) diff --git a/transformers/data/__init__.py b/transformers/data/__init__.py index e910d6da2ea..270a0532681 100644 --- a/transformers/data/__init__.py +++ b/transformers/data/__init__.py @@ -1,6 +1,8 @@ -from .processors import InputExample, InputFeatures, DataProcessor +from .processors import InputExample, InputFeatures, DataProcessor, SquadFeatures from .processors import glue_output_modes, glue_processors, glue_tasks_num_labels, glue_convert_examples_to_features +from .processors import squad_convert_examples_to_features, SquadExample, SquadV1Processor, SquadV2Processor +from .processors import xnli_output_modes, xnli_processors, xnli_tasks_num_labels from .metrics import is_sklearn_available if is_sklearn_available(): - from .metrics import glue_compute_metrics + from .metrics import glue_compute_metrics, xnli_compute_metrics diff --git a/transformers/data/metrics/__init__.py b/transformers/data/metrics/__init__.py index c9ebaac38df..5a46eb05d3b 100644 --- a/transformers/data/metrics/__init__.py +++ b/transformers/data/metrics/__init__.py @@ -81,3 +81,11 @@ if _has_sklearn: return {"acc": simple_accuracy(preds, labels)} else: raise KeyError(task_name) + + + def xnli_compute_metrics(task_name, preds, labels): + assert len(preds) == len(labels) + if task_name == "xnli": + return {"acc": simple_accuracy(preds, labels)} + else: + raise KeyError(task_name) diff --git a/transformers/data/metrics/squad_metrics.py b/transformers/data/metrics/squad_metrics.py new file mode 100644 index 00000000000..7b03255f496 --- /dev/null +++ b/transformers/data/metrics/squad_metrics.py @@ -0,0 +1,763 @@ +""" Very heavily inspired by the official evaluation script for SQuAD version 2.0 which was +modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0 + +In addition to basic functionality, we also compute additional statistics and +plot precision-recall curves if an additional na_prob.json file is provided. +This file is expected to map question ID's to the model's predicted probability +that a question is unanswerable. 
+""" + + +import json +import logging +import math +import collections +from io import open +from tqdm import tqdm +import string +import re + +from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize + +logger = logging.getLogger(__name__) + + +def normalize_answer(s): + """Lower text and remove punctuation, articles and extra whitespace.""" + def remove_articles(text): + regex = re.compile(r'\b(a|an|the)\b', re.UNICODE) + return re.sub(regex, ' ', text) + + def white_space_fix(text): + return ' '.join(text.split()) + + def remove_punc(text): + exclude = set(string.punctuation) + return ''.join(ch for ch in text if ch not in exclude) + + def lower(text): + return text.lower() + return white_space_fix(remove_articles(remove_punc(lower(s)))) + + +def get_tokens(s): + if not s: + return [] + return normalize_answer(s).split() + + +def compute_exact(a_gold, a_pred): + return int(normalize_answer(a_gold) == normalize_answer(a_pred)) + + +def compute_f1(a_gold, a_pred): + gold_toks = get_tokens(a_gold) + pred_toks = get_tokens(a_pred) + common = collections.Counter(gold_toks) & collections.Counter(pred_toks) + num_same = sum(common.values()) + if len(gold_toks) == 0 or len(pred_toks) == 0: + # If either is no-answer, then F1 is 1 if they agree, 0 otherwise + return int(gold_toks == pred_toks) + if num_same == 0: + return 0 + precision = 1.0 * num_same / len(pred_toks) + recall = 1.0 * num_same / len(gold_toks) + f1 = (2 * precision * recall) / (precision + recall) + return f1 + + +def get_raw_scores(examples, preds): + """ + Computes the exact and f1 scores from the examples and the model predictions + """ + exact_scores = {} + f1_scores = {} + + for example in examples: + qas_id = example.qas_id + gold_answers = [answer['text'] for answer in example.answers if normalize_answer(answer['text'])] + + if not gold_answers: + # For unanswerable questions, only correct answer is empty string + gold_answers = [''] + + if qas_id not in preds: + print('Missing prediction for %s' % qas_id) + continue + + prediction = preds[qas_id] + exact_scores[qas_id] = max(compute_exact(a, prediction) for a in gold_answers) + f1_scores[qas_id] = max(compute_f1(a, prediction) for a in gold_answers) + + return exact_scores, f1_scores + + +def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh): + new_scores = {} + for qid, s in scores.items(): + pred_na = na_probs[qid] > na_prob_thresh + if pred_na: + new_scores[qid] = float(not qid_to_has_ans[qid]) + else: + new_scores[qid] = s + return new_scores + + +def make_eval_dict(exact_scores, f1_scores, qid_list=None): + if not qid_list: + total = len(exact_scores) + return collections.OrderedDict([ + ('exact', 100.0 * sum(exact_scores.values()) / total), + ('f1', 100.0 * sum(f1_scores.values()) / total), + ('total', total), + ]) + else: + total = len(qid_list) + return collections.OrderedDict([ + ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total), + ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total), + ('total', total), + ]) + + +def merge_eval(main_eval, new_eval, prefix): + for k in new_eval: + main_eval['%s_%s' % (prefix, k)] = new_eval[k] + + +def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans): + num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) + cur_score = num_no_ans + best_score = cur_score + best_thresh = 0.0 + qid_list = sorted(na_probs, key=lambda k: na_probs[k]) + for i, qid in enumerate(qid_list): + if qid not in scores: + continue + if qid_to_has_ans[qid]: + diff 
= scores[qid] + else: + if preds[qid]: + diff = -1 + else: + diff = 0 + cur_score += diff + if cur_score > best_score: + best_score = cur_score + best_thresh = na_probs[qid] + + has_ans_score, has_ans_cnt = 0, 0 + for qid in qid_list: + if not qid_to_has_ans[qid]: + continue + has_ans_cnt += 1 + + if qid not in scores: + continue + has_ans_score += scores[qid] + + return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt + + +def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): + best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2( + preds, exact_raw, na_probs, qid_to_has_ans) + best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2( + preds, f1_raw, na_probs, qid_to_has_ans) + main_eval['best_exact'] = best_exact + main_eval['best_exact_thresh'] = exact_thresh + main_eval['best_f1'] = best_f1 + main_eval['best_f1_thresh'] = f1_thresh + main_eval['has_ans_exact'] = has_ans_exact + main_eval['has_ans_f1'] = has_ans_f1 + + +def find_best_thresh(preds, scores, na_probs, qid_to_has_ans): + num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) + cur_score = num_no_ans + best_score = cur_score + best_thresh = 0.0 + qid_list = sorted(na_probs, key=lambda k: na_probs[k]) + for _, qid in enumerate(qid_list): + if qid not in scores: + continue + if qid_to_has_ans[qid]: + diff = scores[qid] + else: + if preds[qid]: + diff = -1 + else: + diff = 0 + cur_score += diff + if cur_score > best_score: + best_score = cur_score + best_thresh = na_probs[qid] + return 100.0 * best_score / len(scores), best_thresh + + +def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): + best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans) + best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans) + + main_eval['best_exact'] = best_exact + main_eval['best_exact_thresh'] = exact_thresh + main_eval['best_f1'] = best_f1 + main_eval['best_f1_thresh'] = f1_thresh + + +def squad_evaluate(examples, preds, no_answer_probs=None, no_answer_probability_threshold=1.0): + qas_id_to_has_answer = {example.qas_id: bool(example.answers) for example in examples} + has_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if has_answer] + no_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if not has_answer] + + if no_answer_probs is None: + no_answer_probs = {k: 0.0 for k in preds} + + exact, f1 = get_raw_scores(examples, preds) + + exact_threshold = apply_no_ans_threshold(exact, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold) + f1_threshold = apply_no_ans_threshold(f1, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold) + + evaluation = make_eval_dict(exact_threshold, f1_threshold) + + if has_answer_qids: + has_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=has_answer_qids) + merge_eval(evaluation, has_ans_eval, 'HasAns') + + if no_answer_qids: + no_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=no_answer_qids) + merge_eval(evaluation, no_ans_eval, 'NoAns') + + if no_answer_probs: + find_all_best_thresh(evaluation, preds, exact, f1, no_answer_probs, qas_id_to_has_answer) + + return evaluation + + +def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): + """Project the tokenized prediction back to the original text.""" + + # When we created the data, we kept track of the alignment between original + # 
(whitespace tokenized) tokens and our WordPiece tokenized tokens. So + # now `orig_text` contains the span of our original text corresponding to the + # span that we predicted. + # + # However, `orig_text` may contain extra characters that we don't want in + # our prediction. + # + # For example, let's say: + # pred_text = steve smith + # orig_text = Steve Smith's + # + # We don't want to return `orig_text` because it contains the extra "'s". + # + # We don't want to return `pred_text` because it's already been normalized + # (the SQuAD eval script also does punctuation stripping/lower casing but + # our tokenizer does additional normalization like stripping accent + # characters). + # + # What we really want to return is "Steve Smith". + # + # Therefore, we have to apply a semi-complicated alignment heuristic between + # `pred_text` and `orig_text` to get a character-to-character alignment. This + # can fail in certain cases in which case we just return `orig_text`. + + def _strip_spaces(text): + ns_chars = [] + ns_to_s_map = collections.OrderedDict() + for (i, c) in enumerate(text): + if c == " ": + continue + ns_to_s_map[len(ns_chars)] = i + ns_chars.append(c) + ns_text = "".join(ns_chars) + return (ns_text, ns_to_s_map) + + # We first tokenize `orig_text`, strip whitespace from the result + # and `pred_text`, and check if they are the same length. If they are + # NOT the same length, the heuristic has failed. If they are the same + # length, we assume the characters are one-to-one aligned. + tokenizer = BasicTokenizer(do_lower_case=do_lower_case) + + tok_text = " ".join(tokenizer.tokenize(orig_text)) + + start_position = tok_text.find(pred_text) + if start_position == -1: + if verbose_logging: + logger.info( + "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) + return orig_text + end_position = start_position + len(pred_text) - 1 + + (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) + (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) + + if len(orig_ns_text) != len(tok_ns_text): + if verbose_logging: + logger.info("Length not equal after stripping spaces: '%s' vs '%s'", + orig_ns_text, tok_ns_text) + return orig_text + + # We then project the characters in `pred_text` back to `orig_text` using + # the character-to-character alignment. 
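# Assumed walk-through of the alignment described above, reusing the
# "Steve Smith" example: orig_text = "Steve Smith's" strips to "SteveSmith's",
# and tok_text = "steve smith ' s" strips to "stevesmith's"; both no-space
# strings have length 12, so their characters align one-to-one. pred_text
# "steve smith" covers tok_text positions 0..10, which map back to orig_text
# positions 0..10 and yield the final span "Steve Smith".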
+ tok_s_to_ns_map = {} + for (i, tok_index) in tok_ns_to_s_map.items(): + tok_s_to_ns_map[tok_index] = i + + orig_start_position = None + if start_position in tok_s_to_ns_map: + ns_start_position = tok_s_to_ns_map[start_position] + if ns_start_position in orig_ns_to_s_map: + orig_start_position = orig_ns_to_s_map[ns_start_position] + + if orig_start_position is None: + if verbose_logging: + logger.info("Couldn't map start position") + return orig_text + + orig_end_position = None + if end_position in tok_s_to_ns_map: + ns_end_position = tok_s_to_ns_map[end_position] + if ns_end_position in orig_ns_to_s_map: + orig_end_position = orig_ns_to_s_map[ns_end_position] + + if orig_end_position is None: + if verbose_logging: + logger.info("Couldn't map end position") + return orig_text + + output_text = orig_text[orig_start_position:(orig_end_position + 1)] + return output_text + + +def _get_best_indexes(logits, n_best_size): + """Get the n-best logits from a list.""" + index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) + + best_indexes = [] + for i in range(len(index_and_score)): + if i >= n_best_size: + break + best_indexes.append(index_and_score[i][0]) + return best_indexes + + +def _compute_softmax(scores): + """Compute softmax probability over raw logits.""" + if not scores: + return [] + + max_score = None + for score in scores: + if max_score is None or score > max_score: + max_score = score + + exp_scores = [] + total_sum = 0.0 + for score in scores: + x = math.exp(score - max_score) + exp_scores.append(x) + total_sum += x + + probs = [] + for score in exp_scores: + probs.append(score / total_sum) + return probs + + +def compute_predictions_logits( + all_examples, + all_features, + all_results, + n_best_size, + max_answer_length, + do_lower_case, + output_prediction_file, + output_nbest_file, + output_null_log_odds_file, + verbose_logging, + version_2_with_negative, + null_score_diff_threshold +): + """Write final predictions to the json file and log-odds of null if needed.""" + logger.info("Writing predictions to: %s" % (output_prediction_file)) + logger.info("Writing nbest to: %s" % (output_nbest_file)) + + example_index_to_features = collections.defaultdict(list) + for feature in all_features: + example_index_to_features[feature.example_index].append(feature) + + unique_id_to_result = {} + for result in all_results: + unique_id_to_result[result.unique_id] = result + + _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name + "PrelimPrediction", + ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) + + all_predictions = collections.OrderedDict() + all_nbest_json = collections.OrderedDict() + scores_diff_json = collections.OrderedDict() + + for (example_index, example) in enumerate(all_examples): + features = example_index_to_features[example_index] + + prelim_predictions = [] + # keep track of the minimum score of null start+end of position 0 + score_null = 1000000 # large and positive + min_null_feature_index = 0 # the paragraph slice with min null score + null_start_logit = 0 # the start logit at the slice with min null score + null_end_logit = 0 # the end logit at the slice with min null score + for (feature_index, feature) in enumerate(features): + result = unique_id_to_result[feature.unique_id] + start_indexes = _get_best_indexes(result.start_logits, n_best_size) + end_indexes = _get_best_indexes(result.end_logits, n_best_size) + # if we could have irrelevant answers, get the min score of irrelevant + if 
version_2_with_negative: + feature_null_score = result.start_logits[0] + result.end_logits[0] + if feature_null_score < score_null: + score_null = feature_null_score + min_null_feature_index = feature_index + null_start_logit = result.start_logits[0] + null_end_logit = result.end_logits[0] + for start_index in start_indexes: + for end_index in end_indexes: + # We could hypothetically create invalid predictions, e.g., predict + # that the start of the span is in the question. We throw out all + # invalid predictions. + if start_index >= len(feature.tokens): + continue + if end_index >= len(feature.tokens): + continue + if start_index not in feature.token_to_orig_map: + continue + if end_index not in feature.token_to_orig_map: + continue + if not feature.token_is_max_context.get(start_index, False): + continue + if end_index < start_index: + continue + length = end_index - start_index + 1 + if length > max_answer_length: + continue + prelim_predictions.append( + _PrelimPrediction( + feature_index=feature_index, + start_index=start_index, + end_index=end_index, + start_logit=result.start_logits[start_index], + end_logit=result.end_logits[end_index])) + if version_2_with_negative: + prelim_predictions.append( + _PrelimPrediction( + feature_index=min_null_feature_index, + start_index=0, + end_index=0, + start_logit=null_start_logit, + end_logit=null_end_logit)) + prelim_predictions = sorted( + prelim_predictions, + key=lambda x: (x.start_logit + x.end_logit), + reverse=True) + + _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name + "NbestPrediction", ["text", "start_logit", "end_logit"]) + + seen_predictions = {} + nbest = [] + for pred in prelim_predictions: + if len(nbest) >= n_best_size: + break + feature = features[pred.feature_index] + if pred.start_index > 0: # this is a non-null prediction + tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] + orig_doc_start = feature.token_to_orig_map[pred.start_index] + orig_doc_end = feature.token_to_orig_map[pred.end_index] + orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] + tok_text = " ".join(tok_tokens) + + # De-tokenize WordPieces that have been split off. + tok_text = tok_text.replace(" ##", "") + tok_text = tok_text.replace("##", "") + + # Clean whitespace + tok_text = tok_text.strip() + tok_text = " ".join(tok_text.split()) + orig_text = " ".join(orig_tokens) + + final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) + if final_text in seen_predictions: + continue + + seen_predictions[final_text] = True + else: + final_text = "" + seen_predictions[final_text] = True + + nbest.append( + _NbestPrediction( + text=final_text, + start_logit=pred.start_logit, + end_logit=pred.end_logit)) + # if we didn't include the empty option in the n-best, include it + if version_2_with_negative: + if "" not in seen_predictions: + nbest.append( + _NbestPrediction( + text="", + start_logit=null_start_logit, + end_logit=null_end_logit)) + + # In very rare edge cases we could only have single null prediction. + # So we just create a nonce prediction in this case to avoid failure. + if len(nbest) == 1: + nbest.insert(0, + _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) + + # In very rare edge cases we could have no valid predictions. So we + # just create a nonce prediction in this case to avoid failure. 
+ if not nbest: + nbest.append( + _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) + + assert len(nbest) >= 1 + + total_scores = [] + best_non_null_entry = None + for entry in nbest: + total_scores.append(entry.start_logit + entry.end_logit) + if not best_non_null_entry: + if entry.text: + best_non_null_entry = entry + + probs = _compute_softmax(total_scores) + + nbest_json = [] + for (i, entry) in enumerate(nbest): + output = collections.OrderedDict() + output["text"] = entry.text + output["probability"] = probs[i] + output["start_logit"] = entry.start_logit + output["end_logit"] = entry.end_logit + nbest_json.append(output) + + assert len(nbest_json) >= 1 + + if not version_2_with_negative: + all_predictions[example.qas_id] = nbest_json[0]["text"] + else: + # predict "" iff the null score - the score of best non-null > threshold + score_diff = score_null - best_non_null_entry.start_logit - ( + best_non_null_entry.end_logit) + scores_diff_json[example.qas_id] = score_diff + if score_diff > null_score_diff_threshold: + all_predictions[example.qas_id] = "" + else: + all_predictions[example.qas_id] = best_non_null_entry.text + all_nbest_json[example.qas_id] = nbest_json + + with open(output_prediction_file, "w") as writer: + writer.write(json.dumps(all_predictions, indent=4) + "\n") + + with open(output_nbest_file, "w") as writer: + writer.write(json.dumps(all_nbest_json, indent=4) + "\n") + + if version_2_with_negative: + with open(output_null_log_odds_file, "w") as writer: + writer.write(json.dumps(scores_diff_json, indent=4) + "\n") + + return all_predictions + + +def compute_predictions_log_probs( + all_examples, + all_features, + all_results, + n_best_size, + max_answer_length, + output_prediction_file, + output_nbest_file, + output_null_log_odds_file, + start_n_top, + end_n_top, + version_2_with_negative, + tokenizer, + verbose_logging +): + """ XLNet write prediction logic (more complex than Bert's). + Write final predictions to the json file and log-odds of null if needed. 
+ + Requires utils_squad_evaluate.py + """ + _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name + "PrelimPrediction", + ["feature_index", "start_index", "end_index", + "start_log_prob", "end_log_prob"]) + + _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name + "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]) + + logger.info("Writing predictions to: %s", output_prediction_file) + # logger.info("Writing nbest to: %s" % (output_nbest_file)) + + example_index_to_features = collections.defaultdict(list) + for feature in all_features: + example_index_to_features[feature.example_index].append(feature) + + unique_id_to_result = {} + for result in all_results: + unique_id_to_result[result.unique_id] = result + + all_predictions = collections.OrderedDict() + all_nbest_json = collections.OrderedDict() + scores_diff_json = collections.OrderedDict() + + for (example_index, example) in enumerate(all_examples): + features = example_index_to_features[example_index] + + prelim_predictions = [] + # keep track of the minimum score of null start+end of position 0 + score_null = 1000000 # large and positive + + for (feature_index, feature) in enumerate(features): + result = unique_id_to_result[feature.unique_id] + + cur_null_score = result.cls_logits + + # if we could have irrelevant answers, get the min score of irrelevant + score_null = min(score_null, cur_null_score) + + for i in range(start_n_top): + for j in range(end_n_top): + start_log_prob = result.start_logits[i] + start_index = result.start_top_index[i] + + j_index = i * end_n_top + j + + end_log_prob = result.end_logits[j_index] + end_index = result.end_top_index[j_index] + + # We could hypothetically create invalid predictions, e.g., predict + # that the start of the span is in the question. We throw out all + # invalid predictions. + if start_index >= feature.paragraph_len - 1: + continue + if end_index >= feature.paragraph_len - 1: + continue + + if not feature.token_is_max_context.get(start_index, False): + continue + if end_index < start_index: + continue + length = end_index - start_index + 1 + if length > max_answer_length: + continue + + prelim_predictions.append( + _PrelimPrediction( + feature_index=feature_index, + start_index=start_index, + end_index=end_index, + start_log_prob=start_log_prob, + end_log_prob=end_log_prob)) + + prelim_predictions = sorted( + prelim_predictions, + key=lambda x: (x.start_log_prob + x.end_log_prob), + reverse=True) + + seen_predictions = {} + nbest = [] + for pred in prelim_predictions: + if len(nbest) >= n_best_size: + break + feature = features[pred.feature_index] + + # XLNet un-tokenizer + # Let's keep it simple for now and see if we need all this later. 
+ # + # tok_start_to_orig_index = feature.tok_start_to_orig_index + # tok_end_to_orig_index = feature.tok_end_to_orig_index + # start_orig_pos = tok_start_to_orig_index[pred.start_index] + # end_orig_pos = tok_end_to_orig_index[pred.end_index] + # paragraph_text = example.paragraph_text + # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip() + + # Previously used Bert untokenizer + tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] + orig_doc_start = feature.token_to_orig_map[pred.start_index] + orig_doc_end = feature.token_to_orig_map[pred.end_index] + orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] + tok_text = tokenizer.convert_tokens_to_string(tok_tokens) + + # Clean whitespace + tok_text = tok_text.strip() + tok_text = " ".join(tok_text.split()) + orig_text = " ".join(orig_tokens) + + if hasattr(tokenizer, "do_lower_case"): + do_lower_case = tokenizer.do_lower_case + else: + do_lower_case = tokenizer.do_lowercase_and_remove_accent + + final_text = get_final_text(tok_text, orig_text, do_lower_case, + verbose_logging) + + if final_text in seen_predictions: + continue + + seen_predictions[final_text] = True + + nbest.append( + _NbestPrediction( + text=final_text, + start_log_prob=pred.start_log_prob, + end_log_prob=pred.end_log_prob)) + + # In very rare edge cases we could have no valid predictions. So we + # just create a nonce prediction in this case to avoid failure. + if not nbest: + nbest.append( + _NbestPrediction(text="", start_log_prob=-1e6, + end_log_prob=-1e6)) + + total_scores = [] + best_non_null_entry = None + for entry in nbest: + total_scores.append(entry.start_log_prob + entry.end_log_prob) + if not best_non_null_entry: + best_non_null_entry = entry + + probs = _compute_softmax(total_scores) + + nbest_json = [] + for (i, entry) in enumerate(nbest): + output = collections.OrderedDict() + output["text"] = entry.text + output["probability"] = probs[i] + output["start_log_prob"] = entry.start_log_prob + output["end_log_prob"] = entry.end_log_prob + nbest_json.append(output) + + assert len(nbest_json) >= 1 + assert best_non_null_entry is not None + + score_diff = score_null + scores_diff_json[example.qas_id] = score_diff + # note(zhiliny): always predict best_non_null_entry + # and the evaluation script will search for the best threshold + all_predictions[example.qas_id] = best_non_null_entry.text + + all_nbest_json[example.qas_id] = nbest_json + + with open(output_prediction_file, "w") as writer: + writer.write(json.dumps(all_predictions, indent=4) + "\n") + + with open(output_nbest_file, "w") as writer: + writer.write(json.dumps(all_nbest_json, indent=4) + "\n") + + if version_2_with_negative: + with open(output_null_log_odds_file, "w") as writer: + writer.write(json.dumps(scores_diff_json, indent=4) + "\n") + + return all_predictions diff --git a/transformers/data/processors/__init__.py b/transformers/data/processors/__init__.py index af38c54beba..0f1b24893ae 100644 --- a/transformers/data/processors/__init__.py +++ b/transformers/data/processors/__init__.py @@ -1,3 +1,4 @@ from .utils import InputExample, InputFeatures, DataProcessor from .glue import glue_output_modes, glue_processors, glue_tasks_num_labels, glue_convert_examples_to_features - +from .squad import squad_convert_examples_to_features, SquadFeatures, SquadExample, SquadV1Processor, SquadV2Processor +from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels \ No newline at end of file diff --git a/transformers/data/processors/glue.py 
b/transformers/data/processors/glue.py index 741569ea30a..518251b0503 100644 --- a/transformers/data/processors/glue.py +++ b/transformers/data/processors/glue.py @@ -80,6 +80,7 @@ def glue_convert_examples_to_features(examples, tokenizer, logger.info("Writing example %d" % (ex_index)) if is_tf_dataset: example = processor.get_example_from_tensor_dict(example) + example = processor.tfds_map(example) inputs = tokenizer.encode_plus( example.text_a, diff --git a/transformers/data/processors/squad.py b/transformers/data/processors/squad.py new file mode 100644 index 00000000000..9bc43756842 --- /dev/null +++ b/transformers/data/processors/squad.py @@ -0,0 +1,591 @@ +from tqdm import tqdm +import collections +import logging +import os +import json +import numpy as np + +from ...tokenization_bert import BasicTokenizer, whitespace_tokenize +from .utils import DataProcessor, InputExample, InputFeatures +from ...file_utils import is_tf_available, is_torch_available + +if is_torch_available(): + import torch + from torch.utils.data import TensorDataset + +if is_tf_available(): + import tensorflow as tf + +logger = logging.getLogger(__name__) + +def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, + orig_answer_text): + """Returns tokenized answer spans that better match the annotated answer.""" + tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) + + for new_start in range(input_start, input_end + 1): + for new_end in range(input_end, new_start - 1, -1): + text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) + if text_span == tok_answer_text: + return (new_start, new_end) + + return (input_start, input_end) + +def _check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the 'max context' doc span for the token.""" + best_score = None + best_span_index = None + for (span_index, doc_span) in enumerate(doc_spans): + end = doc_span.start + doc_span.length - 1 + if position < doc_span.start: + continue + if position > end: + continue + num_left_context = position - doc_span.start + num_right_context = end - position + score = min(num_left_context, num_right_context) + 0.01 * doc_span.length + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + +def _new_check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the 'max context' doc span for the token.""" + # if len(doc_spans) == 1: + # return True + best_score = None + best_span_index = None + for (span_index, doc_span) in enumerate(doc_spans): + end = doc_span["start"] + doc_span["length"] - 1 + if position < doc_span["start"]: + continue + if position > end: + continue + num_left_context = position - doc_span["start"] + num_right_context = end - position + score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"] + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + +def _is_whitespace(c): + if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: + return True + return False + +def squad_convert_examples_to_features(examples, tokenizer, max_seq_length, + doc_stride, max_query_length, is_training, + return_dataset=False): + """ + Converts a list of examples into a list of features that can be directly given as input to a model. + It is model-dependant and takes advantage of many of the tokenizer's features to create the model's inputs. 
+ + Args: + examples: list of :class:`~transformers.data.processors.squad.SquadExample` + tokenizer: an instance of a child of :class:`~transformers.PreTrainedTokenizer` + max_seq_length: The maximum sequence length of the inputs. + doc_stride: The stride used when the context is too large and is split across several features. + max_query_length: The maximum length of the query. + is_training: whether to create features for model evaluation or model training. + return_dataset: Default False. Either 'pt' or 'tf'. + if 'pt': returns a torch.data.TensorDataset, + if 'tf': returns a tf.data.Dataset + + Returns: + list of :class:`~transformers.data.processors.squad.SquadFeatures` + + Example:: + + processor = SquadV2Processor() + examples = processor.get_dev_examples(data_dir) + + features = squad_convert_examples_to_features( + examples=examples, + tokenizer=tokenizer, + max_seq_length=args.max_seq_length, + doc_stride=args.doc_stride, + max_query_length=args.max_query_length, + is_training=not evaluate, + ) + """ + + # Defining helper methods + unique_id = 1000000000 + + features = [] + for (example_index, example) in enumerate(tqdm(examples)): + if is_training and not example.is_impossible: + # Get start and end position + start_position = example.start_position + end_position = example.end_position + + # If the answer cannot be found in the text, then skip this example. + actual_text = " ".join(example.doc_tokens[start_position:(end_position + 1)]) + cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text)) + if actual_text.find(cleaned_answer_text) == -1: + logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text) + continue + + + tok_to_orig_index = [] + orig_to_tok_index = [] + all_doc_tokens = [] + for (i, token) in enumerate(example.doc_tokens): + orig_to_tok_index.append(len(all_doc_tokens)) + sub_tokens = tokenizer.tokenize(token) + for sub_token in sub_tokens: + tok_to_orig_index.append(i) + all_doc_tokens.append(sub_token) + + + if is_training and not example.is_impossible: + tok_start_position = orig_to_tok_index[example.start_position] + if example.end_position < len(example.doc_tokens) - 1: + tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 + else: + tok_end_position = len(all_doc_tokens) - 1 + + (tok_start_position, tok_end_position) = _improve_answer_span( + all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text + ) + + spans = [] + + truncated_query = tokenizer.encode(example.question_text, add_special_tokens=False, max_length=max_query_length) + sequence_added_tokens = tokenizer.max_len - tokenizer.max_len_single_sentence + sequence_pair_added_tokens = tokenizer.max_len - tokenizer.max_len_sentences_pair + + span_doc_tokens = all_doc_tokens + while len(spans) * doc_stride < len(all_doc_tokens): + + encoded_dict = tokenizer.encode_plus( + truncated_query if tokenizer.padding_side == "right" else span_doc_tokens, + span_doc_tokens if tokenizer.padding_side == "right" else truncated_query, + max_length=max_seq_length, + return_overflowing_tokens=True, + pad_to_max_length=True, + stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens, + truncation_strategy='only_second' if tokenizer.padding_side == "right" else 'only_first' + ) + + paragraph_len = min(len(all_doc_tokens) - len(spans) * doc_stride, max_seq_length - len(truncated_query) - sequence_pair_added_tokens) + + if tokenizer.pad_token_id in encoded_dict['input_ids']: + non_padded_ids = 
encoded_dict['input_ids'][:encoded_dict['input_ids'].index(tokenizer.pad_token_id)] + else: + non_padded_ids = encoded_dict['input_ids'] + + tokens = tokenizer.convert_ids_to_tokens(non_padded_ids) + + token_to_orig_map = {} + for i in range(paragraph_len): + index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i + token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i] + + encoded_dict["paragraph_len"] = paragraph_len + encoded_dict["tokens"] = tokens + encoded_dict["token_to_orig_map"] = token_to_orig_map + encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens + encoded_dict["token_is_max_context"] = {} + encoded_dict["start"] = len(spans) * doc_stride + encoded_dict["length"] = paragraph_len + + spans.append(encoded_dict) + + if "overflowing_tokens" not in encoded_dict: + break + span_doc_tokens = encoded_dict["overflowing_tokens"] + + for doc_span_index in range(len(spans)): + for j in range(spans[doc_span_index]["paragraph_len"]): + is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j) + index = j if tokenizer.padding_side == "left" else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j + spans[doc_span_index]["token_is_max_context"][index] = is_max_context + + for span in spans: + # Identify the position of the CLS token + cls_index = span['input_ids'].index(tokenizer.cls_token_id) + + # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) + # Original TF implem also keep the classification token (set to 0) (not sure why...) + p_mask = np.array(span['token_type_ids']) + + p_mask = np.minimum(p_mask, 1) + + if tokenizer.padding_side == "right": + # Limit positive values to one + p_mask = 1 - p_mask + + p_mask[np.where(np.array(span["input_ids"]) == tokenizer.sep_token_id)[0]] = 1 + + # Set the CLS index to '0' + p_mask[cls_index] = 0 + + + span_is_impossible = example.is_impossible + start_position = 0 + end_position = 0 + if is_training and not span_is_impossible: + # For training, if our document chunk does not contain an annotation + # we throw it out, since there is nothing to predict. 
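[Editor's note] The loop above windows the tokenized context with `doc_stride`: each span starts `doc_stride` tokens after the previous one, so a long paragraph yields several overlapping features (and `token_is_max_context` later decides which feature "owns" each token). A toy sketch of that windowing, stripped of the tokenizer and special tokens (numbers chosen only for illustration)::

    doc_tokens = ["tok%d" % i for i in range(10)]
    max_tokens_per_span = 6   # stands in for the room left after the query and special tokens
    doc_stride = 4

    spans = []
    while len(spans) * doc_stride < len(doc_tokens):
        start = len(spans) * doc_stride
        spans.append(doc_tokens[start:start + max_tokens_per_span])

    for span in spans:
        print(span)
    # ['tok0'..'tok5'], ['tok4'..'tok9'], ['tok8', 'tok9'] -- consecutive spans overlap by 2 tokens
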
+ doc_start = span["start"] + doc_end = span["start"] + span["length"] - 1 + out_of_span = False + + if not (tok_start_position >= doc_start and tok_end_position <= doc_end): + out_of_span = True + + if out_of_span: + start_position = cls_index + end_position = cls_index + span_is_impossible = True + else: + if tokenizer.padding_side == "left": + doc_offset = 0 + else: + doc_offset = len(truncated_query) + sequence_added_tokens + + start_position = tok_start_position - doc_start + doc_offset + end_position = tok_end_position - doc_start + doc_offset + + + features.append(SquadFeatures( + span['input_ids'], + span['attention_mask'], + span['token_type_ids'], + cls_index, + p_mask.tolist(), + + example_index=example_index, + unique_id=unique_id, + paragraph_len=span['paragraph_len'], + token_is_max_context=span["token_is_max_context"], + tokens=span["tokens"], + token_to_orig_map=span["token_to_orig_map"], + + start_position=start_position, + end_position=end_position + )) + + unique_id += 1 + + if return_dataset == 'pt': + if not is_torch_available(): + raise ImportError("Pytorch must be installed to return a pytorch dataset.") + + # Convert to Tensors and build dataset + all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) + all_input_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) + all_segment_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) + all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long) + all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float) + + if not is_training: + all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) + dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, + all_example_index, all_cls_index, all_p_mask) + else: + all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) + all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) + dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, + all_start_positions, all_end_positions, + all_cls_index, all_p_mask) + + return features, dataset + + + return features + + +class SquadProcessor(DataProcessor): + """ + Processor for the SQuAD data set. + Overriden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and version 2.0 of SQuAD, respectively. + """ + train_file = None + dev_file = None + + def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False): + if not evaluate: + answer = tensor_dict['answers']['text'][0].numpy().decode('utf-8') + answer_start = tensor_dict['answers']['answer_start'][0].numpy() + answers = [] + else: + answers = [{ + "answer_start": start.numpy(), + "text": text.numpy().decode('utf-8') + } for start, text in zip(tensor_dict['answers']["answer_start"], tensor_dict['answers']["text"])] + + answer = None + answer_start = None + + return SquadExample( + qas_id=tensor_dict['id'].numpy().decode("utf-8"), + question_text=tensor_dict['question'].numpy().decode('utf-8'), + context_text=tensor_dict['context'].numpy().decode('utf-8'), + answer_text=answer, + start_position_character=answer_start, + title=tensor_dict['title'].numpy().decode('utf-8'), + answers=answers + ) + + def get_examples_from_dataset(self, dataset, evaluate=False): + """ + Creates a list of :class:`~transformers.data.processors.squad.SquadExample` using a TFDS dataset. 
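[Editor's note] Putting the pieces above together, a typical PyTorch workflow loads examples with a processor and asks for a TensorDataset directly via return_dataset='pt'. A hedged usage sketch (the data path, checkpoint name and hyper-parameters are placeholders; it assumes the module path of this new file and that a BERT tokenizer is available)::

    from torch.utils.data import DataLoader, SequentialSampler
    from transformers import BertTokenizer
    from transformers.data.processors.squad import SquadV2Processor, squad_convert_examples_to_features

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = SquadV2Processor()
    examples = processor.get_dev_examples("/path/to/squad")   # expects dev-v2.0.json in that directory

    features, dataset = squad_convert_examples_to_features(
        examples=examples,
        tokenizer=tokenizer,
        max_seq_length=384,
        doc_stride=128,
        max_query_length=64,
        is_training=False,
        return_dataset="pt",
    )
    dataloader = DataLoader(dataset, sampler=SequentialSampler(dataset), batch_size=8)
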
+ + Args: + dataset: The tfds dataset loaded from `tensorflow_datasets.load("squad")` + evaluate: boolean specifying if in evaluation mode or in training mode + + Returns: + List of SquadExample + + Examples:: + + import tensorflow_datasets as tfds + dataset = tfds.load("squad") + + training_examples = get_examples_from_dataset(dataset, evaluate=False) + evaluation_examples = get_examples_from_dataset(dataset, evaluate=True) + """ + + if evaluate: + dataset = dataset["validation"] + else: + dataset = dataset["train"] + + examples = [] + for tensor_dict in tqdm(dataset): + examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate)) + + return examples + + def get_train_examples(self, data_dir, filename=None): + """ + Returns the training examples from the data directory. + + Args: + data_dir: Directory containing the data files used for training and evaluating. + filename: None by default, specify this if the training file has a different name than the original one + which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively. + + """ + if data_dir is None: + data_dir = "" + + if self.train_file is None: + raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor") + + with open(os.path.join(data_dir, self.train_file if filename is None else filename), "r", encoding='utf-8') as reader: + input_data = json.load(reader)["data"] + return self._create_examples(input_data, "train") + + def get_dev_examples(self, data_dir, filename=None): + """ + Returns the evaluation example from the data directory. + + Args: + data_dir: Directory containing the data files used for training and evaluating. + filename: None by default, specify this if the evaluation file has a different name than the original one + which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively. 
+ """ + if data_dir is None: + data_dir = "" + + if self.dev_file is None: + raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor") + + with open(os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding='utf-8') as reader: + input_data = json.load(reader)["data"] + return self._create_examples(input_data, "dev") + + def _create_examples(self, input_data, set_type): + is_training = set_type == "train" + examples = [] + for entry in tqdm(input_data): + title = entry['title'] + for paragraph in entry["paragraphs"]: + context_text = paragraph["context"] + for qa in paragraph["qas"]: + qas_id = qa["id"] + question_text = qa["question"] + start_position_character = None + answer_text = None + answers = [] + + if "is_impossible" in qa: + is_impossible = qa["is_impossible"] + else: + is_impossible = False + + if not is_impossible: + if is_training: + answer = qa["answers"][0] + answer_text = answer['text'] + start_position_character = answer['answer_start'] + else: + answers = qa["answers"] + + example = SquadExample( + qas_id=qas_id, + question_text=question_text, + context_text=context_text, + answer_text=answer_text, + start_position_character=start_position_character, + title=title, + is_impossible=is_impossible, + answers=answers + ) + + examples.append(example) + return examples + +class SquadV1Processor(SquadProcessor): + train_file = "train-v1.1.json" + dev_file = "dev-v1.1.json" + + +class SquadV2Processor(SquadProcessor): + train_file = "train-v2.0.json" + dev_file = "dev-v2.0.json" + + +class SquadExample(object): + """ + A single training/test example for the Squad dataset, as loaded from disk. + + Args: + qas_id: The example's unique identifier + question_text: The question string + context_text: The context string + answer_text: The answer string + start_position_character: The character position of the start of the answer + title: The title of the example + answers: None by default, this is used during evaluation. Holds answers as well as their start positions. + is_impossible: False by default, set to True if the example has no possible answer. + """ + + def __init__(self, + qas_id, + question_text, + context_text, + answer_text, + start_position_character, + title, + answers=[], + is_impossible=False): + self.qas_id = qas_id + self.question_text = question_text + self.context_text = context_text + self.answer_text = answer_text + self.title = title + self.is_impossible = is_impossible + self.answers = answers + + self.start_position, self.end_position = 0, 0 + + doc_tokens = [] + char_to_word_offset = [] + prev_is_whitespace = True + + # Split on whitespace so that different tokens may be attributed to their original position. + for c in self.context_text: + if _is_whitespace(c): + prev_is_whitespace = True + else: + if prev_is_whitespace: + doc_tokens.append(c) + else: + doc_tokens[-1] += c + prev_is_whitespace = False + char_to_word_offset.append(len(doc_tokens) - 1) + + self.doc_tokens = doc_tokens + self.char_to_word_offset = char_to_word_offset + + # Start end end positions only has a value during evaluation. + if start_position_character is not None and not is_impossible: + self.start_position = char_to_word_offset[start_position_character] + self.end_position = char_to_word_offset[start_position_character + len(answer_text) - 1] + + +class SquadFeatures(object): + """ + Single squad example features to be fed to a model. 
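[Editor's note] The SquadExample constructor above converts the character-level answer start into word-level start/end positions through char_to_word_offset. A small standalone sketch of that mapping, simplified to plain spaces (the context and answer are invented for illustration)::

    context = "Jim Henson was a puppeteer"
    answer_text = "a puppeteer"
    start_char = context.index(answer_text)   # 15

    doc_tokens, char_to_word_offset = [], []
    prev_is_whitespace = True
    for c in context:
        if c == " ":
            prev_is_whitespace = True
        else:
            if prev_is_whitespace:
                doc_tokens.append(c)      # start a new word
            else:
                doc_tokens[-1] += c       # extend the current word
            prev_is_whitespace = False
        char_to_word_offset.append(len(doc_tokens) - 1)   # every character points to its word index

    start_position = char_to_word_offset[start_char]
    end_position = char_to_word_offset[start_char + len(answer_text) - 1]
    print(doc_tokens[start_position:end_position + 1])    # ['a', 'puppeteer']
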
+ Those features are model-specific and can be crafted from :class:`~transformers.data.processors.squad.SquadExample` + using the :method:`~transformers.data.processors.squad.squad_convert_examples_to_features` method. + + Args: + input_ids: Indices of input sequence tokens in the vocabulary. + attention_mask: Mask to avoid performing attention on padding token indices. + token_type_ids: Segment token indices to indicate first and second portions of the inputs. + cls_index: the index of the CLS token. + p_mask: Mask identifying tokens that can be answers vs. tokens that cannot. + Mask with 1 for tokens than cannot be in the answer and 0 for token that can be in an answer + example_index: the index of the example + unique_id: The unique Feature identifier + paragraph_len: The length of the context + token_is_max_context: List of booleans identifying which tokens have their maximum context in this feature object. + If a token does not have their maximum context in this feature object, it means that another feature object + has more information related to that token and should be prioritized over this feature for that token. + tokens: list of tokens corresponding to the input ids + token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer. + start_position: start of the answer token index + end_position: end of the answer token index + """ + + def __init__(self, + input_ids, + attention_mask, + token_type_ids, + cls_index, + p_mask, + + example_index, + unique_id, + paragraph_len, + token_is_max_context, + tokens, + token_to_orig_map, + + start_position, + end_position + ): + self.input_ids = input_ids + self.attention_mask = attention_mask + self.token_type_ids = token_type_ids + self.cls_index = cls_index + self.p_mask = p_mask + + self.example_index = example_index + self.unique_id = unique_id + self.paragraph_len = paragraph_len + self.token_is_max_context = token_is_max_context + self.tokens = tokens + self.token_to_orig_map = token_to_orig_map + + self.start_position = start_position + self.end_position = end_position + + +class SquadResult(object): + """ + Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset. + + Args: + unique_id: The unique identifier corresponding to that example. + start_logits: The logits corresponding to the start of the answer + end_logits: The logits corresponding to the end of the answer + """ + def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None): + self.start_logits = start_logits + self.end_logits = end_logits + self.unique_id = unique_id + + if start_top_index: + self.start_top_index = start_top_index + self.end_top_index = end_top_index + self.cls_logits = cls_logits \ No newline at end of file diff --git a/transformers/data/processors/utils.py b/transformers/data/processors/utils.py index 27138f9959d..07bdf3150c8 100644 --- a/transformers/data/processors/utils.py +++ b/transformers/data/processors/utils.py @@ -107,6 +107,13 @@ class DataProcessor(object): """Gets the list of labels for this data set.""" raise NotImplementedError() + def tfds_map(self, example): + """Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. 
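[Editor's note] As the method body just below shows, tfds_map exists because tensorflow_datasets returns integer labels while the GLUE processors work with the string labels returned by get_labels(); for classification tasks it simply maps the integer onto its string name. A minimal standalone sketch of that conversion (the toy processor and example classes are stand-ins, not the library classes)::

    class ToyExample:
        def __init__(self, label):
            self.label = label

    class ToyMnliProcessor:
        def get_labels(self):
            return ["contradiction", "entailment", "neutral"]

        def tfds_map(self, example):
            # classification task: map the integer label coming from tfds to its string name
            if len(self.get_labels()) > 1:
                example.label = self.get_labels()[int(example.label)]
            return example

    example = ToyMnliProcessor().tfds_map(ToyExample(label=1))
    print(example.label)   # "entailment"
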
+ This method converts examples to the correct format.""" + if len(self.get_labels()) > 1: + example.label = self.get_labels()[int(example.label)] + return example + @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" diff --git a/transformers/data/processors/xnli.py b/transformers/data/processors/xnli.py new file mode 100644 index 00000000000..958bdf62f9e --- /dev/null +++ b/transformers/data/processors/xnli.py @@ -0,0 +1,85 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" XNLI utils (dataset loading and evaluation) """ + +from __future__ import absolute_import, division, print_function + +import logging +import os + +from .utils import DataProcessor, InputExample + +logger = logging.getLogger(__name__) + +class XnliProcessor(DataProcessor): + """Processor for the XNLI dataset. + Adapted from https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/run_classifier.py#L207""" + + def __init__(self, language, train_language = None): + self.language = language + self.train_language = train_language + + def get_train_examples(self, data_dir): + """See base class.""" + lg = self.language if self.train_language is None else self.train_language + lines = self._read_tsv(os.path.join(data_dir, "XNLI-MT-1.0/multinli/multinli.train.{}.tsv".format(lg))) + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "%s-%s" % ('train', i) + text_a = line[0] + text_b = line[1] + label = "contradiction" if line[2] == "contradictory" else line[2] + assert isinstance(text_a, str) and isinstance(text_b, str) and isinstance(label, str) + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + def get_test_examples(self, data_dir): + """See base class.""" + lines = self._read_tsv(os.path.join(data_dir, "XNLI-1.0/xnli.test.tsv")) + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + language = line[0] + if language != self.language: + continue + guid = "%s-%s" % ('test', i) + text_a = line[6] + text_b = line[7] + label = line[1] + assert isinstance(text_a, str) and isinstance(text_b, str) and isinstance(label, str) + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + def get_labels(self): + """See base class.""" + return ["contradiction", "entailment", "neutral"] + +xnli_processors = { + "xnli": XnliProcessor, +} + +xnli_output_modes = { + "xnli": "classification", +} + +xnli_tasks_num_labels = { + "xnli": 3, +} diff --git a/transformers/file_utils.py b/transformers/file_utils.py index 11c4ba6318b..5fd5e2ee397 100644 --- a/transformers/file_utils.py +++ b/transformers/file_utils.py @@ -21,7 +21,8 @@ import boto3 from botocore.config import Config from botocore.exceptions import ClientError import requests -from 
tqdm import tqdm +from tqdm.auto import tqdm +from contextlib import contextmanager logger = logging.getLogger(__name__) # pylint: disable=invalid-name @@ -72,6 +73,8 @@ TF2_WEIGHTS_NAME = 'tf_model.h5' TF_WEIGHTS_NAME = 'model.ckpt' CONFIG_NAME = "config.json" +S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert" + def is_torch_available(): return _torch_available @@ -102,6 +105,18 @@ else: return fn return docstring_decorator + +def is_remote_url(url_or_filename): + parsed = urlparse(url_or_filename) + return parsed.scheme in ('http', 'https', 's3') + +def hf_bucket_url(identifier, postfix=None): + if postfix is None: + return "/".join((S3_BUCKET_PREFIX, identifier)) + else: + return "/".join((S3_BUCKET_PREFIX, identifier, postfix)) + + def url_to_filename(url, etag=None): """ Convert `url` into a hashed filename in a repeatable way. @@ -152,7 +167,7 @@ def filename_to_url(filename, cache_dir=None): return url, etag -def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None): +def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False): """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and @@ -161,6 +176,7 @@ def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=N Args: cache_dir: specify a cache directory to save the file to (overwrite the default cache dir). force_download: if True, re-dowload the file even if it's already cached in the cache dir. + resume_download: if True, resume the download if incompletly recieved file is found. """ if cache_dir is None: cache_dir = TRANSFORMERS_CACHE @@ -169,15 +185,15 @@ def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=N if sys.version_info[0] == 3 and isinstance(cache_dir, Path): cache_dir = str(cache_dir) - parsed = urlparse(url_or_filename) - - if parsed.scheme in ('http', 'https', 's3'): + if is_remote_url(url_or_filename): # URL, so get it from the cache (downloading if necessary) - return get_from_cache(url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies) + return get_from_cache(url_or_filename, cache_dir=cache_dir, + force_download=force_download, proxies=proxies, + resume_download=resume_download) elif os.path.exists(url_or_filename): # File, and it exists. return url_or_filename - elif parsed.scheme == '': + elif urlparse(url_or_filename).scheme == '': # File, but it doesn't exist. 
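[Editor's note] The two helpers added above are small: is_remote_url only inspects the URL scheme, and hf_bucket_url joins an identifier (and optional postfix) onto the S3 bucket prefix. A standalone sketch of equivalent behaviour, handy for seeing which URLs a pretrained identifier resolves to (the identifier is an arbitrary example)::

    try:
        from urllib.parse import urlparse   # Python 3
    except ImportError:
        from urlparse import urlparse       # Python 2

    S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"

    def is_remote_url(url_or_filename):
        return urlparse(url_or_filename).scheme in ("http", "https", "s3")

    def hf_bucket_url(identifier, postfix=None):
        parts = (S3_BUCKET_PREFIX, identifier) if postfix is None else (S3_BUCKET_PREFIX, identifier, postfix)
        return "/".join(parts)

    print(hf_bucket_url("bert-base-uncased", postfix="config.json"))
    # https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased/config.json
    print(is_remote_url("/local/path/pytorch_model.bin"))   # False
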
raise EnvironmentError("file {} not found".format(url_or_filename)) else: @@ -234,19 +250,22 @@ def s3_get(url, temp_file, proxies=None): s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) -def http_get(url, temp_file, proxies=None): - req = requests.get(url, stream=True, proxies=proxies) - content_length = req.headers.get('Content-Length') - total = int(content_length) if content_length is not None else None - progress = tqdm(unit="B", total=total) - for chunk in req.iter_content(chunk_size=1024): +def http_get(url, temp_file, proxies=None, resume_size=0): + headers={'Range':'bytes=%d-'%(resume_size,)} if resume_size > 0 else None + response = requests.get(url, stream=True, proxies=proxies, headers=headers) + if response.status_code == 416: # Range not satisfiable + return + content_length = response.headers.get('Content-Length') + total = resume_size + int(content_length) if content_length is not None else None + progress = tqdm(unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading") + for chunk in response.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() -def get_from_cache(url, cache_dir=None, force_download=False, proxies=None): +def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False): """ Given a URL, look for the corresponding dataset in the local cache. If it's not there, download it. Then return the path to the cached file. @@ -266,12 +285,12 @@ def get_from_cache(url, cache_dir=None, force_download=False, proxies=None): etag = s3_etag(url, proxies=proxies) else: try: - response = requests.head(url, allow_redirects=True, proxies=proxies) + response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout) if response.status_code != 200: etag = None else: etag = response.headers.get("ETag") - except EnvironmentError: + except (EnvironmentError, requests.exceptions.Timeout): etag = None if sys.version_info[0] == 2 and etag is not None: @@ -289,17 +308,35 @@ def get_from_cache(url, cache_dir=None, force_download=False, proxies=None): if matching_files: cache_path = os.path.join(cache_dir, matching_files[-1]) + if resume_download: + incomplete_path = cache_path + '.incomplete' + @contextmanager + def _resumable_file_manager(): + with open(incomplete_path,'a+b') as f: + yield f + os.remove(incomplete_path) + temp_file_manager = _resumable_file_manager + if os.path.exists(incomplete_path): + resume_size = os.stat(incomplete_path).st_size + else: + resume_size = 0 + else: + temp_file_manager = tempfile.NamedTemporaryFile + resume_size = 0 + if not os.path.exists(cache_path) or force_download: # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. 
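[Editor's note] The resumable path above keeps a `<cache_path>.incomplete` file open in append mode and asks the server for the remaining bytes via an HTTP Range header. A simplified standalone sketch of the same idea (URL and file names are placeholders; ETag bookkeeping, proxies and error handling are omitted)::

    import os
    import requests

    def resumable_download(url, target_path):
        incomplete_path = target_path + ".incomplete"
        resume_size = os.stat(incomplete_path).st_size if os.path.exists(incomplete_path) else 0
        headers = {"Range": "bytes=%d-" % resume_size} if resume_size > 0 else None

        response = requests.get(url, stream=True, headers=headers)
        if response.status_code == 416:           # Range not satisfiable: nothing left to fetch
            os.rename(incomplete_path, target_path)
            return
        with open(incomplete_path, "ab") as f:    # append, so earlier partial downloads are kept
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:                         # filter out keep-alive chunks
                    f.write(chunk)
        os.rename(incomplete_path, target_path)
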
- with tempfile.NamedTemporaryFile() as temp_file: + with temp_file_manager() as temp_file: logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name) # GET file object if url.startswith("s3://"): + if resume_download: + logger.warn('Warning: resumable downloads are not implemented for "s3://" urls') s3_get(url, temp_file, proxies=proxies) else: - http_get(url, temp_file, proxies=proxies) + http_get(url, temp_file, proxies=proxies, resume_size=resume_size) # we are copying the file before closing it, so flush to avoid truncation temp_file.flush() diff --git a/transformers/hf_api.py b/transformers/hf_api.py new file mode 100644 index 00000000000..3bbb6c567a8 --- /dev/null +++ b/transformers/hf_api.py @@ -0,0 +1,228 @@ +# coding=utf-8 +# Copyright 2019-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import, division, print_function + +import os +from os.path import expanduser + +import requests +import six +from requests.exceptions import HTTPError +from tqdm import tqdm + +ENDPOINT = "https://huggingface.co" + +class S3Obj: + def __init__( + self, + filename, # type: str + LastModified, # type: str + ETag, # type: str + Size, # type: int + **kwargs + ): + self.filename = filename + self.LastModified = LastModified + self.ETag = ETag + self.Size = Size + + +class PresignedUrl: + def __init__( + self, + write, # type: str + access, # type: str + type, # type: str + **kwargs + ): + self.write = write + self.access = access + self.type = type # mime-type to send to S3. + + +class HfApi: + def __init__(self, endpoint=None): + self.endpoint = endpoint if endpoint is not None else ENDPOINT + + def login( + self, + username, # type: str + password, # type: str + ): + # type: (...) -> str + """ + Call HF API to sign in a user and get a token if credentials are valid. + + Outputs: + token if credentials are valid + + Throws: + requests.exceptions.HTTPError if credentials are invalid + """ + path = "{}/api/login".format(self.endpoint) + r = requests.post(path, json={"username": username, "password": password}) + r.raise_for_status() + d = r.json() + return d["token"] + + def whoami( + self, + token, # type: str + ): + # type: (...) -> str + """ + Call HF API to know "whoami" + """ + path = "{}/api/whoami".format(self.endpoint) + r = requests.get(path, headers={"authorization": "Bearer {}".format(token)}) + r.raise_for_status() + d = r.json() + return d["user"] + + def logout(self, token): + # type: (...) -> void + """ + Call HF API to log out. + """ + path = "{}/api/logout".format(self.endpoint) + r = requests.post(path, headers={"authorization": "Bearer {}".format(token)}) + r.raise_for_status() + + def presign(self, token, filename): + # type: (...) -> PresignedUrl + """ + Call HF API to get a presigned url to upload `filename` to S3. 
+ """ + path = "{}/api/presign".format(self.endpoint) + r = requests.post( + path, + headers={"authorization": "Bearer {}".format(token)}, + json={"filename": filename}, + ) + r.raise_for_status() + d = r.json() + return PresignedUrl(**d) + + def presign_and_upload(self, token, filename, filepath): + # type: (...) -> str + """ + Get a presigned url, then upload file to S3. + + Outputs: + url: Read-only url for the stored file on S3. + """ + urls = self.presign(token, filename=filename) + # streaming upload: + # https://2.python-requests.org/en/master/user/advanced/#streaming-uploads + # + # Even though we presign with the correct content-type, + # the client still has to specify it when uploading the file. + with open(filepath, "rb") as f: + pf = TqdmProgressFileReader(f) + + r = requests.put(urls.write, data=f, headers={ + "content-type": urls.type, + }) + r.raise_for_status() + pf.close() + return urls.access + + def list_objs(self, token): + # type: (...) -> List[S3Obj] + """ + Call HF API to list all stored files for user. + """ + path = "{}/api/listObjs".format(self.endpoint) + r = requests.get(path, headers={"authorization": "Bearer {}".format(token)}) + r.raise_for_status() + d = r.json() + return [S3Obj(**x) for x in d] + + + +class TqdmProgressFileReader: + """ + Wrap an io.BufferedReader `f` (such as the output of `open(…, "rb")`) + and override `f.read()` so as to display a tqdm progress bar. + + see github.com/huggingface/transformers/pull/2078#discussion_r354739608 + for implementation details. + """ + def __init__( + self, + f # type: io.BufferedReader + ): + self.f = f + self.total_size = os.fstat(f.fileno()).st_size # type: int + self.pbar = tqdm(total=self.total_size, leave=False) + if six.PY3: + # does not work unless PY3 + # no big deal as the CLI does not currently support PY2 anyways. + self.read = f.read + f.read = self._read + + def _read(self, n=-1): + self.pbar.update(n) + return self.read(n) + + def close(self): + self.pbar.close() + + + +class HfFolder: + path_token = expanduser("~/.huggingface/token") + + @classmethod + def save_token(cls, token): + """ + Save token, creating folder as needed. + """ + if six.PY3: + os.makedirs(os.path.dirname(cls.path_token), exist_ok=True) + else: + # Python 2 + try: + os.makedirs(os.path.dirname(cls.path_token)) + except OSError as e: + if e.errno != os.errno.EEXIST: + raise e + pass + with open(cls.path_token, 'w+') as f: + f.write(token) + + @classmethod + def get_token(cls): + """ + Get token or None if not existent. + """ + try: + with open(cls.path_token, 'r') as f: + return f.read() + except: + # this is too wide. When Py2 is dead use: + # `except FileNotFoundError:` instead + return None + + @classmethod + def delete_token(cls): + """ + Delete token. + Do not fail if token does not exist. + """ + try: + os.remove(cls.path_token) + except: + return diff --git a/transformers/modeling_albert.py b/transformers/modeling_albert.py new file mode 100644 index 00000000000..0f67bf8f360 --- /dev/null +++ b/transformers/modeling_albert.py @@ -0,0 +1,801 @@ + +# coding=utf-8 +# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
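[Editor's note] Taken together, the API client above supports a simple upload flow: sign in, persist the token with HfFolder, then push a local file to S3 through a presigned URL. A hedged usage sketch (credentials, file names and the model folder are placeholders)::

    from transformers.hf_api import HfApi, HfFolder

    api = HfApi()
    token = HfFolder.get_token()
    if token is None:
        token = api.login(username="my-username", password="my-password")   # placeholder credentials
        HfFolder.save_token(token)

    print(api.whoami(token))                      # sanity check: which account the token belongs to
    url = api.presign_and_upload(token,
                                 filename="my-model/pytorch_model.bin",
                                 filepath="./pytorch_model.bin")
    print(url)                                    # read-only URL of the uploaded file
    for obj in api.list_objs(token):
        print(obj.filename, obj.Size)
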
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch ALBERT model. """ + +import os +import math +import logging +import torch +import torch.nn as nn +from torch.nn import CrossEntropyLoss, MSELoss +from transformers.modeling_utils import PreTrainedModel +from transformers.configuration_albert import AlbertConfig +from transformers.modeling_bert import BertEmbeddings, BertSelfAttention, prune_linear_layer, ACT2FN +from .file_utils import add_start_docstrings + +logger = logging.getLogger(__name__) + + +ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP = { + 'albert-base-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-pytorch_model.bin", + 'albert-large-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-pytorch_model.bin", + 'albert-xlarge-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-pytorch_model.bin", + 'albert-xxlarge-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-pytorch_model.bin", + 'albert-base-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-pytorch_model.bin", + 'albert-large-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-pytorch_model.bin", + 'albert-xlarge-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-pytorch_model.bin", + 'albert-xxlarge-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-pytorch_model.bin", +} + + +def load_tf_weights_in_albert(model, config, tf_checkpoint_path): + """ Load tf checkpoints in a pytorch model.""" + try: + import re + import numpy as np + import tensorflow as tf + except ImportError: + logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see " + "https://www.tensorflow.org/install/ for installation instructions.") + raise + tf_path = os.path.abspath(tf_checkpoint_path) + logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + logger.info("Loading TF weight {} with shape {}".format(name, shape)) + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array) + + for name, array in zip(names, arrays): + print(name) + + for name, array in zip(names, arrays): + original_name = name + + # If saved from the TF HUB module + name = name.replace("module/", "") + + # Renaming and simplifying + name = name.replace("ffn_1", "ffn") + name = name.replace("bert/", "albert/") + name = name.replace("attention_1", "attention") + name = name.replace("transform/", "") + name = name.replace("LayerNorm_1", "full_layer_layer_norm") + name = name.replace("LayerNorm", "attention/LayerNorm") + name = name.replace("transformer/", "") + + # The feed forward layer had an 'intermediate' step which has been abstracted away + name = name.replace("intermediate/dense/", "") + name = name.replace("ffn/intermediate/output/dense/", "ffn_output/") + + # ALBERT attention was split between self and output which have been abstracted away + name = name.replace("/output/", "/") + name = name.replace("/self/", "/") + + # The pooler is a linear layer + name = name.replace("pooler/dense", "pooler") + + # The classifier was simplified to predictions from cls/predictions + name = name.replace("cls/predictions", "predictions") + name = name.replace("predictions/attention", "predictions") + + # Naming was changed to be more explicit + name = name.replace("embeddings/attention", "embeddings") + name = name.replace("inner_group_", "albert_layers/") + name = name.replace("group_", "albert_layer_groups/") + + # Classifier + if len(name.split("/")) == 1 and ("output_bias" in name or "output_weights" in name): + name = "classifier/" + name + + # No ALBERT model currently handles the next sentence prediction task + if "seq_relationship" in name: + continue + + name = name.split('/') + + # Ignore the gradients applied by the LAMB/ADAM optimizers. 
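[Editor's note] The bulk of load_tf_weights_in_albert above is a sequence of string substitutions that turn TF-Hub variable names into the attribute paths of the PyTorch modules defined further down in this file. A small sketch of that renaming applied to one plausible variable name (only a subset of the rules above, for illustration)::

    name = "module/bert/encoder/transformer/group_0/inner_group_0/attention_1/self/query/kernel"

    replacements = [
        ("module/", ""),                 # drop the TF-Hub prefix
        ("bert/", "albert/"),
        ("attention_1", "attention"),
        ("transformer/", ""),
        ("/self/", "/"),                 # self-attention was folded into a single module
        ("inner_group_", "albert_layers/"),
        ("group_", "albert_layer_groups/"),
    ]
    for old, new in replacements:
        name = name.replace(old, new)

    print(name)
    # albert/encoder/albert_layer_groups/0/albert_layers/0/attention/query/kernel
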
+ if "adam_m" in name or "adam_v" in name or "global_step" in name: + logger.info("Skipping {}".format("/".join(name))) + continue + + pointer = model + for m_name in name: + if re.fullmatch(r'[A-Za-z]+_\d+', m_name): + l = re.split(r'_(\d+)', m_name) + else: + l = [m_name] + + if l[0] == 'kernel' or l[0] == 'gamma': + pointer = getattr(pointer, 'weight') + elif l[0] == 'output_bias' or l[0] == 'beta': + pointer = getattr(pointer, 'bias') + elif l[0] == 'output_weights': + pointer = getattr(pointer, 'weight') + elif l[0] == 'squad': + pointer = getattr(pointer, 'classifier') + else: + try: + pointer = getattr(pointer, l[0]) + except AttributeError: + logger.info("Skipping {}".format("/".join(name))) + continue + if len(l) >= 2: + num = int(l[1]) + pointer = pointer[num] + + if m_name[-11:] == '_embeddings': + pointer = getattr(pointer, 'weight') + elif m_name == 'kernel': + array = np.transpose(array) + try: + assert pointer.shape == array.shape + except AssertionError as e: + e.args += (pointer.shape, array.shape) + raise + print("Initialize PyTorch weight {} from {}".format(name, original_name)) + pointer.data = torch.from_numpy(array) + + return model + + +class AlbertEmbeddings(BertEmbeddings): + """ + Construct the embeddings from word, position and token_type embeddings. + """ + def __init__(self, config): + super(AlbertEmbeddings, self).__init__(config) + + self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size) + self.LayerNorm = torch.nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps) + + +class AlbertAttention(BertSelfAttention): + def __init__(self, config): + super(AlbertAttention, self).__init__(config) + + self.output_attentions = config.output_attentions + self.num_attention_heads = config.num_attention_heads + self.hidden_size = config.hidden_size + self.attention_head_size = config.hidden_size // config.num_attention_heads + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + mask = torch.ones(self.num_attention_heads, self.attention_head_size) + heads = set(heads) - self.pruned_heads # Convert to set and emove already pruned heads + for head in heads: + # Compute how many pruned heads are before the head and move the index accordingly + head = head - sum(1 if h < head else 0 for h in self.pruned_heads) + mask[head] = 0 + mask = mask.view(-1).contiguous().eq(1) + index = torch.arange(len(mask))[mask].long() + + # Prune linear layers + self.query = prune_linear_layer(self.query, index) + self.key = prune_linear_layer(self.key, index) + self.value = prune_linear_layer(self.value, index) + self.dense = prune_linear_layer(self.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.num_attention_heads = self.num_attention_heads - len(heads) + self.all_head_size = self.attention_head_size * self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward(self, input_ids, attention_mask=None, head_mask=None): + mixed_query_layer = self.query(input_ids) + mixed_key_layer = self.key(input_ids) + mixed_value_layer = self.value(input_ids) + + query_layer = 
self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + reshaped_context_layer = context_layer.view(*new_context_layer_shape) + + + # Should find a better way to do this + w = self.dense.weight.t().view(self.num_attention_heads, self.attention_head_size, self.hidden_size).to(context_layer.dtype) + b = self.dense.bias.to(context_layer.dtype) + + projected_context_layer = torch.einsum("bfnd,ndh->bfh", context_layer, w) + b + projected_context_layer_dropout = self.dropout(projected_context_layer) + layernormed_context_layer = self.LayerNorm(input_ids + projected_context_layer_dropout) + return (layernormed_context_layer, attention_probs) if self.output_attentions else (layernormed_context_layer,) + + +class AlbertLayer(nn.Module): + def __init__(self, config): + super(AlbertLayer, self).__init__() + + self.config = config + self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.attention = AlbertAttention(config) + self.ffn = nn.Linear(config.hidden_size, config.intermediate_size) + self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size) + self.activation = ACT2FN[config.hidden_act] + + def forward(self, hidden_states, attention_mask=None, head_mask=None): + attention_output = self.attention(hidden_states, attention_mask, head_mask) + ffn_output = self.ffn(attention_output[0]) + ffn_output = self.activation(ffn_output) + ffn_output = self.ffn_output(ffn_output) + hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0]) + + return (hidden_states,) + attention_output[1:] # add attentions if we output them + + +class AlbertLayerGroup(nn.Module): + def __init__(self, config): + super(AlbertLayerGroup, self).__init__() + + self.output_attentions = config.output_attentions + self.output_hidden_states = config.output_hidden_states + self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)]) + + def forward(self, hidden_states, attention_mask=None, head_mask=None): + layer_hidden_states = () + layer_attentions = () + + for layer_index, albert_layer in enumerate(self.albert_layers): + layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index]) + hidden_states = layer_output[0] + + if self.output_attentions: + layer_attentions = layer_attentions + (layer_output[1],) + + if self.output_hidden_states: + layer_hidden_states = 
layer_hidden_states + (hidden_states,) + + outputs = (hidden_states,) + if self.output_hidden_states: + outputs = outputs + (layer_hidden_states,) + if self.output_attentions: + outputs = outputs + (layer_attentions,) + return outputs # last-layer hidden state, (layer hidden states), (layer attentions) + + +class AlbertTransformer(nn.Module): + def __init__(self, config): + super(AlbertTransformer, self).__init__() + + self.config = config + self.output_attentions = config.output_attentions + self.output_hidden_states = config.output_hidden_states + self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size) + self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)]) + + def forward(self, hidden_states, attention_mask=None, head_mask=None): + hidden_states = self.embedding_hidden_mapping_in(hidden_states) + + all_attentions = () + + if self.output_hidden_states: + all_hidden_states = (hidden_states,) + + for i in range(self.config.num_hidden_layers): + # Number of layers in a hidden group + layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups) + + # Index of the hidden group + group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups)) + + # Index of the layer inside the group + layer_idx = int(i - group_idx * layers_per_group) + + layer_group_output = self.albert_layer_groups[group_idx](hidden_states, attention_mask, head_mask[group_idx*layers_per_group:(group_idx+1)*layers_per_group]) + hidden_states = layer_group_output[0] + + if self.output_attentions: + all_attentions = all_attentions + layer_group_output[-1] + + if self.output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + + outputs = (hidden_states,) + if self.output_hidden_states: + outputs = outputs + (all_hidden_states,) + if self.output_attentions: + outputs = outputs + (all_attentions,) + return outputs # last-layer hidden state, (all hidden states), (all attentions) + + + +class AlbertPreTrainedModel(PreTrainedModel): + """ An abstract class to handle weights initialization and + a simple interface for dowloading and loading pretrained models. + """ + config_class = AlbertConfig + pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP + base_model_prefix = "albert" + + def _init_weights(self, module): + """ Initialize the weights. + """ + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if isinstance(module, (nn.Linear)) and module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +ALBERT_START_DOCSTRING = r""" The ALBERT model was proposed in + `ALBERT: A Lite BERT for Self-supervised Learning of Language Representations`_ + by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut. It presents + two parameter-reduction techniques to lower memory consumption and increase the trainig speed of BERT. + + This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and + refer to the PyTorch documentation for all matter related to general usage and behavior. + + .. 
_`ALBERT: A Lite BERT for Self-supervised Learning of Language Representations`: + https://arxiv.org/abs/1909.11942 + + .. _`torch.nn.Module`: + https://pytorch.org/docs/stable/nn.html#module + + Parameters: + config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the configuration. + Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. +""" + +ALBERT_INPUTS_DOCSTRING = r""" + Inputs: + **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Indices of input sequence tokens in the vocabulary. + To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows: + + (a) For sequence pairs: + + ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` + + ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` + + (b) For single sequences: + + ``tokens: [CLS] the dog is hairy . [SEP]`` + + ``token_type_ids: 0 0 0 0 0 0 0`` + + Albert is a model with absolute position embeddings so it's usually advised to pad the inputs on + the right rather than the left. + + Indices can be obtained using :class:`transformers.AlbertTokenizer`. + See :func:`transformers.PreTrainedTokenizer.encode` and + :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. + **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: + Mask to avoid performing attention on padding token indices. + Mask values selected in ``[0, 1]``: + ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. + **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Segment token indices to indicate first and second portions of the inputs. + Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` + corresponds to a `sentence B` token + (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details). + **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Indices of positions of each input sequence tokens in the position embeddings. + Selected in the range ``[0, config.max_position_embeddings - 1]``. + **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: + Mask to nullify selected heads of the self-attention modules. + Mask values selected in ``[0, 1]``: + ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. +""" + +@add_start_docstrings("The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.", + ALBERT_START_DOCSTRING, ALBERT_INPUTS_DOCSTRING) +class AlbertModel(AlbertPreTrainedModel): + r""" + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` + Sequence of hidden-states at the output of the last layer of the model. + **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)`` + Last layer hidden-state of the first token of the sequence (classification token) + further processed by a Linear layer and a Tanh activation function. The Linear + layer weights are trained from the next sentence prediction (classification) + objective during Bert pretraining. 
This output is usually *not* a good summary + of the semantic content of the input, you're often better with averaging or pooling + the sequence of hidden-states for the whole input sequence. + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + """ + + config_class = AlbertConfig + pretrained_model_archive_map = ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP + load_tf_weights = load_tf_weights_in_albert + base_model_prefix = "albert" + + def __init__(self, config): + super(AlbertModel, self).__init__(config) + + self.config = config + self.embeddings = AlbertEmbeddings(config) + self.encoder = AlbertTransformer(config) + self.pooler = nn.Linear(config.hidden_size, config.hidden_size) + self.pooler_activation = nn.Tanh() + + self.init_weights() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _resize_token_embeddings(self, new_num_tokens): + old_embeddings = self.embeddings.word_embeddings + new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) + self.embeddings.word_embeddings = new_embeddings + return self.embeddings.word_embeddings + + def _prune_heads(self, heads_to_prune): + """ Prunes heads of the model. + heads_to_prune: dict of {layer_num: list of heads to prune in this layer} + ALBERT has a different architecture in that its layers are shared across groups, which then has inner groups. + If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups, there + is a total of 4 different layers. + + These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden layer, + while [2,3] correspond to the two inner groups of the second hidden layer. + + Any layer with in index other than [0,1,2,3] will result in an error. 
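[Editor's note] Both the head-pruning logic described above and the loop in AlbertTransformer.forward rely on mapping a flat layer step onto a shared layer group, since ALBERT reuses the same parameters across layers. A small sketch of the forward-pass arithmetic for a hypothetical configuration with 12 hidden layers and 2 hidden groups (values chosen only for illustration)::

    num_hidden_layers = 12
    num_hidden_groups = 2
    layers_per_group = num_hidden_layers // num_hidden_groups   # 6

    for i in range(num_hidden_layers):
        group_idx = i // layers_per_group            # which shared AlbertLayerGroup runs this step
        layer_idx = i - group_idx * layers_per_group  # position inside that group's head_mask slice
        print(i, group_idx, layer_idx)
    # steps 0-5 reuse group 0, steps 6-11 reuse group 1 -- 12 forward steps, 2 sets of weights
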
+ See base class PreTrainedModel for more information about head pruning + """ + for layer, heads in heads_to_prune.items(): + group_idx = int(layer / self.config.inner_group_num) + inner_group_idx = int(layer - group_idx * self.config.inner_group_num) + self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads) + + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, + inputs_embeds=None): + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if attention_mask is None: + attention_mask = torch.ones(input_shape, device=device) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + if head_mask is not None: + if head_mask.dim() == 1: + head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) + head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) + elif head_mask.dim() == 2: + head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer + head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility + else: + head_mask = [None] * self.config.num_hidden_layers + + embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds) + encoder_outputs = self.encoder(embedding_output, + extended_attention_mask, + head_mask=head_mask) + + sequence_output = encoder_outputs[0] + + pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) + + outputs = (sequence_output, pooled_output) + encoder_outputs[1:] # add hidden_states and attentions if they are here + return outputs + +class AlbertMLMHead(nn.Module): + def __init__(self, config): + super(AlbertMLMHead, self).__init__() + + self.LayerNorm = nn.LayerNorm(config.embedding_size) + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + self.dense = nn.Linear(config.hidden_size, config.embedding_size) + self.decoder = nn.Linear(config.embedding_size, config.vocab_size) + self.activation = ACT2FN[config.hidden_act] + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.activation(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + hidden_states = self.decoder(hidden_states) + + prediction_scores = hidden_states + self.bias + + return prediction_scores + + +@add_start_docstrings("Bert Model with a `language modeling` head on top.", ALBERT_START_DOCSTRING, ALBERT_INPUTS_DOCSTRING) +class AlbertForMaskedLM(AlbertPreTrainedModel): + r""" + **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Labels for computing the masked language modeling loss. 
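[Editor's note] The additive attention mask built in forward() above turns a 0/1 padding mask into large negative biases that get added to the raw attention scores, so padded positions receive essentially zero attention after the softmax. A tiny sketch of that transformation (assumes PyTorch is installed; the mask values are arbitrary)::

    import torch

    attention_mask = torch.tensor([[1, 1, 1, 0, 0]])          # 1 = real token, 0 = padding
    extended = attention_mask[:, None, None, :].float()        # shape (batch, 1, 1, seq_len)
    extended = (1.0 - extended) * -10000.0
    print(extended)
    # real tokens contribute ~0, padded positions get -10000 added to their attention scores
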
+ Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) + Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels + in ``[0, ..., config.vocab_size]`` + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Masked language modeling loss. + **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + """ + + def __init__(self, config): + super(AlbertForMaskedLM, self).__init__(config) + + self.albert = AlbertModel(config) + self.predictions = AlbertMLMHead(config) + + self.init_weights() + self.tie_weights() + + def tie_weights(self): + """ Make sure we are sharing the input and output embeddings. + Export to TorchScript can't handle parameter sharing so we are cloning them instead. + """ + self._tie_or_clone_weights(self.predictions.decoder, + self.albert.embeddings.word_embeddings) + + def get_output_embeddings(self): + return self.predictions.decoder + + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, + masked_lm_labels=None): + outputs = self.albert( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds + ) + sequence_outputs = outputs[0] + + prediction_scores = self.predictions(sequence_outputs) + + outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here + if masked_lm_labels is not None: + loss_fct = CrossEntropyLoss(ignore_index=-1) + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) + outputs = (masked_lm_loss,) + outputs + + return outputs + + +@add_start_docstrings("""Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of + the pooled output) e.g. for GLUE tasks. """, + ALBERT_START_DOCSTRING, ALBERT_INPUTS_DOCSTRING) +class AlbertForSequenceClassification(AlbertPreTrainedModel): + r""" + **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: + Labels for computing the sequence classification/regression loss. + Indices should be in ``[0, ..., config.num_labels - 1]``. + If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), + If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). 
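[Editor's note] A hedged usage sketch for the masked-LM head above, in the same style as the docstring examples elsewhere in this file (it assumes both classes are importable from the package root and that the 'albert-base-v2' weights are available; the sentence is a placeholder)::

    import torch
    from transformers import AlbertTokenizer, AlbertForMaskedLM

    tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
    model = AlbertForMaskedLM.from_pretrained('albert-base-v2')

    input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # batch size 1
    masked_lm_labels = input_ids.clone()                       # here every token is used as its own label
    outputs = model(input_ids, masked_lm_labels=masked_lm_labels)
    loss, prediction_scores = outputs[:2]
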
+ + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Classification (or regression if config.num_labels==1) loss. + **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` + Classification (or regression if config.num_labels==1) scores (before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + + Examples:: + + tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') + model = AlbertForSequenceClassification.from_pretrained('albert-base-v2') + input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 + outputs = model(input_ids, labels=labels) + loss, logits = outputs[:2] + + """ + def __init__(self, config): + super(AlbertForSequenceClassification, self).__init__(config) + self.num_labels = config.num_labels + + self.albert = AlbertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) + + self.init_weights() + + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, + position_ids=None, head_mask=None, inputs_embeds=None, labels=None): + + outputs = self.albert( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + + outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here + + if labels is not None: + if self.num_labels == 1: + # We are doing regression + loss_fct = MSELoss() + loss = loss_fct(logits.view(-1), labels.view(-1)) + else: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + outputs = (loss,) + outputs + + return outputs # (loss), logits, (hidden_states), (attentions) + + + +@add_start_docstrings("""Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of + the hidden-states output to compute `span start logits` and `span end logits`). """, + ALBERT_START_DOCSTRING, ALBERT_INPUTS_DOCSTRING) +class AlbertForQuestionAnswering(AlbertPreTrainedModel): + r""" + **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). + Position outside of the sequence are not taken into account for computing the loss. 
+ **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). + Position outside of the sequence are not taken into account for computing the loss. + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. + **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` + Span-start scores (before SoftMax). + **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` + Span-end scores (before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + + Examples:: + + tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') + model = AlbertForQuestionAnswering.from_pretrained('albert-base-v2') + question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" + input_text = "[CLS] " + question + " [SEP] " + text + " [SEP]" + input_ids = tokenizer.encode(input_text) + token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))] + start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids])) + all_tokens = tokenizer.convert_ids_to_tokens(input_ids) + print(' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])) + # a nice puppet + + + """ + def __init__(self, config): + super(AlbertForQuestionAnswering, self).__init__(config) + self.num_labels = config.num_labels + + self.albert = AlbertModel(config) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + self.init_weights() + + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, + inputs_embeds=None, start_positions=None, end_positions=None): + + outputs = self.albert( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1) + end_logits = end_logits.squeeze(-1) + + outputs = (start_logits, end_logits,) + outputs[2:] + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = 
start_logits.size(1) + start_positions.clamp_(0, ignored_index) + end_positions.clamp_(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + outputs = (total_loss,) + outputs + + return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions) diff --git a/transformers/modeling_auto.py b/transformers/modeling_auto.py index d98110d4bd9..96f45d8ec44 100644 --- a/transformers/modeling_auto.py +++ b/transformers/modeling_auto.py @@ -27,6 +27,8 @@ from .modeling_xlnet import XLNetModel, XLNetLMHeadModel, XLNetForSequenceClassi from .modeling_xlm import XLMModel, XLMWithLMHeadModel, XLMForSequenceClassification, XLMForQuestionAnswering from .modeling_roberta import RobertaModel, RobertaForMaskedLM, RobertaForSequenceClassification from .modeling_distilbert import DistilBertModel, DistilBertForQuestionAnswering, DistilBertForMaskedLM, DistilBertForSequenceClassification +from .modeling_camembert import CamembertModel, CamembertForMaskedLM, CamembertForSequenceClassification, CamembertForMultipleChoice +from .modeling_albert import AlbertModel, AlbertForMaskedLM, AlbertForSequenceClassification, AlbertForQuestionAnswering from .modeling_utils import PreTrainedModel, SequenceSummary @@ -48,14 +50,16 @@ class AutoModel(object): The base model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: DistilBertModel (DistilBERT model) + - contains `albert`: AlbertModel (ALBERT model) + - contains `camembert`: CamembertModel (CamemBERT model) - contains `roberta`: RobertaModel (RoBERTa model) - contains `bert`: BertModel (Bert model) - contains `openai-gpt`: OpenAIGPTModel (OpenAI GPT model) - contains `gpt2`: GPT2Model (OpenAI GPT-2 model) - - contains `ctrl`: CTRLModel (Salesforce CTRL model) - contains `transfo-xl`: TransfoXLModel (Transformer-XL model) - contains `xlnet`: XLNetModel (XLNet model) - contains `xlm`: XLMModel (XLM model) + - contains `ctrl`: CTRLModel (Salesforce CTRL model) This class cannot be instantiated using `__init__()` (throws an error). """ @@ -71,14 +75,16 @@ class AutoModel(object): The model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: DistilBertModel (DistilBERT model) + - contains `albert`: AlbertModel (ALBERT model) + - contains `camembert`: CamembertModel (CamemBERT model) - contains `roberta`: RobertaModel (RoBERTa model) - contains `bert`: BertModel (Bert model) - contains `openai-gpt`: OpenAIGPTModel (OpenAI GPT model) - contains `gpt2`: GPT2Model (OpenAI GPT-2 model) - - contains `ctrl`: CTRLModel (Salesforce CTRL model) - contains `transfo-xl`: TransfoXLModel (Transformer-XL model) - contains `xlnet`: XLNetModel (XLNet model) - contains `xlm`: XLMModel (XLM model) + - contains `ctrl`: CTRLModel (Salesforce CTRL model) The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated) To train the model, you should first set it back in training mode with `model.train()` @@ -87,6 +93,7 @@ class AutoModel(object): pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. 
+ - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. @@ -112,6 +119,9 @@ class AutoModel(object): force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. + resume_download: (`optional`) boolean, default False: + Do not delete incompletely recieved file. Attempt to resume the download if such a file exists. + proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. @@ -138,6 +148,10 @@ class AutoModel(object): """ if 'distilbert' in pretrained_model_name_or_path: return DistilBertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) + elif 'albert' in pretrained_model_name_or_path: + return AlbertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) + elif 'camembert' in pretrained_model_name_or_path: + return CamembertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'roberta' in pretrained_model_name_or_path: return RobertaModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'bert' in pretrained_model_name_or_path: @@ -156,7 +170,7 @@ class AutoModel(object): return CTRLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) raise ValueError("Unrecognized model identifier in {}. Should contains one of " "'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', " - "'xlm', 'roberta, 'ctrl'".format(pretrained_model_name_or_path)) + "'xlm', 'roberta, 'ctrl', 'distilbert', 'camembert', 'albert'".format(pretrained_model_name_or_path)) class AutoModelWithLMHead(object): @@ -172,14 +186,16 @@ class AutoModelWithLMHead(object): The model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: DistilBertForMaskedLM (DistilBERT model) + - contains `albert`: AlbertForMaskedLM (ALBERT model) + - contains `camembert`: CamembertForMaskedLM (CamemBERT model) - contains `roberta`: RobertaForMaskedLM (RoBERTa model) - contains `bert`: BertForMaskedLM (Bert model) - contains `openai-gpt`: OpenAIGPTLMHeadModel (OpenAI GPT model) - contains `gpt2`: GPT2LMHeadModel (OpenAI GPT-2 model) - - contains `ctrl`: CTRLLMModel (Salesforce CTRL model) - contains `transfo-xl`: TransfoXLLMHeadModel (Transformer-XL model) - contains `xlnet`: XLNetLMHeadModel (XLNet model) - contains `xlm`: XLMWithLMHeadModel (XLM model) + - contains `ctrl`: CTRLLMHeadModel (Salesforce CTRL model) This class cannot be instantiated using `__init__()` (throws an error). 
""" @@ -198,6 +214,8 @@ class AutoModelWithLMHead(object): The model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: DistilBertForMaskedLM (DistilBERT model) + - contains `albert`: AlbertForMaskedLM (ALBERT model) + - contains `camembert`: CamembertForMaskedLM (CamemBERT model) - contains `roberta`: RobertaForMaskedLM (RoBERTa model) - contains `bert`: BertForMaskedLM (Bert model) - contains `openai-gpt`: OpenAIGPTLMHeadModel (OpenAI GPT model) @@ -205,6 +223,7 @@ class AutoModelWithLMHead(object): - contains `transfo-xl`: TransfoXLLMHeadModel (Transformer-XL model) - contains `xlnet`: XLNetLMHeadModel (XLNet model) - contains `xlm`: XLMWithLMHeadModel (XLM model) + - contains `ctrl`: CTRLLMHeadModel (Salesforce CTRL model) The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated) To train the model, you should first set it back in training mode with `model.train()` @@ -213,6 +232,7 @@ class AutoModelWithLMHead(object): pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. + - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. @@ -237,6 +257,8 @@ class AutoModelWithLMHead(object): force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. + resume_download: (`optional`) boolean, default False: + Do not delete incompletely recieved file. Attempt to resume the download if such a file exists. proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. @@ -264,6 +286,10 @@ class AutoModelWithLMHead(object): """ if 'distilbert' in pretrained_model_name_or_path: return DistilBertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) + elif 'albert' in pretrained_model_name_or_path: + return AlbertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) + elif 'camembert' in pretrained_model_name_or_path: + return CamembertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'roberta' in pretrained_model_name_or_path: return RobertaForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'bert' in pretrained_model_name_or_path: @@ -282,7 +308,7 @@ class AutoModelWithLMHead(object): return CTRLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) raise ValueError("Unrecognized model identifier in {}. 
Should contains one of " "'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', " - "'xlm', 'roberta','ctrl'".format(pretrained_model_name_or_path)) + "'xlm', 'roberta','ctrl', 'distilbert', 'camembert', 'albert'".format(pretrained_model_name_or_path)) class AutoModelForSequenceClassification(object): @@ -298,6 +324,8 @@ class AutoModelForSequenceClassification(object): The model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: DistilBertForSequenceClassification (DistilBERT model) + - contains `albert`: AlbertForSequenceClassification (ALBERT model) + - contains `camembert`: CamembertForSequenceClassification (CamemBERT model) - contains `roberta`: RobertaForSequenceClassification (RoBERTa model) - contains `bert`: BertForSequenceClassification (Bert model) - contains `xlnet`: XLNetForSequenceClassification (XLNet model) @@ -320,6 +348,8 @@ class AutoModelForSequenceClassification(object): The model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: DistilBertForSequenceClassification (DistilBERT model) + - contains `albert`: AlbertForSequenceClassification (ALBERT model) + - contains `camembert`: CamembertForSequenceClassification (CamemBERT model) - contains `roberta`: RobertaForSequenceClassification (RoBERTa model) - contains `bert`: BertForSequenceClassification (Bert model) - contains `xlnet`: XLNetForSequenceClassification (XLNet model) @@ -332,6 +362,7 @@ class AutoModelForSequenceClassification(object): pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. + - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. @@ -357,6 +388,9 @@ class AutoModelForSequenceClassification(object): force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. + resume_download: (`optional`) boolean, default False: + Do not delete incompletely recieved file. Attempt to resume the download if such a file exists. + proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. 
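
A minimal usage sketch of the dispatch rules documented above (both shortcut names appear elsewhere in this patch; the branches are plain substring checks, so 'albert' and 'camembert' are tested before 'bert' and 'roberta'):

    from transformers import AutoModelForSequenceClassification

    # 'camembert-base' contains both 'camembert' and 'bert'; because the new
    # 'camembert' branch is checked first, this resolves to
    # CamembertForSequenceClassification rather than BertForSequenceClassification.
    model_fr = AutoModelForSequenceClassification.from_pretrained('camembert-base')

    # 'albert-base-v2' likewise resolves to AlbertForSequenceClassification.
    model_en = AutoModelForSequenceClassification.from_pretrained('albert-base-v2')
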
@@ -383,6 +417,10 @@ class AutoModelForSequenceClassification(object): """ if 'distilbert' in pretrained_model_name_or_path: return DistilBertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) + elif 'albert' in pretrained_model_name_or_path: + return AlbertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) + elif 'camembert' in pretrained_model_name_or_path: + return CamembertForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'roberta' in pretrained_model_name_or_path: return RobertaForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'bert' in pretrained_model_name_or_path: @@ -393,7 +431,7 @@ class AutoModelForSequenceClassification(object): return XLMForSequenceClassification.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) raise ValueError("Unrecognized model identifier in {}. Should contains one of " - "'bert', 'xlnet', 'xlm', 'roberta'".format(pretrained_model_name_or_path)) + "'bert', 'xlnet', 'xlm', 'roberta', 'distilbert', 'camembert', 'albert'".format(pretrained_model_name_or_path)) class AutoModelForQuestionAnswering(object): @@ -409,6 +447,7 @@ class AutoModelForQuestionAnswering(object): The model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: DistilBertForQuestionAnswering (DistilBERT model) + - contains `albert`: AlbertForQuestionAnswering (ALBERT model) - contains `bert`: BertForQuestionAnswering (Bert model) - contains `xlnet`: XLNetForQuestionAnswering (XLNet model) - contains `xlm`: XLMForQuestionAnswering (XLM model) @@ -430,6 +469,7 @@ class AutoModelForQuestionAnswering(object): The model class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: DistilBertForQuestionAnswering (DistilBERT model) + - contains `albert`: AlbertForQuestionAnswering (ALBERT model) - contains `bert`: BertForQuestionAnswering (Bert model) - contains `xlnet`: XLNetForQuestionAnswering (XLNet model) - contains `xlm`: XLMForQuestionAnswering (XLM model) @@ -441,6 +481,7 @@ class AutoModelForQuestionAnswering(object): pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. + - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. 
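
A similar hedged sketch for question answering, assuming the 'albert-base-v2' checkpoint used in the examples above; with the 'albert' branch added in the next hunk, an ALBERT checkpoint no longer falls through to the generic 'bert' pattern:

    from transformers import AutoModelForQuestionAnswering

    # Previously 'albert-base-v2' matched the 'bert' substring and returned a
    # BertForQuestionAnswering; the new branch resolves it to AlbertForQuestionAnswering.
    model = AutoModelForQuestionAnswering.from_pretrained('albert-base-v2')
    print(type(model).__name__)  # AlbertForQuestionAnswering
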
@@ -492,6 +533,8 @@ class AutoModelForQuestionAnswering(object): """ if 'distilbert' in pretrained_model_name_or_path: return DistilBertForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) + elif 'albert' in pretrained_model_name_or_path: + return AlbertForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'bert' in pretrained_model_name_or_path: return BertForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif 'xlnet' in pretrained_model_name_or_path: @@ -500,4 +543,4 @@ class AutoModelForQuestionAnswering(object): return XLMForQuestionAnswering.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) raise ValueError("Unrecognized model identifier in {}. Should contains one of " - "'bert', 'xlnet', 'xlm'".format(pretrained_model_name_or_path)) + "'bert', 'xlnet', 'xlm', 'distilbert', 'albert'".format(pretrained_model_name_or_path)) diff --git a/transformers/modeling_bert.py b/transformers/modeling_bert.py index 8c92241fa22..d0f35272ac0 100644 --- a/transformers/modeling_bert.py +++ b/transformers/modeling_bert.py @@ -17,12 +17,10 @@ from __future__ import absolute_import, division, print_function, unicode_literals -import json import logging import math import os import sys -from io import open import torch from torch import nn @@ -50,8 +48,13 @@ BERT_PRETRAINED_MODEL_ARCHIVE_MAP = { 'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin", 'bert-base-german-dbmdz-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-pytorch_model.bin", 'bert-base-german-dbmdz-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-pytorch_model.bin", + 'bert-base-japanese': "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-pytorch_model.bin", + 'bert-base-japanese-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking-pytorch_model.bin", + 'bert-base-japanese-char': "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-pytorch_model.bin", + 'bert-base-japanese-char-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking-pytorch_model.bin" } + def load_tf_weights_in_bert(model, config, tf_checkpoint_path): """ Load tf checkpoints in a pytorch model. """ @@ -127,21 +130,28 @@ def gelu(x): """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) + def gelu_new(x): """ Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT). Also see https://arxiv.org/abs/1606.08415 """ return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) + def swish(x): return x * torch.sigmoid(x) -ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "gelu_new": gelu_new} +def mish(x): + return x * torch.tanh(nn.functional.softplus(x)) + + +ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "gelu_new": gelu_new, "mish": mish} BertLayerNorm = torch.nn.LayerNorm + class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. 
""" @@ -156,19 +166,26 @@ class BertEmbeddings(nn.Module): self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) - def forward(self, input_ids, token_type_ids=None, position_ids=None): - seq_length = input_ids.size(1) - if position_ids is None: - position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) - position_ids = position_ids.unsqueeze(0).expand_as(input_ids) - if token_type_ids is None: - token_type_ids = torch.zeros_like(input_ids) + def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] - words_embeddings = self.word_embeddings(input_ids) + seq_length = input_shape[1] + device = input_ids.device if input_ids is not None else inputs_embeds.device + if position_ids is None: + position_ids = torch.arange(seq_length, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0).expand(input_shape) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) - embeddings = words_embeddings + position_embeddings + token_type_embeddings + embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings @@ -198,10 +215,19 @@ class BertSelfAttention(nn.Module): x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) - def forward(self, hidden_states, attention_mask=None, head_mask=None): + def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None): mixed_query_layer = self.query(hidden_states) - mixed_key_layer = self.key(hidden_states) - mixed_value_layer = self.value(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. 
+ if encoder_hidden_states is not None: + mixed_key_layer = self.key(encoder_hidden_states) + mixed_value_layer = self.value(encoder_hidden_states) + attention_mask = encoder_attention_mask + else: + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) @@ -260,7 +286,7 @@ class BertAttention(nn.Module): if len(heads) == 0: return mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size) - heads = set(heads) - self.pruned_heads # Convert to set and emove already pruned heads + heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads for head in heads: # Compute how many pruned heads are before the head and move the index accordingly head = head - sum(1 if h < head else 0 for h in self.pruned_heads) @@ -279,9 +305,9 @@ class BertAttention(nn.Module): self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) - def forward(self, input_tensor, attention_mask=None, head_mask=None): - self_outputs = self.self(input_tensor, attention_mask, head_mask) - attention_output = self.output(self_outputs[0], input_tensor) + def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None): + self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask) + attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs @@ -319,15 +345,25 @@ class BertLayer(nn.Module): def __init__(self, config): super(BertLayer, self).__init__() self.attention = BertAttention(config) + self.is_decoder = config.is_decoder + if self.is_decoder: + self.crossattention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) - def forward(self, hidden_states, attention_mask=None, head_mask=None): - attention_outputs = self.attention(hidden_states, attention_mask, head_mask) - attention_output = attention_outputs[0] + def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None): + self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + if self.is_decoder and encoder_hidden_states is not None: + cross_attention_outputs = self.crossattention(attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights + intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) - outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them + outputs = (layer_output,) + outputs return outputs @@ -338,14 +374,14 @@ class BertEncoder(nn.Module): self.output_hidden_states = config.output_hidden_states self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) - def forward(self, hidden_states, attention_mask=None, head_mask=None): + def forward(self, hidden_states, attention_mask=None, 
head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None): all_hidden_states = () all_attentions = () for i, layer_module in enumerate(self.layer): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) - layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i]) + layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask) hidden_states = layer_outputs[0] if self.output_attentions: @@ -484,7 +520,7 @@ BERT_START_DOCSTRING = r""" The BERT model was proposed in https://pytorch.org/docs/stable/nn.html#module Parameters: - config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. + config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ @@ -498,13 +534,13 @@ BERT_INPUTS_DOCSTRING = r""" (a) For sequence pairs: ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` - + ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` (b) For single sequences: ``tokens: [CLS] the dog is hairy . [SEP]`` - + ``token_type_ids: 0 0 0 0 0 0 0`` Bert is a model with absolute position embeddings so it's usually advised to pad the inputs on @@ -529,6 +565,18 @@ BERT_INPUTS_DOCSTRING = r""" Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + **encoder_hidden_states**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``: + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model + is configured as a decoder. + **encoder_attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: + Mask to avoid performing attention on the padding token indices of the encoder input. This mask + is used in the cross-attention if the model is configured as a decoder. + Mask values selected in ``[0, 1]``: + ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. 
""" @add_start_docstrings("The bare Bert Model transformer outputting raw hidden-states without any specific head on top.", @@ -557,13 +605,14 @@ class BertModel(BertPreTrainedModel): tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertModel.from_pretrained('bert-base-uncased') - input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ def __init__(self, config): super(BertModel, self).__init__(config) + self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) @@ -571,12 +620,12 @@ class BertModel(BertPreTrainedModel): self.init_weights() - def _resize_token_embeddings(self, new_num_tokens): - old_embeddings = self.embeddings.word_embeddings - new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) - self.embeddings.word_embeddings = new_embeddings + def get_input_embeddings(self): return self.embeddings.word_embeddings + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} @@ -585,27 +634,87 @@ class BertModel(BertPreTrainedModel): for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) - def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None): - if attention_mask is None: - attention_mask = torch.ones_like(input_ids) - if token_type_ids is None: - token_type_ids = torch.zeros_like(input_ids) + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, + head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None): + """ Forward pass on the Model. - # We create a 3D attention mask from a 2D tensor mask. - # Sizes are [batch_size, 1, 1, to_seq_length] - # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] - # this attention mask is more simple than the triangular masking of causal attention - # used in OpenAI GPT, we just need to prepare the broadcast dimension here. - extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + The model can behave as an encoder (with only self-attention) as well + as a decoder, in which case a layer of cross-attention is added between + the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani, + Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + + To behave as an decoder the model needs to be initialized with the + `is_decoder` argument of the configuration set to `True`; an + `encoder_hidden_states` is expected as an input to the forward pass. + + .. 
_`Attention is all you need`: + https://arxiv.org/abs/1706.03762 + + """ + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if attention_mask is None: + attention_mask = torch.ones(input_shape, device=device) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder: + batch_size, seq_length = input_shape + seq_ids = torch.arange(seq_length, device=device) + causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] + causal_mask = causal_mask.to(torch.long) # not converting to long will cause errors with pytorch version < 1.3 + extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError("Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(input_shape, attention_mask.shape)) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
- extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility + extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + # If a 2D ou 3D attention mask is provided for the cross-attention + # we need to make broadcastabe to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + + if encoder_attention_mask.dim() == 3: + encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] + elif encoder_attention_mask.dim() == 2: + encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] + else: + raise ValueError("Wrong shape for encoder_hidden_shape (shape {}) or encoder_attention_mask (shape {})".format(encoder_hidden_shape, + encoder_attention_mask.shape)) + + encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility + encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 + else: + encoder_extended_attention_mask = None + # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N @@ -617,14 +726,16 @@ class BertModel(BertPreTrainedModel): head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer - head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility + head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility else: head_mask = [None] * self.config.num_hidden_layers - embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids) + embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder(embedding_output, - extended_attention_mask, - head_mask=head_mask) + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) @@ -633,8 +744,9 @@ class BertModel(BertPreTrainedModel): @add_start_docstrings("""Bert Model with two heads on top as done during the pre-training: - a `masked language modeling` head and a `next sentence prediction (classification)` head. """, - BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) + a `masked language modeling` head and a `next sentence prediction (classification)` head. 
""", + BERT_START_DOCSTRING, + BERT_INPUTS_DOCSTRING) class BertForPreTraining(BertPreTrainedModel): r""" **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: @@ -667,7 +779,7 @@ class BertForPreTraining(BertPreTrainedModel): tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForPreTraining.from_pretrained('bert-base-uncased') - input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 outputs = model(input_ids) prediction_scores, seq_relationship_scores = outputs[:2] @@ -679,23 +791,19 @@ class BertForPreTraining(BertPreTrainedModel): self.cls = BertPreTrainingHeads(config) self.init_weights() - self.tie_weights() - def tie_weights(self): - """ Make sure we are sharing the input and output embeddings. - Export to TorchScript can't handle parameter sharing so we are cloning them instead. - """ - self._tie_or_clone_weights(self.cls.predictions.decoder, - self.bert.embeddings.word_embeddings) + def get_output_embeddings(self): + return self.cls.predictions.decoder - def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, masked_lm_labels=None, next_sentence_label=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask) + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds) sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) @@ -713,7 +821,8 @@ class BertForPreTraining(BertPreTrainedModel): @add_start_docstrings("""Bert Model with a `language modeling` head on top. """, - BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) + BERT_START_DOCSTRING, + BERT_INPUTS_DOCSTRING) class BertForMaskedLM(BertPreTrainedModel): r""" **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: @@ -721,10 +830,17 @@ class BertForMaskedLM(BertPreTrainedModel): Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` + **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Labels for computing the left-to-right language modeling loss (next word prediction). + Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) + Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels + in ``[0, ..., config.vocab_size]`` Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: - **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + **masked_lm_loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Masked language modeling loss. + **ltr_lm_loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Next token prediction loss. 
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) @@ -739,7 +855,7 @@ class BertForMaskedLM(BertPreTrainedModel): tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForMaskedLM.from_pretrained('bert-base-uncased') - input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 outputs = model(input_ids, masked_lm_labels=input_ids) loss, prediction_scores = outputs[:2] @@ -751,38 +867,52 @@ class BertForMaskedLM(BertPreTrainedModel): self.cls = BertOnlyMLMHead(config) self.init_weights() - self.tie_weights() - def tie_weights(self): - """ Make sure we are sharing the input and output embeddings. - Export to TorchScript can't handle parameter sharing so we are cloning them instead. - """ - self._tie_or_clone_weights(self.cls.predictions.decoder, - self.bert.embeddings.word_embeddings) + def get_output_embeddings(self): + return self.cls.predictions.decoder - def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, - masked_lm_labels=None): + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, + masked_lm_labels=None, encoder_hidden_states=None, encoder_attention_mask=None, lm_labels=None, ): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask) + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here + + # Although this may seem awkward, BertForMaskedLM supports two scenarios: + # 1. If a tensor that contains the indices of masked labels is provided, + # the cross-entropy is the MLM cross-entropy that measures the likelihood + # of predictions for masked words. + # 2. If `lm_labels` is provided we are in a causal scenario where we + # try to predict the next token for each input in the decoder. if masked_lm_labels is not None: - loss_fct = CrossEntropyLoss(ignore_index=-1) + loss_fct = CrossEntropyLoss(ignore_index=-1) # -1 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) outputs = (masked_lm_loss,) + outputs - return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions) + if lm_labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + prediction_scores = prediction_scores[:, :-1, :].contiguous() + lm_labels = lm_labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss(ignore_index=-1) + ltr_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), lm_labels.view(-1)) + outputs = (ltr_lm_loss,) + outputs + + return outputs # (masked_lm_loss), (ltr_lm_loss), prediction_scores, (hidden_states), (attentions) @add_start_docstrings("""Bert Model with a `next sentence prediction (classification)` head on top. 
""", - BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) + BERT_START_DOCSTRING, + BERT_INPUTS_DOCSTRING) class BertForNextSentencePrediction(BertPreTrainedModel): r""" **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: @@ -808,7 +938,7 @@ class BertForNextSentencePrediction(BertPreTrainedModel): tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForNextSentencePrediction.from_pretrained('bert-base-uncased') - input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 outputs = model(input_ids) seq_relationship_scores = outputs[0] @@ -821,14 +951,15 @@ class BertForNextSentencePrediction(BertPreTrainedModel): self.init_weights() - def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, next_sentence_label=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask) + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds) pooled_output = outputs[1] @@ -844,8 +975,9 @@ class BertForNextSentencePrediction(BertPreTrainedModel): @add_start_docstrings("""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of - the pooled output) e.g. for GLUE tasks. """, - BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) + the pooled output) e.g. for GLUE tasks. """, + BERT_START_DOCSTRING, + BERT_INPUTS_DOCSTRING) class BertForSequenceClassification(BertPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: @@ -871,7 +1003,7 @@ class BertForSequenceClassification(BertPreTrainedModel): tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForSequenceClassification.from_pretrained('bert-base-uncased') - input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, logits = outputs[:2] @@ -887,14 +1019,15 @@ class BertForSequenceClassification(BertPreTrainedModel): self.init_weights() - def forward(self, input_ids, attention_mask=None, token_type_ids=None, - position_ids=None, head_mask=None, labels=None): + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, + position_ids=None, head_mask=None, inputs_embeds=None, labels=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask) + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds) pooled_output = outputs[1] @@ -917,8 +1050,9 @@ class BertForSequenceClassification(BertPreTrainedModel): @add_start_docstrings("""Bert Model with a multiple choice classification head on top (a linear layer on top of - the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, - BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) + the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", + BERT_START_DOCSTRING, + BERT_INPUTS_DOCSTRING) class BertForMultipleChoice(BertPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: @@ -945,7 +1079,7 @@ class BertForMultipleChoice(BertPreTrainedModel): tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForMultipleChoice.from_pretrained('bert-base-uncased') choices = ["Hello, my dog is cute", "Hello, my cat is amazing"] - input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices + input_ids = torch.tensor([tokenizer.encode(s, add_special_tokens=True) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices labels = torch.tensor(1).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, classification_scores = outputs[:2] @@ -960,8 +1094,8 @@ class BertForMultipleChoice(BertPreTrainedModel): self.init_weights() - def forward(self, input_ids, attention_mask=None, token_type_ids=None, - position_ids=None, head_mask=None, labels=None): + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, + position_ids=None, head_mask=None, inputs_embeds=None, labels=None): num_choices = input_ids.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) @@ -973,7 +1107,8 @@ class BertForMultipleChoice(BertPreTrainedModel): attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) pooled_output = outputs[1] @@ -992,8 +1127,9 @@ class BertForMultipleChoice(BertPreTrainedModel): @add_start_docstrings("""Bert Model with a token classification head on top (a linear layer on top of - the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, - BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) + the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", + BERT_START_DOCSTRING, + BERT_INPUTS_DOCSTRING) class BertForTokenClassification(BertPreTrainedModel): r""" **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: @@ -1017,7 +1153,7 @@ class BertForTokenClassification(BertPreTrainedModel): tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForTokenClassification.from_pretrained('bert-base-uncased') - input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=labels) loss, scores = outputs[:2] @@ -1033,14 +1169,15 @@ class BertForTokenClassification(BertPreTrainedModel): self.init_weights() - def forward(self, input_ids, attention_mask=None, token_type_ids=None, - position_ids=None, head_mask=None, labels=None): + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, + position_ids=None, head_mask=None, inputs_embeds=None, labels=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask) + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds) sequence_output = outputs[0] @@ -1064,8 +1201,9 @@ class BertForTokenClassification(BertPreTrainedModel): @add_start_docstrings("""Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of - the hidden-states output to compute `span start logits` and `span end logits`). """, - BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) + the hidden-states output to compute `span start logits` and `span end logits`). 
""", + BERT_START_DOCSTRING, + BERT_INPUTS_DOCSTRING) class BertForQuestionAnswering(BertPreTrainedModel): r""" **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: @@ -1099,9 +1237,9 @@ class BertForQuestionAnswering(BertPreTrainedModel): question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" input_text = "[CLS] " + question + " [SEP] " + text + " [SEP]" input_ids = tokenizer.encode(input_text) - token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))] + token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))] start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids])) - all_tokens = tokenizer.convert_ids_to_tokens(input_ids) + all_tokens = tokenizer.convert_ids_to_tokens(input_ids) print(' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])) # a nice puppet @@ -1116,14 +1254,15 @@ class BertForQuestionAnswering(BertPreTrainedModel): self.init_weights() - def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask) + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds) sequence_output = outputs[0] diff --git a/transformers/modeling_camembert.py b/transformers/modeling_camembert.py new file mode 100644 index 00000000000..f302346f2d6 --- /dev/null +++ b/transformers/modeling_camembert.py @@ -0,0 +1,293 @@ +# coding=utf-8 +# Copyright 2019 Inria, Facebook AI Research and the HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch CamemBERT model. """ + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import logging + +from .modeling_roberta import RobertaModel, RobertaForMaskedLM, RobertaForSequenceClassification, RobertaForMultipleChoice, RobertaForTokenClassification +from .configuration_camembert import CamembertConfig +from .file_utils import add_start_docstrings + +logger = logging.getLogger(__name__) + +CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP = { + 'camembert-base': "https://s3.amazonaws.com/models.huggingface.co/bert/camembert-base-pytorch_model.bin", +} + + +CAMEMBERT_START_DOCSTRING = r""" The CamemBERT model was proposed in + `CamemBERT: a Tasty French Language Model`_ + by Louis Martin, Benjamin Muller, Pedro Javier Ortiz Suárez, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah, and Benoît Sagot. It is based on Facebook's RoBERTa model released in 2019. + + It is a model trained on 138GB of French text. + + This implementation is the same as RoBERTa. 
+
+    This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
+    refer to the PyTorch documentation for all matter related to general usage and behavior.
+
+    .. _`CamemBERT: a Tasty French Language Model`:
+        https://arxiv.org/abs/1911.03894
+
+    .. _`torch.nn.Module`:
+        https://pytorch.org/docs/stable/nn.html#module
+
+    Parameters:
+        config (:class:`~transformers.CamembertConfig`): Model configuration class with all the parameters of the
+            model. Initializing with a config file does not load the weights associated with the model, only the configuration.
+            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
+"""
+
+CAMEMBERT_INPUTS_DOCSTRING = r"""
+    Inputs:
+        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
+            Indices of input sequence tokens in the vocabulary.
+            To match pre-training, CamemBERT input sequences should be formatted with <s> and </s> tokens as follows:
+
+            (a) For sequence pairs:
+
+                ``tokens: <s> Is this Jacksonville ? </s> </s> No it is not . </s>``
+
+            (b) For single sequences:
+
+                ``tokens: <s> the dog is hairy . </s>``
+
+            Fully encoded sequences or sequence pairs can be obtained using the CamembertTokenizer.encode function with
+            the ``add_special_tokens`` parameter set to ``True``.
+
+            CamemBERT is a model with absolute position embeddings, so it's usually advised to pad the inputs on
+            the right rather than the left.
+
+            See :func:`transformers.PreTrainedTokenizer.encode` and
+            :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
+        **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
+            Mask to avoid performing attention on padding token indices.
+            Mask values selected in ``[0, 1]``:
+            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
+        **token_type_ids**: (`optional`, needs to be trained) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
+            Optional segment token indices to indicate first and second portions of the inputs.
+            This embedding matrix is not trained (not pretrained during CamemBERT pretraining), you will have to train it
+            during finetuning.
+            Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
+            corresponds to a `sentence B` token
+            (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
+        **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
+            Indices of positions of each input sequence tokens in the position embeddings.
+            Selected in the range ``[0, config.max_position_embeddings - 1]``.
+        **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
+            Mask to nullify selected heads of the self-attention modules.
+            Mask values selected in ``[0, 1]``:
+            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
+        **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``:
+            Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation.
+            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+            than the model's internal embedding lookup matrix.
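For reference, a minimal sketch of the encoding convention documented above, using only the public tokenizer API (the exact sub-word pieces depend on the SentencePiece vocabulary):

    tokenizer = CamembertTokenizer.from_pretrained('camembert-base')
    # single sequence: <s> ... </s> is added automatically
    input_ids = tokenizer.encode("J'aime le camembert !", add_special_tokens=True)
    # sequence pair: <s> A </s> </s> B </s>
    pair_ids = tokenizer.encode("Is this Jacksonville ?", "No it is not .", add_special_tokens=True)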
+"""
+
+@add_start_docstrings("The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.",
+                      CAMEMBERT_START_DOCSTRING, CAMEMBERT_INPUTS_DOCSTRING)
+class CamembertModel(RobertaModel):
+    r"""
+    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
+        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
+            Sequence of hidden-states at the output of the last layer of the model.
+        **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
+            Last layer hidden-state of the first token of the sequence (classification token)
+            further processed by a Linear layer and a Tanh activation function. The Linear
+            layer weights are trained from the next sentence prediction (classification)
+            objective during Bert pretraining. This output is usually *not* a good summary
+            of the semantic content of the input, you're often better off averaging or pooling
+            the sequence of hidden-states for the whole input sequence.
+        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
+            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
+            of shape ``(batch_size, sequence_length, hidden_size)``:
+            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
+            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
+            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
+
+    Examples::
+
+        tokenizer = CamembertTokenizer.from_pretrained('camembert-base')
+        model = CamembertModel.from_pretrained('camembert-base')
+        input_ids = torch.tensor(tokenizer.encode("J'aime le camembert !")).unsqueeze(0)  # Batch size 1
+        outputs = model(input_ids)
+        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple
+
+    """
+    config_class = CamembertConfig
+    pretrained_model_archive_map = CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP
+
+
+@add_start_docstrings("""CamemBERT Model with a `language modeling` head on top. """,
+                      CAMEMBERT_START_DOCSTRING, CAMEMBERT_INPUTS_DOCSTRING)
+class CamembertForMaskedLM(RobertaForMaskedLM):
+    r"""
+    **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
+        Labels for computing the masked language modeling loss.
+        Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
+        Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
+        in ``[0, ..., config.vocab_size]``
+
+    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
+        **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
+            Masked language modeling loss.
+ **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + + Examples:: + + tokenizer = CamembertTokenizer.from_pretrained('camembert-base') + model = CamembertForMaskedLM.from_pretrained('camembert-base') + input_ids = torch.tensor(tokenizer.encode("J'aime le camembert !")).unsqueeze(0) # Batch size 1 + outputs = model(input_ids, masked_lm_labels=input_ids) + loss, prediction_scores = outputs[:2] + + """ + config_class = CamembertConfig + pretrained_model_archive_map = CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP + + +@add_start_docstrings("""CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer + on top of the pooled output) e.g. for GLUE tasks. """, + CAMEMBERT_START_DOCSTRING, CAMEMBERT_INPUTS_DOCSTRING) +class CamembertForSequenceClassification(RobertaForSequenceClassification): + r""" + **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: + Labels for computing the sequence classification/regression loss. + Indices should be in ``[0, ..., config.num_labels]``. + If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), + If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Classification (or regression if config.num_labels==1) loss. + **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` + Classification (or regression if config.num_labels==1) scores (before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
+ + Examples:: + + tokenizer = CamembertTokenizer.from_pretrained('camembert-base') + model = CamembertForSequenceClassification.from_pretrained('camembert-base') + input_ids = torch.tensor(tokenizer.encode("J'aime le camembert !")).unsqueeze(0) # Batch size 1 + labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 + outputs = model(input_ids, labels=labels) + loss, logits = outputs[:2] + + """ + config_class = CamembertConfig + pretrained_model_archive_map = CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP + + +@add_start_docstrings("""CamemBERT Model with a multiple choice classification head on top (a linear layer on top of + the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, + CAMEMBERT_START_DOCSTRING, CAMEMBERT_INPUTS_DOCSTRING) +class CamembertForMultipleChoice(RobertaForMultipleChoice): + r""" + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Classification loss. + **classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension + of the input tensors. (see `input_ids` above). + Classification scores (before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + + Examples:: + + tokenizer = CamembertTokenizer.from_pretrained('camembert-base') + model = CamembertForMultipleChoice.from_pretrained('camembert-base') + choices = ["J'aime le camembert !", "Je deteste le camembert !"] + input_ids = torch.tensor([tokenizer.encode(s, add_special_tokens=True) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices + labels = torch.tensor(1).unsqueeze(0) # Batch size 1 + outputs = model(input_ids, labels=labels) + loss, classification_scores = outputs[:2] + + """ + config_class = CamembertConfig + pretrained_model_archive_map = CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP + + +@add_start_docstrings("""CamemBERT Model with a token classification head on top (a linear layer on top of + the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, + CAMEMBERT_START_DOCSTRING, CAMEMBERT_INPUTS_DOCSTRING) +class CamembertForTokenClassification(RobertaForTokenClassification): + r""" + **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Labels for computing the token classification loss. + Indices should be in ``[0, ..., config.num_labels - 1]``. + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Classification loss. + **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` + Classification scores (before SoftMax). 
+ **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + + Examples:: + + tokenizer = CamembertTokenizer.from_pretrained('camembert-base') + model = CamembertForTokenClassification.from_pretrained('camembert-base') + input_ids = torch.tensor(tokenizer.encode("J'aime le camembert !", add_special_tokens=True)).unsqueeze(0) # Batch size 1 + labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1 + outputs = model(input_ids, labels=labels) + loss, scores = outputs[:2] + + """ + config_class = CamembertConfig + pretrained_model_archive_map = CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP diff --git a/transformers/modeling_ctrl.py b/transformers/modeling_ctrl.py index 55e64d318ba..97bcb144349 100644 --- a/transformers/modeling_ctrl.py +++ b/transformers/modeling_ctrl.py @@ -63,7 +63,8 @@ def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=N scaled_attention_logits = matmul_qk / np.sqrt(dk) if mask is not None: - scaled_attention_logits += (mask * -1e4) + nd, ns = scaled_attention_logits.size(-2), scaled_attention_logits.size(-1) + scaled_attention_logits += (mask[ns-nd:ns, :ns] * -1e4) if attention_mask is not None: # Apply the attention mask @@ -220,7 +221,8 @@ CTRL_INPUTS_DOCSTRING = r""" Inputs: **past**: list of ``torch.FloatTensor`` (one for each layer): that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model - (see `past` output below). Can be used to speed up sequential decoding. + (see `past` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model + should not be passed as input ids as they have already been computed. **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: @@ -236,6 +238,10 @@ CTRL_INPUTS_DOCSTRING = r""" Inputs: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. """ @add_start_docstrings("The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.", @@ -246,9 +252,10 @@ class CTRLModel(CTRLPreTrainedModel): **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the last layer of the model. 
**past**: - list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``: that contains pre-computed hidden-states (key and values in the attention blocks). - Can be used (see `past` input) to speed up sequential decoding. + Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model + should not be passed as input ids as they have already been computed. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: @@ -289,10 +296,12 @@ class CTRLModel(CTRLPreTrainedModel): self.init_weights() - def _resize_token_embeddings(self, new_num_tokens): - self.w = self._get_resized_embeddings(self.w, new_num_tokens) + def get_input_embeddings(self): return self.w + def set_input_embeddings(self, new_embeddings): + self.w = new_embeddings + def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} @@ -300,17 +309,26 @@ class CTRLModel(CTRLPreTrainedModel): for layer, heads in heads_to_prune.items(): self.h[layer].attn.prune_heads(heads) - def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None): - input_shape = input_ids.size() - input_ids = input_ids.view(-1, input_shape[-1]) + def forward(self, input_ids=None, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None): + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + if past is None: past_length = 0 past = [None] * len(self.h) else: past_length = past[0][0].size(-2) if position_ids is None: - position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device) - position_ids = position_ids.unsqueeze(0).expand_as(input_ids) + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) # Attention mask. 
if attention_mask is not None: @@ -352,10 +370,11 @@ class CTRLModel(CTRLPreTrainedModel): token_type_embeds = 0 position_ids = position_ids.view(-1, input_shape[-1]) - inputs_embeds = self.w(input_ids) + if inputs_embeds is None: + inputs_embeds = self.w(input_ids) # inputs_embeds = embedded.unsqueeze(0) if len(input_ids.shape)<2 else embedded - seq_len = input_ids.shape[-1] - mask = torch.triu(torch.ones(seq_len, seq_len), 1).to(inputs_embeds.device) + seq_len = input_shape[-1] + mask = torch.triu(torch.ones(seq_len + past_length, seq_len + past_length), 1).to(inputs_embeds.device) inputs_embeds *= np.sqrt(self.d_model_size) @@ -419,9 +438,10 @@ class CTRLLMHeadModel(CTRLPreTrainedModel): **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **past**: - list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``: that contains pre-computed hidden-states (key and values in the attention blocks). - Can be used (see `past` input) to speed up sequential decoding. + Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model + should not be passed as input ids as they have already been computed. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: @@ -449,22 +469,19 @@ class CTRLLMHeadModel(CTRLPreTrainedModel): self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=True) self.init_weights() - self.tie_weights() - def tie_weights(self): - """ Make sure we are sharing the input and output embeddings. - Export to TorchScript can't handle parameter sharing so we are cloning them instead. 
- """ - self._tie_or_clone_weights(self.lm_head, self.transformer.w) + def get_output_embeddings(self): + return self.lm_head - def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, + def forward(self, input_ids=None, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None): transformer_outputs = self.transformer(input_ids, past=past, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) hidden_states = transformer_outputs[0] diff --git a/transformers/modeling_distilbert.py b/transformers/modeling_distilbert.py index d3b4ccff5dc..6faeafa15ee 100644 --- a/transformers/modeling_distilbert.py +++ b/transformers/modeling_distilbert.py @@ -30,6 +30,7 @@ import numpy as np import torch import torch.nn as nn +from torch.nn import CrossEntropyLoss from .modeling_utils import PreTrainedModel, prune_linear_layer from .configuration_distilbert import DistilBertConfig @@ -41,7 +42,9 @@ logger = logging.getLogger(__name__) DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP = { 'distilbert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-pytorch_model.bin", - 'distilbert-base-uncased-distilled-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-distilled-squad-pytorch_model.bin" + 'distilbert-base-uncased-distilled-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-distilled-squad-pytorch_model.bin", + 'distilbert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-pytorch_model.bin", + 'distilbert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-multilingual-cased-pytorch_model.bin", } @@ -334,9 +337,6 @@ class DistilBertPreTrainedModel(PreTrainedModel): load_tf_weights = None base_model_prefix = "distilbert" - def __init__(self, *inputs, **kwargs): - super(DistilBertPreTrainedModel, self).__init__(*inputs, **kwargs) - def _init_weights(self, module): """ Initialize the weights. """ @@ -390,6 +390,10 @@ DISTILBERT_INPUTS_DOCSTRING = r""" Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. 
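A minimal sketch of the new ``inputs_embeds`` path described above. It reuses the model's own embedding module, so the result matches passing ``input_ids`` directly; any other ``(batch_size, sequence_length, dim)`` tensor could be substituted:

    import torch
    from transformers import DistilBertModel, DistilBertTokenizer

    tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
    model = DistilBertModel.from_pretrained('distilbert-base-uncased')

    input_ids = torch.tensor([tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)])
    inputs_embeds = model.embeddings(input_ids)      # (1, seq_len, dim): word + position embeddings
    outputs = model(inputs_embeds=inputs_embeds)     # equivalent to model(input_ids)
    last_hidden_state = outputs[0]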
""" @add_start_docstrings("The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.", @@ -424,12 +428,12 @@ class DistilBertModel(DistilBertPreTrainedModel): self.init_weights() - def _resize_token_embeddings(self, new_num_tokens): - old_embeddings = self.embeddings.word_embeddings - new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) - self.embeddings.word_embeddings = new_embeddings + def get_input_embeddings(self): return self.embeddings.word_embeddings + def set_input_embeddings(self, new_embeddings): + self.embeddings.word_embeddings = new_embeddings + def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} @@ -439,9 +443,20 @@ class DistilBertModel(DistilBertPreTrainedModel): self.transformer.layer[layer].attention.prune_heads(heads) def forward(self, - input_ids, attention_mask=None, head_mask=None): + input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None): + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + if attention_mask is None: - attention_mask = torch.ones_like(input_ids) # (bs, seq_length) + attention_mask = torch.ones(input_shape, device=device) # (bs, seq_length) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head @@ -458,8 +473,9 @@ class DistilBertModel(DistilBertPreTrainedModel): else: head_mask = [None] * self.config.num_hidden_layers - embedding_output = self.embeddings(input_ids) # (bs, seq_length, dim) - tfmr_output = self.transformer(x=embedding_output, + if inputs_embeds is None: + inputs_embeds = self.embeddings(input_ids) # (bs, seq_length, dim) + tfmr_output = self.transformer(x=inputs_embeds, attn_mask=attention_mask, head_mask=head_mask) hidden_state = tfmr_output[0] @@ -511,21 +527,17 @@ class DistilBertForMaskedLM(DistilBertPreTrainedModel): self.vocab_projector = nn.Linear(config.dim, config.vocab_size) self.init_weights() - self.tie_weights() self.mlm_loss_fct = nn.CrossEntropyLoss(ignore_index=-1) - def tie_weights(self): - """ Make sure we are sharing the input and output embeddings. - Export to TorchScript can't handle parameter sharing so we are cloning them instead. 
- """ - self._tie_or_clone_weights(self.vocab_projector, - self.distilbert.embeddings.word_embeddings) + def get_output_embeddings(self): + return self.vocab_projector - def forward(self, input_ids, attention_mask=None, head_mask=None, masked_lm_labels=None): + def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, masked_lm_labels=None): dlbrt_output = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) hidden_states = dlbrt_output[0] # (bs, seq_length, dim) prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim) prediction_logits = gelu(prediction_logits) # (bs, seq_length, dim) @@ -586,10 +598,11 @@ class DistilBertForSequenceClassification(DistilBertPreTrainedModel): self.init_weights() - def forward(self, input_ids, attention_mask=None, head_mask=None, labels=None): + def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, labels=None): distilbert_output = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) hidden_state = distilbert_output[0] # (bs, seq_len, dim) pooled_output = hidden_state[:, 0] # (bs, dim) pooled_output = self.pre_classifier(pooled_output) # (bs, dim) @@ -660,10 +673,11 @@ class DistilBertForQuestionAnswering(DistilBertPreTrainedModel): self.init_weights() - def forward(self, input_ids, attention_mask=None, head_mask=None, start_positions=None, end_positions=None): + def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None): distilbert_output = self.distilbert(input_ids=input_ids, attention_mask=attention_mask, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) hidden_states = distilbert_output[0] # (bs, max_query_len, dim) hidden_states = self.dropout(hidden_states) # (bs, max_query_len, dim) @@ -691,3 +705,75 @@ class DistilBertForQuestionAnswering(DistilBertPreTrainedModel): outputs = (total_loss,) + outputs return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions) + + +@add_start_docstrings("""DistilBert Model with a token classification head on top (a linear layer on top of + the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, + DISTILBERT_START_DOCSTRING, + DISTILBERT_INPUTS_DOCSTRING) +class DistilBertForTokenClassification(DistilBertPreTrainedModel): + r""" + **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Labels for computing the token classification loss. + Indices should be in ``[0, ..., config.num_labels - 1]``. + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Classification loss. + **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` + Classification scores (before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
+ **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + + Examples:: + + tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased') + model = DistilBertForTokenClassification.from_pretrained('distilbert-base-uncased') + input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1 + outputs = model(input_ids, labels=labels) + loss, scores = outputs[:2] + + """ + def __init__(self, config): + super(DistilBertForTokenClassification, self).__init__(config) + self.num_labels = config.num_labels + + self.distilbert = DistilBertModel(config) + self.dropout = nn.Dropout(config.dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + self.init_weights() + + def forward(self, input_ids=None, attention_mask=None, head_mask=None, + inputs_embeds=None, labels=None): + + outputs = self.distilbert(input_ids, + attention_mask=attention_mask, + head_mask=head_mask, + inputs_embeds=inputs_embeds) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here + if labels is not None: + loss_fct = CrossEntropyLoss() + # Only keep active parts of the loss + if attention_mask is not None: + active_loss = attention_mask.view(-1) == 1 + active_logits = logits.view(-1, self.num_labels)[active_loss] + active_labels = labels.view(-1)[active_loss] + loss = loss_fct(active_logits, active_labels) + else: + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + outputs = (loss,) + outputs + + return outputs # (loss), scores, (hidden_states), (attentions) diff --git a/transformers/modeling_encoder_decoder.py b/transformers/modeling_encoder_decoder.py new file mode 100644 index 00000000000..70f765b849e --- /dev/null +++ b/transformers/modeling_encoder_decoder.py @@ -0,0 +1,312 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" Classes to support Encoder-Decoder architectures """ + +from __future__ import absolute_import, division, print_function, unicode_literals + +import logging +import os + +import torch +from torch import nn + +from .modeling_auto import AutoModel, AutoModelWithLMHead + +logger = logging.getLogger(__name__) + + +class PreTrainedEncoderDecoder(nn.Module): + r""" + :class:`~transformers.PreTrainedEncoderDecoder` is a generic model class that will be + instantiated as a transformer architecture with one of the base model + classes of the library as encoder and (optionally) another one as + decoder when created with the `AutoModel.from_pretrained(pretrained_model_name_or_path)` + class method. + """ + + def __init__(self, encoder, decoder): + super(PreTrainedEncoderDecoder, self).__init__() + self.encoder = encoder + self.decoder = decoder + + @classmethod + def from_pretrained( + cls, + encoder_pretrained_model_name_or_path=None, + decoder_pretrained_model_name_or_path=None, + *model_args, + **kwargs + ): + r""" Instantiates an encoder and a decoder from one or two base classes of the library from pre-trained model checkpoints. + + + The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated) + To train the model, you need to first set it back in training mode with `model.train()` + + Params: + encoder_pretrained_model_name_or_path: information necessary to initiate the encoder. Either: + + - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. + - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. + - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/encoder``. + - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. + + decoder_pretrained_model_name_or_path: information necessary to initiate the decoder. Either: + + - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. + - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. + - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/decoder``. + - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. + + model_args: (`optional`) Sequence of positional arguments: + All remaning positional arguments will be passed to the underlying model's ``__init__`` method + + config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`: + Configuration for the model to use instead of an automatically loaded configuation. 
Configuration can be automatically loaded when: + + - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or + - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory. + - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. + + state_dict: (`optional`) dict: + an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file. + This option can be used if you want to create a model from a pretrained configuration but load your own weights. + In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option. + + cache_dir: (`optional`) string: + Path to a directory in which a downloaded pre-trained model + configuration should be cached if the standard cache should not be used. + + force_download: (`optional`) boolean, default False: + Force to (re-)download the model weights and configuration files and override the cached versions if they exists. + + proxies: (`optional`) dict, default None: + A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. + The proxies are used on each request. + + output_loading_info: (`optional`) boolean: + Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages. + + kwargs: (`optional`) Remaining dictionary of keyword arguments. + Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded: + + - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) + - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. + + You can specify kwargs sepcific for the encoder and decoder by prefixing the key with `encoder_` and `decoder_` respectively. (e.g. ``decoder_output_attention=True``). The remaining kwargs will be passed to both encoders and decoders. + + Examples:: + + model = PreTrainedEncoderDecoder.from_pretained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert + """ + + # keyword arguments come in 3 flavors: encoder-specific (prefixed by + # `encoder_`), decoder-specific (prefixed by `decoder_`) and those + # that apply to the model as a whole. + # We let the specific kwargs override the common ones in case of conflict. 
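# Illustrative sketch of the prefixing convention described above (hypothetical call):
#     PreTrainedEncoderDecoder.from_pretrained('bert-base-uncased', 'bert-base-uncased',
#                                              output_attentions=True,             # applies to encoder and decoder
#                                              decoder_output_hidden_states=True)  # applies to the decoder only
# The splitting below routes each keyword argument to the right sub-model.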
+ kwargs_common = { + argument: value + for argument, value in kwargs.items() + if not argument.startswith("encoder_") + and not argument.startswith("decoder_") + } + kwargs_decoder = kwargs_common.copy() + kwargs_encoder = kwargs_common.copy() + kwargs_encoder.update( + { + argument[len("encoder_") :]: value + for argument, value in kwargs.items() + if argument.startswith("encoder_") + } + ) + kwargs_decoder.update( + { + argument[len("decoder_") :]: value + for argument, value in kwargs.items() + if argument.startswith("decoder_") + } + ) + + # Load and initialize the encoder and decoder + # The distinction between encoder and decoder at the model level is made + # by the value of the flag `is_decoder` that we need to set correctly. + encoder = kwargs_encoder.pop("model", None) + if encoder is None: + encoder = AutoModel.from_pretrained( + encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder + ) + encoder.config.is_decoder = False + + decoder = kwargs_decoder.pop("model", None) + if decoder is None: + decoder = AutoModelWithLMHead.from_pretrained( + decoder_pretrained_model_name_or_path, **kwargs_decoder + ) + decoder.config.is_decoder = True + + model = cls(encoder, decoder) + + return model + + def save_pretrained(self, save_directory): + """ Save a Seq2Seq model and its configuration file in a format such + that it can be loaded using `:func:`~transformers.PreTrainedEncoderDecoder.from_pretrained` + + We save the encoder' and decoder's parameters in two separate directories. + """ + self.encoder.save_pretrained(os.path.join(save_directory, "encoder")) + self.decoder.save_pretrained(os.path.join(save_directory, "decoder")) + + def forward(self, encoder_input_ids, decoder_input_ids, **kwargs): + """ The forward pass on a seq2eq depends what we are performing: + + - During training we perform one forward pass through both the encoder + and decoder; + - During prediction, we perform one forward pass through the encoder, + and then perform several forward passes with the encoder's hidden + state through the decoder to decode a full sequence. + + Therefore, we skip the forward pass on the encoder if an argument named + `encoder_hidden_state` is passed to this function. + + Params: + encoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)`` + Indices of encoder input sequence tokens in the vocabulary. + decoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)`` + Indices of decoder input sequence tokens in the vocabulary. + kwargs: (`optional`) Remaining dictionary of keyword arguments. + """ + # keyword arguments come in 3 flavors: encoder-specific (prefixed by + # `encoder_`), decoder-specific (prefixed by `decoder_`) and those + # that apply to the model as whole. + # We let the specific kwargs override the common ones in case of conflict. 
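# Rough usage sketch of the two cases handled below (tensor names are hypothetical):
#     # training / first prediction pass: run the encoder and the decoder in one call
#     outputs = model(encoder_input_ids, decoder_input_ids)
#     # subsequent decoding passes: hand the cached encoder output back in to skip re-encoding
#     outputs = model(encoder_input_ids, decoder_input_ids,
#                     encoder_hidden_states=cached_encoder_hidden_states)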
+        kwargs_common = {
+            argument: value
+            for argument, value in kwargs.items()
+            if not argument.startswith("encoder_")
+            and not argument.startswith("decoder_")
+        }
+        kwargs_decoder = kwargs_common.copy()
+        kwargs_encoder = kwargs_common.copy()
+        kwargs_encoder.update(
+            {
+                argument[len("encoder_") :]: value
+                for argument, value in kwargs.items()
+                if argument.startswith("encoder_")
+            }
+        )
+        kwargs_decoder.update(
+            {
+                argument[len("decoder_") :]: value
+                for argument, value in kwargs.items()
+                if argument.startswith("decoder_")
+            }
+        )
+
+        # Encode if needed (training, first prediction pass)
+        encoder_hidden_states = kwargs_encoder.pop("hidden_states", None)
+        if encoder_hidden_states is None:
+            encoder_outputs = self.encoder(encoder_input_ids, **kwargs_encoder)
+            encoder_hidden_states = encoder_outputs[
+                0
+            ]  # output the last layer hidden state
+        else:
+            encoder_outputs = ()
+
+        # Decode
+        kwargs_decoder["encoder_hidden_states"] = encoder_hidden_states
+        kwargs_decoder["encoder_attention_mask"] = kwargs_encoder.get(
+            "attention_mask", None
+        )
+        decoder_outputs = self.decoder(decoder_input_ids, **kwargs_decoder)
+
+        return decoder_outputs + encoder_outputs
+
+
+class Model2Model(PreTrainedEncoderDecoder):
+    r"""
+    :class:`~transformers.Model2Model` instantiates a Seq2Seq model
+    where both the encoder and the decoder are of the same family. If the
+    name of or the path to a pretrained model is specified, the encoder and
+    the decoder will be initialized with the pretrained weights (the
+    cross-attention will be initialized randomly if its weights are not
+    present).
+
+    It is possible to override this behavior and initialize, say, the decoder randomly
+    by creating it beforehand as follows:
+
+        config = BertConfig.from_pretrained()
+        decoder = BertForMaskedLM(config)
+        model = Model2Model.from_pretrained('bert-base-uncased', decoder_model=decoder)
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(Model2Model, self).__init__(*args, **kwargs)
+        self.tie_weights()
+
+    def tie_weights(self):
+        """ Tie the encoder's and the decoder's embeddings together.
+
+        For each model we need to get down to the embedding weights; however, the
+        different model classes are inconsistent in that respect:
+        - BertModel: embeddings.word_embeddings
+        - RoBERTa: embeddings.word_embeddings
+        - XLMModel: embeddings
+        - GPT2: wte
+        - BertForMaskedLM: bert.embeddings.word_embeddings
+        - RobertaForMaskedLM: roberta.embeddings.word_embeddings
+
+        argument of the XEmbedding layer for each model, but it is "blocked"
+        by a model-specific keyword (bert, )...
+ """ + # self._tie_or_clone_weights(self.encoder, self.decoder) + pass + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs): + + if ( + "bert" not in pretrained_model_name_or_path + or "roberta" in pretrained_model_name_or_path + or "distilbert" in pretrained_model_name_or_path + ): + raise ValueError("Only the Bert model is currently supported.") + + model = super(Model2Model, cls).from_pretrained( + encoder_pretrained_model_name_or_path=pretrained_model_name_or_path, + decoder_pretrained_model_name_or_path=pretrained_model_name_or_path, + *args, + **kwargs + ) + + return model + + +class Model2LSTM(PreTrainedEncoderDecoder): + @classmethod + def from_pretrained(cls, *args, **kwargs): + if kwargs.get("decoder_model", None) is None: + # We will create a randomly initilized LSTM model as decoder + if "decoder_config" not in kwargs: + raise ValueError( + "To load an LSTM in Encoder-Decoder model, please supply either: " + " - a torch.nn.LSTM model as `decoder_model` parameter (`decoder_model=lstm_model`), or" + " - a dictionary of configuration parameters that will be used to initialize a" + " torch.nn.LSTM model as `decoder_config` keyword argument. " + " E.g. `decoder_config={'input_size': 768, 'hidden_size': 768, 'num_layers': 2}`" + ) + kwargs["decoder_model"] = torch.nn.LSTM(kwargs.pop("decoder_config")) + model = super(Model2LSTM, cls).from_pretrained(*args, **kwargs) + return model diff --git a/transformers/modeling_gpt2.py b/transformers/modeling_gpt2.py index 0b5b83aa751..96fd1c06077 100644 --- a/transformers/modeling_gpt2.py +++ b/transformers/modeling_gpt2.py @@ -39,6 +39,7 @@ logger = logging.getLogger(__name__) GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin", "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-pytorch_model.bin", "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-pytorch_model.bin", + "gpt2-xl": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-pytorch_model.bin", "distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-pytorch_model.bin",} def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path): @@ -297,7 +298,8 @@ GPT2_INPUTS_DOCSTRING = r""" Inputs: **past**: list of ``torch.FloatTensor`` (one for each layer): that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model - (see `past` output below). Can be used to speed up sequential decoding. + (see `past` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model + should not be passed as input ids as they have already been computed. **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: @@ -313,6 +315,10 @@ GPT2_INPUTS_DOCSTRING = r""" Inputs: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. 
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. """ @add_start_docstrings("The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.", @@ -323,9 +329,10 @@ class GPT2Model(GPT2PreTrainedModel): **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the last layer of the model. **past**: - list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``: that contains pre-computed hidden-states (key and values in the attention blocks). - Can be used (see `past` input) to speed up sequential decoding. + Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model + should not be passed as input ids as they have already been computed. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: @@ -357,10 +364,12 @@ class GPT2Model(GPT2PreTrainedModel): self.init_weights() - def _resize_token_embeddings(self, new_num_tokens): - self.wte = self._get_resized_embeddings(self.wte, new_num_tokens) + def get_input_embeddings(self): return self.wte + def set_input_embeddings(self, new_embeddings): + self.wte = new_embeddings + def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} @@ -368,9 +377,17 @@ class GPT2Model(GPT2PreTrainedModel): for layer, heads in heads_to_prune.items(): self.h[layer].attn.prune_heads(heads) - def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None): - input_shape = input_ids.size() - input_ids = input_ids.view(-1, input_shape[-1]) + def forward(self, input_ids=None, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None): + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) if position_ids is not None: @@ -382,8 +399,9 @@ class GPT2Model(GPT2PreTrainedModel): else: past_length = past[0][0].size(-2) if position_ids is None: - position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device) - position_ids = position_ids.unsqueeze(0).expand_as(input_ids) + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) # Attention mask. 
if attention_mask is not None: @@ -417,7 +435,8 @@ class GPT2Model(GPT2PreTrainedModel): else: head_mask = [None] * self.config.n_layer - inputs_embeds = self.wte(input_ids) + if inputs_embeds is None: + inputs_embeds = self.wte(input_ids) position_embeds = self.wpe(position_ids) if token_type_ids is not None: token_type_embeds = self.wte(token_type_ids) @@ -484,9 +503,10 @@ class GPT2LMHeadModel(GPT2PreTrainedModel): **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **past**: - list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``: that contains pre-computed hidden-states (key and values in the attention blocks). - Can be used (see `past` input) to speed up sequential decoding. + Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model + should not be passed as input ids as they have already been computed. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: @@ -514,23 +534,19 @@ class GPT2LMHeadModel(GPT2PreTrainedModel): self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.init_weights() - self.tie_weights() - def tie_weights(self): - """ Make sure we are sharing the input and output embeddings. - Export to TorchScript can't handle parameter sharing so we are cloning them instead. - """ - self._tie_or_clone_weights(self.lm_head, - self.transformer.wte) + def get_output_embeddings(self): + return self.lm_head - def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, + def forward(self, input_ids=None, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None): transformer_outputs = self.transformer(input_ids, past=past, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) @@ -580,9 +596,10 @@ class GPT2DoubleHeadsModel(GPT2PreTrainedModel): **mc_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` Prediction scores of the multiplechoice classification head (scores for each choice before SoftMax). **past**: - list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + list of ``torch.FloatTensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``: that contains pre-computed hidden-states (key and values in the attention blocks). - Can be used (see `past` input) to speed up sequential decoding. + Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model + should not be passed as input ids as they have already been computed. 
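A rough sketch of the incremental decoding pattern this enables (once ``past`` is reused, only the newly generated token id is fed back, not the full prefix):

    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    model = GPT2LMHeadModel.from_pretrained('gpt2')

    input_ids = torch.tensor(tokenizer.encode("Hello, my dog")).unsqueeze(0)   # Batch size 1
    logits, past = model(input_ids)[:2]                 # first pass over the full prompt, cache returned in `past`
    next_token = torch.argmax(logits[:, -1, :], dim=-1).unsqueeze(-1)
    logits, past = model(next_token, past=past)[:2]     # later passes: only the new token id is passed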
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: @@ -622,23 +639,19 @@ class GPT2DoubleHeadsModel(GPT2PreTrainedModel): self.multiple_choice_head = SequenceSummary(config) self.init_weights() - self.tie_weights() - def tie_weights(self): - """ Make sure we are sharing the input and output embeddings. - Export to TorchScript can't handle parameter sharing so we are cloning them instead. - """ - self._tie_or_clone_weights(self.lm_head, - self.transformer.wte) + def get_output_embeddings(self): + return self.lm_head - def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, + def forward(self, input_ids=None, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, mc_token_ids=None, lm_labels=None, mc_labels=None): transformer_outputs = self.transformer(input_ids, past=past, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) hidden_states = transformer_outputs[0] diff --git a/transformers/modeling_openai.py b/transformers/modeling_openai.py index 52f3b7db72a..4fe7ffee8b5 100644 --- a/transformers/modeling_openai.py +++ b/transformers/modeling_openai.py @@ -50,8 +50,10 @@ def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path): logger.info("Loading weights from {}".format(openai_checkpoint_folder_path)) - names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8')) - shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8')) + with open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8') as names_handle: + names = json.load(names_handle) + with open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8') as shapes_handle: + shapes = json.load(shapes_handle) offsets = np.cumsum([np.prod(shape) for shape in shapes]) init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)] init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1] @@ -322,6 +324,10 @@ OPENAI_GPT_INPUTS_DOCSTRING = r""" Inputs: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. 
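Both GPT-2 above and OpenAI GPT here gain the same `inputs_embeds` escape hatch. A minimal sketch with the GPT-2 variant, assuming the standard `gpt2` checkpoint: looking the embeddings up manually and passing them through `inputs_embeds` should reproduce the `input_ids` path exactly::

    import torch
    from transformers import GPT2Tokenizer, GPT2Model

    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    model = GPT2Model.from_pretrained('gpt2')
    model.eval()

    input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
    inputs_embeds = model.get_input_embeddings()(input_ids)  # same lookup the model would do internally

    with torch.no_grad():
        from_ids = model(input_ids)[0]
        from_embeds = model(inputs_embeds=inputs_embeds)[0]  # passing both at once raises a ValueError

    assert torch.allclose(from_ids, from_embeds)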
""" @add_start_docstrings("The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.", @@ -360,10 +366,12 @@ class OpenAIGPTModel(OpenAIGPTPreTrainedModel): self.init_weights() - def _resize_token_embeddings(self, new_num_tokens): - self.tokens_embed = self._get_resized_embeddings(self.tokens_embed, new_num_tokens) + def get_input_embeddings(self): return self.tokens_embed + def set_input_embeddings(self, new_embeddings): + self.tokens_embed = new_embeddings + def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} @@ -371,14 +379,22 @@ class OpenAIGPTModel(OpenAIGPTPreTrainedModel): for layer, heads in heads_to_prune.items(): self.h[layer].attn.prune_heads(heads) - def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None): + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None): + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + if position_ids is None: - # This was used when we had a single embedding matrice from position and token embeddings - # start = self.config.vocab_size + self.config.n_special - # end = start + input_ids.size(-1) - # position_ids = torch.arange(start, end, dtype=torch.long, device=input_ids.device) - position_ids = torch.arange(input_ids.size(-1), dtype=torch.long, device=input_ids.device) - position_ids = position_ids.unsqueeze(0).expand_as(input_ids) + # Code is different from when we had a single embedding matrice from position and token embeddings + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange(input_shape[-1], dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) # Attention mask. if attention_mask is not None: @@ -411,11 +427,8 @@ class OpenAIGPTModel(OpenAIGPTPreTrainedModel): else: head_mask = [None] * self.config.n_layer - input_shape = input_ids.size() - input_ids = input_ids.view(-1, input_ids.size(-1)) - position_ids = position_ids.view(-1, position_ids.size(-1)) - - inputs_embeds = self.tokens_embed(input_ids) + if inputs_embeds is None: + inputs_embeds = self.tokens_embed(input_ids) position_embeds = self.positions_embed(position_ids) if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) @@ -489,22 +502,18 @@ class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel): self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) self.init_weights() - self.tie_weights() - def tie_weights(self): - """ Make sure we are sharing the input and output embeddings. - Export to TorchScript can't handle parameter sharing so we are cloning them instead. 
- """ - self._tie_or_clone_weights(self.lm_head, - self.transformer.tokens_embed) + def get_output_embeddings(self): + return self.lm_head - def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None): transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) @@ -568,9 +577,12 @@ class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel): tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') model = OpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt') tokenizer.add_special_tokens({'cls_token': '[CLS]'}) # Add a [CLS] to the vocabulary (we should train it also!) + model.resize_token_embeddings(len(tokenizer)) + choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices - mc_token_ids = torch.tensor([input_ids.size(-1), input_ids.size(-1)]).unsqueeze(0) # Batch size 1 + mc_token_ids = torch.tensor([input_ids.size(-1)-1, input_ids.size(-1)-1]).unsqueeze(0) # Batch size 1 + outputs = model(input_ids, mc_token_ids=mc_token_ids) lm_prediction_scores, mc_prediction_scores = outputs[:2] @@ -583,22 +595,18 @@ class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel): self.multiple_choice_head = SequenceSummary(config) self.init_weights() - self.tie_weights() - def tie_weights(self): - """ Make sure we are sharing the input and output embeddings. - Export to TorchScript can't handle parameter sharing so we are cloning them instead. 
- """ - self._tie_or_clone_weights(self.lm_head, - self.transformer.tokens_embed) + def get_output_embeddings(self): + return self.lm_head - def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, mc_token_ids=None, lm_labels=None, mc_labels=None): transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) diff --git a/transformers/modeling_roberta.py b/transformers/modeling_roberta.py index eb340dc7fb9..fc27353d374 100644 --- a/transformers/modeling_roberta.py +++ b/transformers/modeling_roberta.py @@ -35,6 +35,8 @@ ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP = { 'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-pytorch_model.bin", 'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-pytorch_model.bin", 'distilroberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-pytorch_model.bin", + 'roberta-base-openai-detector': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-openai-detector-pytorch_model.bin", + 'roberta-large-openai-detector': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-openai-detector-pytorch_model.bin", } class RobertaEmbeddings(BertEmbeddings): @@ -48,16 +50,24 @@ class RobertaEmbeddings(BertEmbeddings): self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx) - def forward(self, input_ids, token_type_ids=None, position_ids=None): - seq_length = input_ids.size(1) + def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + device = input_ids.device if input_ids is not None else inputs_embeds.device + if position_ids is None: # Position numbers begin at padding_idx+1. Padding symbols are ignored. # cf. fairseq's `utils.make_positions` - position_ids = torch.arange(self.padding_idx+1, seq_length+self.padding_idx+1, dtype=torch.long, device=input_ids.device) - position_ids = position_ids.unsqueeze(0).expand_as(input_ids) + position_ids = torch.arange(self.padding_idx+1, seq_length+self.padding_idx+1, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0).expand(input_shape) return super(RobertaEmbeddings, self).forward(input_ids, token_type_ids=token_type_ids, - position_ids=position_ids) + position_ids=position_ids, + inputs_embeds=inputs_embeds) ROBERTA_START_DOCSTRING = r""" The RoBERTa model was proposed in @@ -126,6 +136,10 @@ ROBERTA_INPUTS_DOCSTRING = r""" Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. 
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. """ @add_start_docstrings("The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.", @@ -169,18 +183,11 @@ class RobertaModel(BertModel): self.embeddings = RobertaEmbeddings(config) self.init_weights() - def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None): - if input_ids[:, 0].sum().item() != 0: - logger.warning("A sequence with no special tokens has been passed to the RoBERTa model. " - "This model requires special tokens in order to work. " - "Please specify add_special_tokens=True in your tokenize.encode()" - "or tokenizer.convert_tokens_to_ids().") - return super(RobertaModel, self).forward(input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - head_mask=head_mask) + def get_input_embeddings(self): + return self.embeddings.word_embeddings + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value @add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """, ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING) @@ -225,21 +232,18 @@ class RobertaForMaskedLM(BertPreTrainedModel): self.lm_head = RobertaLMHead(config) self.init_weights() - self.tie_weights() - def tie_weights(self): - """ Make sure we are sharing the input and output embeddings. - Export to TorchScript can't handle parameter sharing so we are cloning them instead. - """ - self._tie_or_clone_weights(self.lm_head.decoder, self.roberta.embeddings.word_embeddings) + def get_output_embeddings(self): + return self.lm_head.decoder - def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, masked_lm_labels=None): outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) @@ -320,13 +324,14 @@ class RobertaForSequenceClassification(BertPreTrainedModel): self.roberta = RobertaModel(config) self.classifier = RobertaClassificationHead(config) - def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None): outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) sequence_output = outputs[0] logits = self.classifier(sequence_output) @@ -343,6 +348,7 @@ class RobertaForSequenceClassification(BertPreTrainedModel): return outputs # (loss), logits, (hidden_states), (attentions) + @add_start_docstrings("""Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING) @@ -382,6 +388,10 @@ class RobertaForMultipleChoice(BertPreTrainedModel): Mask to nullify selected heads of the self-attention modules. 
Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension @@ -425,8 +435,8 @@ class RobertaForMultipleChoice(BertPreTrainedModel): self.init_weights() - def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, - position_ids=None, head_mask=None): + def forward(self, input_ids=None, token_type_ids=None, attention_mask=None, labels=None, + position_ids=None, head_mask=None, inputs_embeds=None): num_choices = input_ids.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) @@ -451,6 +461,82 @@ class RobertaForMultipleChoice(BertPreTrainedModel): return outputs # (loss), reshaped_logits, (hidden_states), (attentions) +@add_start_docstrings("""Roberta Model with a token classification head on top (a linear layer on top of + the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, + ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING) +class RobertaForTokenClassification(BertPreTrainedModel): + r""" + **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Labels for computing the token classification loss. + Indices should be in ``[0, ..., config.num_labels - 1]``. + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Classification loss. + **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` + Classification scores (before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
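The token-classification head added here (its example and forward pass follow below) only lets non-padded positions contribute to the loss, via the `active_loss` selection on the flattened logits. The idiom in isolation, with made-up shapes::

    import torch
    from torch.nn import CrossEntropyLoss

    num_labels = 3
    logits = torch.randn(2, 5, num_labels)            # (batch, seq_len, num_labels)
    labels = torch.randint(0, num_labels, (2, 5))     # one label per token
    attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                                   [1, 1, 1, 1, 1]])  # last two tokens of the first row are padding

    loss_fct = CrossEntropyLoss()
    active_loss = attention_mask.view(-1) == 1                 # keep real tokens only
    active_logits = logits.view(-1, num_labels)[active_loss]   # (8, 3): 3 + 5 unpadded positions
    active_labels = labels.view(-1)[active_loss]
    loss = loss_fct(active_logits, active_labels)              # padded positions never affect the gradient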
+ + Examples:: + + tokenizer = RobertaTokenizer.from_pretrained('roberta-base') + model = RobertaForTokenClassification.from_pretrained('roberta-base') + input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 + labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1 + outputs = model(input_ids, labels=labels) + loss, scores = outputs[:2] + + """ + config_class = RobertaConfig + pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP + base_model_prefix = "roberta" + + def __init__(self, config): + super(RobertaForTokenClassification, self).__init__(config) + self.num_labels = config.num_labels + + self.roberta = RobertaModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + self.init_weights() + + def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, + position_ids=None, head_mask=None, inputs_embeds=None, labels=None): + + outputs = self.roberta(input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here + if labels is not None: + loss_fct = CrossEntropyLoss() + # Only keep active parts of the loss + if attention_mask is not None: + active_loss = attention_mask.view(-1) == 1 + active_logits = logits.view(-1, self.num_labels)[active_loss] + active_labels = labels.view(-1)[active_loss] + loss = loss_fct(active_logits, active_labels) + else: + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + outputs = (loss,) + outputs + + return outputs # (loss), scores, (hidden_states), (attentions) + class RobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" diff --git a/transformers/modeling_tf_albert.py b/transformers/modeling_tf_albert.py new file mode 100644 index 00000000000..d1650d41a83 --- /dev/null +++ b/transformers/modeling_tf_albert.py @@ -0,0 +1,794 @@ +# coding=utf-8 +# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" TF 2.0 ALBERT model. 
""" +from __future__ import absolute_import, division, print_function, unicode_literals + +import logging +import sys + +import tensorflow as tf + +from .configuration_albert import AlbertConfig +from .modeling_tf_utils import TFPreTrainedModel, get_initializer, shape_list +from .modeling_tf_bert import ACT2FN, TFBertSelfAttention +from .file_utils import add_start_docstrings + +import logging + +logger = logging.getLogger(__name__) + +TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP = { + 'albert-base-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-tf_model.h5", + 'albert-large-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v1-tf_model.h5", + 'albert-xlarge-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v1-tf_model.h5", + 'albert-xxlarge-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v1-tf_model.h5", + 'albert-base-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-tf_model.h5", + 'albert-large-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-tf_model.h5", + 'albert-xlarge-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-tf_model.h5", + 'albert-xxlarge-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-tf_model.h5", +} + + +class TFAlbertEmbeddings(tf.keras.layers.Layer): + """Construct the embeddings from word, position and token_type embeddings. + """ + + def __init__(self, config, **kwargs): + super(TFAlbertEmbeddings, self).__init__(**kwargs) + + self.config = config + self.position_embeddings = tf.keras.layers.Embedding(config.max_position_embeddings, + config.embedding_size, + embeddings_initializer=get_initializer( + self.config.initializer_range), + name='position_embeddings') + self.token_type_embeddings = tf.keras.layers.Embedding(config.type_vocab_size, + config.embedding_size, + embeddings_initializer=get_initializer( + self.config.initializer_range), + name='token_type_embeddings') + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = tf.keras.layers.LayerNormalization( + epsilon=config.layer_norm_eps, name='LayerNorm') + self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + + def build(self, input_shape): + """Build shared word embedding layer """ + with tf.name_scope("word_embeddings"): + # Create and initialize weights. The random normal initializer was chosen + # arbitrarily, and works well. + self.word_embeddings = self.add_weight( + "weight", + shape=[self.config.vocab_size, self.config.embedding_size], + initializer=get_initializer(self.config.initializer_range)) + super(TFAlbertEmbeddings, self).build(input_shape) + + def call(self, inputs, mode="embedding", training=False): + """Get token embeddings of inputs. + Args: + inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids) + mode: string, a valid value is one of "embedding" and "linear". + Returns: + outputs: (1) If mode == "embedding", output embedding tensor, float32 with + shape [batch_size, length, embedding_size]; (2) mode == "linear", output + linear tensor, float32 with shape [batch_size, length, vocab_size]. + Raises: + ValueError: if mode is not valid. 
+ + Shared weights logic adapted from + https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 + """ + if mode == "embedding": + return self._embedding(inputs, training=training) + elif mode == "linear": + return self._linear(inputs) + else: + raise ValueError("mode {} is not valid.".format(mode)) + + def _embedding(self, inputs, training=False): + """Applies embedding based on inputs tensor.""" + input_ids, position_ids, token_type_ids, inputs_embeds = inputs + + if input_ids is not None: + input_shape = shape_list(input_ids) + else: + input_shape = shape_list(inputs_embeds)[:-1] + + seq_length = input_shape[1] + if position_ids is None: + position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :] + if token_type_ids is None: + token_type_ids = tf.fill(input_shape, 0) + + if inputs_embeds is None: + inputs_embeds = tf.gather(self.word_embeddings, input_ids) + position_embeddings = self.position_embeddings(position_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + position_embeddings + token_type_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings, training=training) + return embeddings + + def _linear(self, inputs): + """Computes logits by running inputs through a linear layer. + Args: + inputs: A float32 tensor with shape [batch_size, length, embedding_size] + Returns: + float32 tensor with shape [batch_size, length, vocab_size]. + """ + batch_size = shape_list(inputs)[0] + length = shape_list(inputs)[1] + x = tf.reshape(inputs, [-1, self.config.embedding_size]) + logits = tf.matmul(x, self.word_embeddings, transpose_b=True) + return tf.reshape(logits, [batch_size, length, self.config.vocab_size]) + + +class TFAlbertSelfAttention(tf.keras.layers.Layer): + def __init__(self, config, **kwargs): + super(TFAlbertSelfAttention, self).__init__(**kwargs) + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads)) + self.output_attentions = config.output_attentions + + self.num_attention_heads = config.num_attention_heads + assert config.hidden_size % config.num_attention_heads == 0 + self.attention_head_size = int( + config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = tf.keras.layers.Dense(self.all_head_size, + kernel_initializer=get_initializer( + config.initializer_range), + name='query') + self.key = tf.keras.layers.Dense(self.all_head_size, + kernel_initializer=get_initializer( + config.initializer_range), + name='key') + self.value = tf.keras.layers.Dense(self.all_head_size, + kernel_initializer=get_initializer( + config.initializer_range), + name='value') + + self.dropout = tf.keras.layers.Dropout( + config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x, batch_size): + x = tf.reshape( + x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) + return tf.transpose(x, perm=[0, 2, 1, 3]) + + def call(self, inputs, training=False): + hidden_states, attention_mask, head_mask = inputs + + batch_size = shape_list(hidden_states)[0] + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) 
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) + value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) + + # Take the dot product between "query" and "key" to get the raw attention scores. + # (batch size, num_heads, seq_len_q, seq_len_k) + attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) + # scale attention_scores + dk = tf.cast(shape_list(key_layer)[-1], tf.float32) + attention_scores = attention_scores / tf.math.sqrt(dk) + + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in TFAlbertModel call() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = tf.nn.softmax(attention_scores, axis=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs, training=training) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = tf.matmul(attention_probs, value_layer) + + context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) + context_layer = tf.reshape(context_layer, + (batch_size, -1, self.all_head_size)) # (batch_size, seq_len_q, all_head_size) + + outputs = (context_layer, attention_probs) if self.output_attentions else ( + context_layer,) + return outputs + + +class TFAlbertSelfOutput(tf.keras.layers.Layer): + def __init__(self, config, **kwargs): + super(TFAlbertSelfOutput, self).__init__(**kwargs) + self.dense = tf.keras.layers.Dense(config.hidden_size, + kernel_initializer=get_initializer( + config.initializer_range), + name='dense') + self.LayerNorm = tf.keras.layers.LayerNormalization( + epsilon=config.layer_norm_eps, name='LayerNorm') + self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + + def call(self, inputs, training=False): + hidden_states, input_tensor = inputs + + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class TFAlbertAttention(TFBertSelfAttention): + def __init__(self, config, **kwargs): + super(TFAlbertAttention, self).__init__(config, **kwargs) + + self.hidden_size = config.hidden_size + self.dense = tf.keras.layers.Dense(config.hidden_size, + kernel_initializer=get_initializer( + config.initializer_range), + name='dense') + self.LayerNorm = tf.keras.layers.LayerNormalization( + epsilon=config.layer_norm_eps, name='LayerNorm') + self.pruned_heads = set() + + def prune_heads(self, heads): + raise NotImplementedError + + def call(self, inputs, training=False): + input_tensor, attention_mask, head_mask = inputs + + batch_size = shape_list(input_tensor)[0] + mixed_query_layer = self.query(input_tensor) + mixed_key_layer = self.key(input_tensor) + mixed_value_layer = self.value(input_tensor) + + query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) + key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) + value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) + + # Take the dot product between "query" and "key" to get the raw attention scores. 
+ # (batch size, num_heads, seq_len_q, seq_len_k) + attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) + # scale attention_scores + dk = tf.cast(shape_list(key_layer)[-1], tf.float32) + attention_scores = attention_scores / tf.math.sqrt(dk) + + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in TFBertModel call() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = tf.nn.softmax(attention_scores, axis=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs, training=training) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = tf.matmul(attention_probs, value_layer) + + context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) + context_layer = tf.reshape(context_layer, + (batch_size, -1, self.all_head_size)) # (batch_size, seq_len_q, all_head_size) + + self_outputs = (context_layer, attention_probs) if self.output_attentions else ( + context_layer,) + + hidden_states = self_outputs[0] + + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + attention_output = self.LayerNorm(hidden_states + input_tensor) + + # add attentions if we output them + outputs = (attention_output,) + self_outputs[1:] + return outputs + + +class TFAlbertLayer(tf.keras.layers.Layer): + def __init__(self, config, **kwargs): + super(TFAlbertLayer, self).__init__(**kwargs) + self.attention = TFAlbertAttention(config, name='attention') + + self.ffn = tf.keras.layers.Dense(config.intermediate_size, kernel_initializer=get_initializer( + config.initializer_range), name='ffn') + + if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): + self.activation = ACT2FN[config.hidden_act] + else: + self.activation = config.hidden_act + + self.ffn_output = tf.keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer( + config.initializer_range), name='ffn_output') + self.full_layer_layer_norm = tf.keras.layers.LayerNormalization( + epsilon=config.layer_norm_eps, name='full_layer_layer_norm') + self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + + def call(self, inputs, training=False): + hidden_states, attention_mask, head_mask = inputs + + attention_outputs = self.attention( + [hidden_states, attention_mask, head_mask], training=training) + ffn_output = self.ffn(attention_outputs[0]) + ffn_output = self.activation(ffn_output) + ffn_output = self.ffn_output(ffn_output) + + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = self.full_layer_layer_norm( + ffn_output + attention_outputs[0]) + + # add attentions if we output them + outputs = (hidden_states,) + attention_outputs[1:] + return outputs + + +class TFAlbertLayerGroup(tf.keras.layers.Layer): + def __init__(self, config, **kwargs): + super(TFAlbertLayerGroup, self).__init__(**kwargs) + + self.output_attentions = config.output_attentions + self.output_hidden_states = config.output_hidden_states + self.albert_layers = [TFAlbertLayer(config, name="albert_layers_._{}".format( + i)) for i in range(config.inner_group_num)] + + def call(self, inputs, training=False): + hidden_states, attention_mask, head_mask = inputs + + layer_hidden_states = 
() + layer_attentions = () + + for layer_index, albert_layer in enumerate(self.albert_layers): + layer_output = albert_layer( + [hidden_states, attention_mask, head_mask[layer_index]], training=training) + hidden_states = layer_output[0] + + if self.output_attentions: + layer_attentions = layer_attentions + (layer_output[1],) + + if self.output_hidden_states: + layer_hidden_states = layer_hidden_states + (hidden_states,) + + outputs = (hidden_states,) + if self.output_hidden_states: + outputs = outputs + (layer_hidden_states,) + if self.output_attentions: + outputs = outputs + (layer_attentions,) + # last-layer hidden state, (layer hidden states), (layer attentions) + return outputs + + +class TFAlbertTransformer(tf.keras.layers.Layer): + def __init__(self, config, **kwargs): + super(TFAlbertTransformer, self).__init__(**kwargs) + + self.config = config + self.output_attentions = config.output_attentions + self.output_hidden_states = config.output_hidden_states + self.embedding_hidden_mapping_in = tf.keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer( + config.initializer_range), name='embedding_hidden_mapping_in') + self.albert_layer_groups = [TFAlbertLayerGroup( + config, name="albert_layer_groups_._{}".format(i)) for i in range(config.num_hidden_groups)] + + def call(self, inputs, training=False): + hidden_states, attention_mask, head_mask = inputs + + hidden_states = self.embedding_hidden_mapping_in(hidden_states) + all_attentions = () + + if self.output_hidden_states: + all_hidden_states = (hidden_states,) + + for i in range(self.config.num_hidden_layers): + # Number of layers in a hidden group + layers_per_group = int( + self.config.num_hidden_layers / self.config.num_hidden_groups) + + # Index of the hidden group + group_idx = int( + i / (self.config.num_hidden_layers / self.config.num_hidden_groups)) + + layer_group_output = self.albert_layer_groups[group_idx]( + [hidden_states, attention_mask, head_mask[group_idx*layers_per_group:(group_idx+1)*layers_per_group]], training=training) + hidden_states = layer_group_output[0] + + if self.output_attentions: + all_attentions = all_attentions + layer_group_output[-1] + + if self.output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + outputs = (hidden_states,) + if self.output_hidden_states: + outputs = outputs + (all_hidden_states,) + if self.output_attentions: + outputs = outputs + (all_attentions,) + + # last-layer hidden state, (all hidden states), (all attentions) + return outputs + + +class TFAlbertPreTrainedModel(TFPreTrainedModel): + """ An abstract class to handle weights initialization and + a simple interface for dowloading and loading pretrained models. 
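The transformer loop above runs `num_hidden_layers` iterations but only owns `num_hidden_groups` sets of layer weights; that index arithmetic is ALBERT's cross-layer parameter sharing. With concrete (illustrative) numbers::

    def group_index(layer_i, num_hidden_layers, num_hidden_groups):
        # Which parameter group serves this (virtual) layer.
        return int(layer_i / (num_hidden_layers / num_hidden_groups))

    # ALBERT's default configs use a single group: the same weights are applied 12 times.
    print([group_index(i, 12, 1) for i in range(12)])   # [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

    # A hypothetical 12-layer / 3-group config reuses each group for 4 consecutive layers.
    print([group_index(i, 12, 3) for i in range(12)])   # [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]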
+ """ + config_class = AlbertConfig + pretrained_model_archive_map = TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP + base_model_prefix = "albert" + + +class TFAlbertMLMHead(tf.keras.layers.Layer): + def __init__(self, config, input_embeddings, **kwargs): + super(TFAlbertMLMHead, self).__init__(**kwargs) + self.vocab_size = config.vocab_size + + self.dense = tf.keras.layers.Dense(config.embedding_size, + kernel_initializer=get_initializer( + config.initializer_range), + name='dense') + if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): + self.activation = ACT2FN[config.hidden_act] + else: + self.activation = config.hidden_act + + self.LayerNorm = tf.keras.layers.LayerNormalization( + epsilon=config.layer_norm_eps, name='LayerNorm') + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = input_embeddings + + def build(self, input_shape): + self.bias = self.add_weight(shape=(self.vocab_size,), + initializer='zeros', + trainable=True, + name='bias') + self.decoder_bias = self.add_weight(shape=(self.vocab_size,), + initializer='zeros', + trainable=True, + name='decoder/bias') + super(TFAlbertMLMHead, self).build(input_shape) + + def call(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.activation(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + hidden_states = self.decoder(hidden_states, mode="linear") + self.decoder_bias + hidden_states = hidden_states + self.bias + return hidden_states + + +ALBERT_START_DOCSTRING = r""" The ALBERT model was proposed in + `ALBERT: A Lite BERT for Self-supervised Learning of Language Representations`_ + by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut. It presents + two parameter-reduction techniques to lower memory consumption and increase the trainig speed of BERT. + + This model is a tf.keras.Model `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and + refer to the TF 2.0 documentation for all matter related to general usage and behavior. + + .. _`ALBERT: A Lite BERT for Self-supervised Learning of Language Representations`: + https://arxiv.org/abs/1909.11942 + + .. _`tf.keras.Model`: + https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model + + Note on the model inputs: + TF 2.0 models accepts two formats as inputs: + + - having all inputs as keyword arguments (like PyTorch models), or + - having all inputs as a list, tuple or dict in the first positional arguments. + + This second option is usefull when using `tf.keras.Model.fit()` method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. + + If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : + + - a single Tensor with input_ids only and nothing else: `model(inputs_ids) + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associaed to the input names given in the docstring: + `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})` + + Parameters: + config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model. 
+ Initializing with a config file does not load the weights associated with the model, only the configuration. + Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. +""" + +ALBERT_INPUTS_DOCSTRING = r""" + Inputs: + **input_ids**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: + Indices of input sequence tokens in the vocabulary. + To match pre-training, ALBERT input sequences should be formatted with [CLS] and [SEP] tokens as follows: + + (a) For sequence pairs: + + ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` + + ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` + + (b) For single sequences: + + ``tokens: [CLS] the dog is hairy . [SEP]`` + + ``token_type_ids: 0 0 0 0 0 0 0`` + + Albert is a model with absolute position embeddings so it's usually advised to pad the inputs on + the right rather than the left. + + Indices can be obtained using :class:`transformers.AlbertTokenizer`. + See :func:`transformers.PreTrainedTokenizer.encode` and + :func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. + **attention_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: + Mask to avoid performing attention on padding token indices. + Mask values selected in ``[0, 1]``: + ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. + **token_type_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: + Segment token indices to indicate first and second portions of the inputs. + Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` + corresponds to a `sentence B` token + (see `ALBERT: A Lite BERT for Self-supervised Learning of Language Representations`_ for more details). + **position_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``: + Indices of positions of each input sequence tokens in the position embeddings. + Selected in the range ``[0, config.max_position_embeddings - 1]``. + **head_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: + Mask to nullify selected heads of the self-attention modules. + Mask values selected in ``[0, 1]``: + ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. +""" + +@add_start_docstrings("The bare Albert Model transformer outputting raw hidden-states without any specific head on top.", + ALBERT_START_DOCSTRING, ALBERT_INPUTS_DOCSTRING) +class TFAlbertModel(TFAlbertPreTrainedModel): + r""" + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **last_hidden_state**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, hidden_size)`` + Sequence of hidden-states at the output of the last layer of the model. + **pooler_output**: ``tf.Tensor`` of shape ``(batch_size, hidden_size)`` + Last layer hidden-state of the first token of the sequence (classification token) + further processed by a Linear layer and a Tanh activation function. The Linear + layer weights are trained from the next sentence prediction (classification) + objective during Albert pretraining. This output is usually *not* a good summary + of the semantic content of the input, you're often better off averaging or pooling + the sequence of hidden-states for the whole input sequence.
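The [CLS]/[SEP] layout described in the inputs docstring above is what `encode` with `add_special_tokens=True` produces; a quick way to see it, assuming an `albert-base-v2` tokenizer (the exact sentencepiece splits depend on the vocabulary)::

    from transformers import AlbertTokenizer

    tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
    ids = tokenizer.encode("is this jacksonville ?", "no it is not .", add_special_tokens=True)
    print(tokenizer.convert_ids_to_tokens(ids))
    # roughly: ['[CLS]', '▁is', '▁this', ..., '[SEP]', '▁no', '▁it', '▁is', '▁not', ..., '[SEP]']
    # token_type_ids are 0 up to and including the first [SEP], then 1 for the second segment.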
+     **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + + Examples:: + + import tensorflow as tf + from transformers import AlbertTokenizer, TFAlbertModel + + tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') + model = TFAlbertModel.from_pretrained('albert-base-v2') + input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 + outputs = model(input_ids) + last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple + + """ + + def __init__(self, config, **kwargs): + super(TFAlbertModel, self).__init__(config, **kwargs) + self.num_hidden_layers = config.num_hidden_layers + + self.embeddings = TFAlbertEmbeddings(config, name="embeddings") + self.encoder = TFAlbertTransformer(config, name="encoder") + self.pooler = tf.keras.layers.Dense(config.hidden_size, kernel_initializer=get_initializer( + config.initializer_range), activation='tanh', name='pooler') + + def get_input_embeddings(self): + return self.embeddings + + def _resize_token_embeddings(self, new_num_tokens): + raise NotImplementedError + + def _prune_heads(self, heads_to_prune): + """ Prunes heads of the model. + heads_to_prune: dict of {layer_num: list of heads to prune in this layer} + See base class PreTrainedModel + """ + raise NotImplementedError + + def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, training=False): + if isinstance(inputs, (tuple, list)): + input_ids = inputs[0] + attention_mask = inputs[1] if len(inputs) > 1 else attention_mask + token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids + position_ids = inputs[3] if len(inputs) > 3 else position_ids + head_mask = inputs[4] if len(inputs) > 4 else head_mask + inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds + assert len(inputs) <= 6, "Too many inputs." + elif isinstance(inputs, dict): + input_ids = inputs.get('input_ids') + attention_mask = inputs.get('attention_mask', attention_mask) + token_type_ids = inputs.get('token_type_ids', token_type_ids) + position_ids = inputs.get('position_ids', position_ids) + head_mask = inputs.get('head_mask', head_mask) + inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) + assert len(inputs) <= 6, "Too many inputs." + else: + input_ids = inputs + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if attention_mask is None: + attention_mask = tf.fill(input_shape, 1) + if token_type_ids is None: + token_type_ids = tf.fill(input_shape, 0) + + # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + + extended_attention_mask = tf.cast(extended_attention_mask, tf.float32) + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + if not head_mask is None: + raise NotImplementedError + else: + head_mask = [None] * self.num_hidden_layers + # head_mask = tf.constant([0] * self.num_hidden_layers) + + embedding_output = self.embeddings( + [input_ids, position_ids, token_type_ids, inputs_embeds], training=training) + encoder_outputs = self.encoder( + [embedding_output, extended_attention_mask, head_mask], training=training) + + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output[:, 0]) + + # add hidden_states and attentions if they are here + outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] + # sequence_output, pooled_output, (hidden_states), (attentions) + return outputs + + +@add_start_docstrings("""Albert Model with a `language modeling` head on top. """, + ALBERT_START_DOCSTRING, ALBERT_INPUTS_DOCSTRING) +class TFAlbertForMaskedLM(TFAlbertPreTrainedModel): + r""" + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **prediction_scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
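The mask preparation above and the attention layers earlier in this file combine into the usual scaled dot-product pattern: broadcast the 2D padding mask to an additive bias, scale QK^T by 1/sqrt(d), add the bias, softmax, then average the values. Stripped of the layer classes (shapes are arbitrary)::

    import tensorflow as tf

    attention_mask = tf.constant([[1, 1, 1, 0, 0]])                        # 1 = real token, 0 = padding
    extended_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]           # (batch, 1, 1, k_len)
    extended_mask = (1.0 - tf.cast(extended_mask, tf.float32)) * -10000.0  # 0.0 on real keys, -10000.0 on padding

    batch, heads, q_len, k_len, d_head = 1, 4, 5, 5, 16
    q = tf.random.normal([batch, heads, q_len, d_head])
    k = tf.random.normal([batch, heads, k_len, d_head])
    v = tf.random.normal([batch, heads, k_len, d_head])

    scores = tf.matmul(q, k, transpose_b=True) / tf.math.sqrt(tf.cast(d_head, tf.float32))
    scores = scores + extended_mask                 # padded keys become ~impossible to attend to
    probs = tf.nn.softmax(scores, axis=-1)
    context = tf.matmul(probs, v)                   # (batch, heads, q_len, d_head)
    print(probs[0, 0, 0])                           # last two entries are effectively zero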
+ + Examples:: + + import tensorflow as tf + from transformers import AlbertTokenizer, TFAlbertForMaskedLM + + tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') + model = TFAlbertForMaskedLM.from_pretrained('albert-base-v2') + input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 + outputs = model(input_ids) + prediction_scores = outputs[0] + + """ + + def __init__(self, config, *inputs, **kwargs): + super(TFAlbertForMaskedLM, self).__init__(config, *inputs, **kwargs) + + self.albert = TFAlbertModel(config, name='albert') + self.predictions = TFAlbertMLMHead( + config, self.albert.embeddings, name='predictions') + + def get_output_embeddings(self): + return self.albert.embeddings + + def call(self, inputs, **kwargs): + outputs = self.albert(inputs, **kwargs) + + sequence_output = outputs[0] + prediction_scores = self.predictions( + sequence_output, training=kwargs.get('training', False)) + + # Add hidden states and attention if they are here + outputs = (prediction_scores,) + outputs[2:] + + return outputs # prediction_scores, (hidden_states), (attentions) + + +@add_start_docstrings("""Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of + the pooled output) e.g. for GLUE tasks. """, + ALBERT_START_DOCSTRING, ALBERT_INPUTS_DOCSTRING) +class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel): + r""" + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **logits**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, config.num_labels)`` + Classification (or regression if config.num_labels==1) scores (before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
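The example below feeds a single tensor; per the input-format note in `ALBERT_START_DOCSTRING`, the same call also accepts keyword arguments, a list in docstring order, or a dict keyed by input names. Assuming an `albert-base-v2` checkpoint, the four calls are interchangeable::

    import tensorflow as tf
    from transformers import AlbertTokenizer, TFAlbertModel

    tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
    model = TFAlbertModel.from_pretrained('albert-base-v2')

    input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]
    attention_mask = tf.ones_like(input_ids)

    out1 = model(input_ids)                                      # single tensor
    out2 = model(input_ids, attention_mask=attention_mask)       # keyword arguments
    out3 = model([input_ids, attention_mask])                    # list, in docstring order
    out4 = model({'input_ids': input_ids,
                  'attention_mask': attention_mask})             # dict keyed by input names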
+ + Examples:: + + import tensorflow as tf + from transformers import AlbertTokenizer, TFAlbertForSequenceClassification + + tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2') + model = TFAlbertForSequenceClassification.from_pretrained('albert-base-v2') + input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 + outputs = model(input_ids) + logits = outputs[0] + + """ + def __init__(self, config, *inputs, **kwargs): + super(TFAlbertForSequenceClassification, self).__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + + self.albert = TFAlbertModel(config, name='albert') + self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + self.classifier = tf.keras.layers.Dense(config.num_labels, + kernel_initializer=get_initializer(config.initializer_range), + name='classifier') + + def call(self, inputs, **kwargs): + outputs = self.albert(inputs, **kwargs) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output, training=kwargs.get('training', False)) + logits = self.classifier(pooled_output) + + outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here + + return outputs # logits, (hidden_states), (attentions) \ No newline at end of file diff --git a/transformers/modeling_tf_auto.py b/transformers/modeling_tf_auto.py index df0ad6e401d..fac92eb8668 100644 --- a/transformers/modeling_tf_auto.py +++ b/transformers/modeling_tf_auto.py @@ -81,6 +81,7 @@ class TFAutoModel(object): pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. + - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `PyTorch, TF 1.X or TF 2.0 checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In the case of a PyTorch checkpoint, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument. @@ -109,6 +110,9 @@ class TFAutoModel(object): force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. + resume_download: (`optional`) boolean, default False: + Do not delete incompletely recieved file. Attempt to resume the download if such a file exists. + proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. @@ -209,6 +213,7 @@ class TFAutoModelWithLMHead(object): pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. + - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `PyTorch, TF 1.X or TF 2.0 checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In the case of a PyTorch checkpoint, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument. 
@@ -237,6 +242,9 @@ class TFAutoModelWithLMHead(object): force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. + resume_download: (`optional`) boolean, default False: + Do not delete incompletely recieved file. Attempt to resume the download if such a file exists. + proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. @@ -332,6 +340,7 @@ class TFAutoModelForSequenceClassification(object): pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. + - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `PyTorch, TF 1.X or TF 2.0 checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In the case of a PyTorch checkpoint, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument. @@ -360,6 +369,9 @@ class TFAutoModelForSequenceClassification(object): force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. + resume_download: (`optional`) boolean, default False: + Do not delete incompletely recieved file. Attempt to resume the download if such a file exists. + proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. @@ -444,6 +456,7 @@ class TFAutoModelForQuestionAnswering(object): pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. + - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `PyTorch, TF 1.X or TF 2.0 checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In the case of a PyTorch checkpoint, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument. @@ -472,6 +485,9 @@ class TFAutoModelForQuestionAnswering(object): force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. + resume_download: (`optional`) boolean, default False: + Do not delete incompletely recieved file. Attempt to resume the download if such a file exists. + proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. 
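The documented download options (`force_download`, the new `resume_download`, and `proxies`) are plain keyword arguments to `from_pretrained`. A small sketch; the proxy addresses are the placeholders from the docstring, not real endpoints:

```python
from transformers import TFAutoModelWithLMHead

model = TFAutoModelWithLMHead.from_pretrained(
    'bert-base-uncased',
    resume_download=True,                      # keep a partially downloaded file and resume it
    proxies={'http': 'foo.bar:3128',
             'http://hostname': 'foo.bar:4012'},  # per-protocol/endpoint proxies used on each request
)
```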
diff --git a/transformers/modeling_tf_bert.py b/transformers/modeling_tf_bert.py index afe9b2946b0..7cc71f50633 100644 --- a/transformers/modeling_tf_bert.py +++ b/transformers/modeling_tf_bert.py @@ -28,7 +28,7 @@ import numpy as np import tensorflow as tf from .configuration_bert import BertConfig -from .modeling_tf_utils import TFPreTrainedModel, get_initializer +from .modeling_tf_utils import TFPreTrainedModel, get_initializer, shape_list from .file_utils import add_start_docstrings logger = logging.getLogger(__name__) @@ -48,6 +48,10 @@ TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP = { 'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-tf_model.h5", 'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-tf_model.h5", 'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-tf_model.h5", + 'bert-base-japanese': "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-tf_model.h5", + 'bert-base-japanese-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking-tf_model.h5", + 'bert-base-japanese-char': "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-tf_model.h5", + 'bert-base-japanese-char-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking-tf_model.h5" } @@ -129,7 +133,7 @@ class TFBertEmbeddings(tf.keras.layers.Layer): linear tensor, float32 with shape [batch_size, length, vocab_size]. Raises: ValueError: if mode is not valid. - + Shared weights logic adapted from https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 """ @@ -142,19 +146,25 @@ class TFBertEmbeddings(tf.keras.layers.Layer): def _embedding(self, inputs, training=False): """Applies embedding based on inputs tensor.""" - input_ids, position_ids, token_type_ids = inputs + input_ids, position_ids, token_type_ids, inputs_embeds = inputs - seq_length = tf.shape(input_ids)[1] + if input_ids is not None: + input_shape = shape_list(input_ids) + else: + input_shape = shape_list(inputs_embeds)[:-1] + + seq_length = input_shape[1] if position_ids is None: position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :] if token_type_ids is None: - token_type_ids = tf.fill(tf.shape(input_ids), 0) + token_type_ids = tf.fill(input_shape, 0) - words_embeddings = tf.gather(self.word_embeddings, input_ids) + if inputs_embeds is None: + inputs_embeds = tf.gather(self.word_embeddings, input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) - embeddings = words_embeddings + position_embeddings + token_type_embeddings + embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings, training=training) return embeddings @@ -166,8 +176,8 @@ class TFBertEmbeddings(tf.keras.layers.Layer): Returns: float32 tensor with shape [batch_size, length, vocab_size]. 
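The import hunk above pulls `shape_list` into `modeling_tf_bert.py`, and the following hunks swap `tf.shape(...)` for it. As a rough sketch of why (this re-implementation is illustrative; the canonical version lives in `transformers.modeling_tf_utils`), `shape_list` returns static dimensions as plain Python ints where they are known and falls back to dynamic `tf.shape` entries where they are not, so the result can be used in Python-level arithmetic and reshapes:

```python
import tensorflow as tf

def shape_list(x):
    """Static shape where known, dynamic tf.shape entries where it is not."""
    static = x.shape.as_list()
    dynamic = tf.shape(x)
    return [dynamic[i] if dim is None else dim for i, dim in enumerate(static)]

x = tf.zeros((2, 5, 8))
print(shape_list(x))   # [2, 5, 8] -- plain Python ints in eager mode

# Inside a traced tf.function the batch dimension is unknown, so it comes back as a
# scalar tensor while the statically known dimensions stay Python ints.
@tf.function(input_signature=[tf.TensorSpec(shape=(None, 5, 8), dtype=tf.float32)])
def flatten_features(t):
    dims = shape_list(t)
    return tf.reshape(t, (-1, dims[1] * dims[2]))

print(flatten_features(x).shape)   # (2, 40)
```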
""" - batch_size = tf.shape(inputs)[0] - length = tf.shape(inputs)[1] + batch_size = shape_list(inputs)[0] + length = shape_list(inputs)[1] x = tf.reshape(inputs, [-1, self.hidden_size]) logits = tf.matmul(x, self.word_embeddings, transpose_b=True) @@ -208,7 +218,7 @@ class TFBertSelfAttention(tf.keras.layers.Layer): def call(self, inputs, training=False): hidden_states, attention_mask, head_mask = inputs - batch_size = tf.shape(hidden_states)[0] + batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) @@ -219,7 +229,7 @@ class TFBertSelfAttention(tf.keras.layers.Layer): # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) # (batch size, num_heads, seq_len_q, seq_len_k) - dk = tf.cast(tf.shape(key_layer)[-1], tf.float32) # scale attention_scores + dk = tf.cast(shape_list(key_layer)[-1], tf.float32) # scale attention_scores attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: @@ -240,7 +250,7 @@ class TFBertSelfAttention(tf.keras.layers.Layer): context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) - context_layer = tf.reshape(context_layer, + context_layer = tf.reshape(context_layer, (batch_size, -1, self.all_head_size)) # (batch_size, seq_len_q, all_head_size) outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,) @@ -460,6 +470,9 @@ class TFBertMainLayer(tf.keras.layers.Layer): self.encoder = TFBertEncoder(config, name='encoder') self.pooler = TFBertPooler(config, name='pooler') + def get_input_embeddings(self): + return self.embeddings + def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError @@ -470,28 +483,39 @@ class TFBertMainLayer(tf.keras.layers.Layer): """ raise NotImplementedError - def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, training=False): + def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids position_ids = inputs[3] if len(inputs) > 3 else position_ids head_mask = inputs[4] if len(inputs) > 4 else head_mask - assert len(inputs) <= 5, "Too many inputs." + inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds + assert len(inputs) <= 6, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') attention_mask = inputs.get('attention_mask', attention_mask) token_type_ids = inputs.get('token_type_ids', token_type_ids) position_ids = inputs.get('position_ids', position_ids) head_mask = inputs.get('head_mask', head_mask) - assert len(inputs) <= 5, "Too many inputs." + inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) + assert len(inputs) <= 6, "Too many inputs." 
else: input_ids = inputs + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + if attention_mask is None: - attention_mask = tf.fill(tf.shape(input_ids), 1) + attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: - token_type_ids = tf.fill(tf.shape(input_ids), 0) + token_type_ids = tf.fill(input_shape, 0) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] @@ -520,7 +544,7 @@ class TFBertMainLayer(tf.keras.layers.Layer): head_mask = [None] * self.num_hidden_layers # head_mask = tf.constant([0] * self.num_hidden_layers) - embedding_output = self.embeddings([input_ids, position_ids, token_type_ids], training=training) + embedding_output = self.embeddings([input_ids, position_ids, token_type_ids, inputs_embeds], training=training) encoder_outputs = self.encoder([embedding_output, extended_attention_mask, head_mask], training=training) sequence_output = encoder_outputs[0] @@ -571,7 +595,7 @@ BERT_START_DOCSTRING = r""" The BERT model was proposed in `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})` Parameters: - config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. + config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ @@ -585,13 +609,13 @@ BERT_INPUTS_DOCSTRING = r""" (a) For sequence pairs: ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` - + ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` (b) For single sequences: ``tokens: [CLS] the dog is hairy . [SEP]`` - + ``token_type_ids: 0 0 0 0 0 0 0`` Bert is a model with absolute position embeddings so it's usually advised to pad the inputs on @@ -616,6 +640,10 @@ BERT_INPUTS_DOCSTRING = r""" Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. 
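With the input shape now derived from either `input_ids` or `inputs_embeds`, an embedded representation can be fed directly. A rough sketch, assuming the model's own word embedding matrix is used to build it (the `model.bert.embeddings.word_embeddings` attribute access is how the built weight happens to be exposed, not something this diff adds):

```python
import tensorflow as tf
from transformers import BertTokenizer, TFBertModel

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = TFBertModel.from_pretrained('bert-base-uncased')

input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]

# Build the embedded representation ourselves, then hand it to the model instead of input_ids.
word_embedding_matrix = model.bert.embeddings.word_embeddings   # (vocab_size, hidden_size)
inputs_embeds = tf.gather(word_embedding_matrix, input_ids)     # (1, seq_len, hidden_size)

outputs = model({'inputs_embeds': inputs_embeds})
sequence_output = outputs[0]

# Passing both at once now raises a ValueError, per the check added above:
# model({'input_ids': input_ids, 'inputs_embeds': inputs_embeds})   # -> ValueError
```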
""" @add_start_docstrings("The bare Bert Model transformer outputing raw hidden-states without any specific head on top.", @@ -698,6 +726,9 @@ class TFBertForPreTraining(TFBertPreTrainedModel): self.nsp = TFBertNSPHead(config, name='nsp___cls') self.mlm = TFBertMLMHead(config, self.bert.embeddings, name='mlm___cls') + def get_output_embeddings(self): + return self.bert.embeddings + def call(self, inputs, **kwargs): outputs = self.bert(inputs, **kwargs) @@ -743,6 +774,9 @@ class TFBertForMaskedLM(TFBertPreTrainedModel): self.bert = TFBertMainLayer(config, name='bert') self.mlm = TFBertMLMHead(config, self.bert.embeddings, name='mlm___cls') + def get_output_embeddings(self): + return self.bert.embeddings + def call(self, inputs, **kwargs): outputs = self.bert(inputs, **kwargs) @@ -888,33 +922,39 @@ class TFBertForMultipleChoice(TFBertPreTrainedModel): kernel_initializer=get_initializer(config.initializer_range), name='classifier') - def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, training=False): + def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids position_ids = inputs[3] if len(inputs) > 3 else position_ids head_mask = inputs[4] if len(inputs) > 4 else head_mask - assert len(inputs) <= 5, "Too many inputs." + inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds + assert len(inputs) <= 6, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') attention_mask = inputs.get('attention_mask', attention_mask) token_type_ids = inputs.get('token_type_ids', token_type_ids) position_ids = inputs.get('position_ids', position_ids) head_mask = inputs.get('head_mask', head_mask) - assert len(inputs) <= 5, "Too many inputs." + inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) + assert len(inputs) <= 6, "Too many inputs." 
else: input_ids = inputs - num_choices = tf.shape(input_ids)[1] - seq_length = tf.shape(input_ids)[2] + if input_ids is not None: + num_choices = shape_list(input_ids)[1] + seq_length = shape_list(input_ids)[2] + else: + num_choices = shape_list(inputs_embeds)[1] + seq_length = shape_list(inputs_embeds)[2] - flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) + flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None - flat_inputs = [flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask] + flat_inputs = [flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, inputs_embeds] outputs = self.bert(flat_inputs, training=training) diff --git a/transformers/modeling_tf_ctrl.py b/transformers/modeling_tf_ctrl.py index c8d181548bc..29ee5113a46 100644 --- a/transformers/modeling_tf_ctrl.py +++ b/transformers/modeling_tf_ctrl.py @@ -95,7 +95,7 @@ class TFMultiHeadAttention(tf.keras.layers.Layer): def call(self, inputs, training=False): v, k, q, mask, layer_past, attention_mask, head_mask = inputs - batch_size = q.shape[0] + batch_size = shape_list(q)[0] q = self.Wq(q) k = self.Wk(k) @@ -192,6 +192,9 @@ class TFCTRLMainLayer(tf.keras.layers.Layer): name='h_._{}'.format(i)) for i in range(config.n_layer)] self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="layernorm") + def get_input_embeddings(self): + return self.w + def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError @@ -201,7 +204,7 @@ class TFCTRLMainLayer(tf.keras.layers.Layer): """ raise NotImplementedError - def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, training=False): + def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] past = inputs[1] if len(inputs) > 1 else past @@ -209,7 +212,8 @@ class TFCTRLMainLayer(tf.keras.layers.Layer): token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids position_ids = inputs[4] if len(inputs) > 4 else position_ids head_mask = inputs[5] if len(inputs) > 5 else head_mask - assert len(inputs) <= 6, "Too many inputs." + inputs_embeds = inputs[6] if len(inputs) > 6 else inputs_embeds + assert len(inputs) <= 7, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') past = inputs.get('past', past) @@ -217,12 +221,20 @@ class TFCTRLMainLayer(tf.keras.layers.Layer): token_type_ids = inputs.get('token_type_ids', token_type_ids) position_ids = inputs.get('position_ids', position_ids) head_mask = inputs.get('head_mask', head_mask) - assert len(inputs) <= 6, "Too many inputs." + inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) + assert len(inputs) <= 7, "Too many inputs." 
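As the hunk above shows, `TFBertForMultipleChoice` now reads `num_choices` and `seq_length` from either `input_ids` or `inputs_embeds` and flattens the rank-3 input to `(batch_size * num_choices, seq_length)` before running BERT. A sketch of the expected input layout; the premise/choice strings are made up and the classification head of a vanilla checkpoint is untrained, so the logits are only meaningful after fine-tuning:

```python
import tensorflow as tf
from transformers import BertTokenizer, TFBertForMultipleChoice

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = TFBertForMultipleChoice.from_pretrained('bert-base-uncased')

prompt = "In Italy, pizza served in formal settings is presented unsliced."
choices = ["It is eaten with a fork and a knife.", "It is eaten while held in the hand."]

# Multiple-choice inputs are rank 3: (batch_size, num_choices, sequence_length).
encoded = [tokenizer.encode(prompt, choice, add_special_tokens=True) for choice in choices]
max_len = max(len(ids) for ids in encoded)
padded = [ids + [tokenizer.pad_token_id] * (max_len - len(ids)) for ids in encoded]
input_ids = tf.constant(padded)[None, :, :]        # batch size 1, 2 choices

classification_logits = model(input_ids)[0]        # (1, num_choices)
```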
else: input_ids = inputs - input_shape = shape_list(input_ids) - input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") if past is None: past_length = 0 @@ -230,8 +242,8 @@ class TFCTRLMainLayer(tf.keras.layers.Layer): else: past_length = shape_list(past[0][0])[-2] if position_ids is None: - position_ids = tf.range(past_length, shape_list(input_ids)[-1] + past_length, dtype=tf.int32)[tf.newaxis, :] - position_ids = tf.tile(position_ids, [shape_list(input_ids)[0], 1]) + position_ids = tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32)[tf.newaxis, :] + position_ids = tf.tile(position_ids, [input_shape[0], 1]) # Attention mask. if attention_mask is not None: @@ -270,8 +282,8 @@ class TFCTRLMainLayer(tf.keras.layers.Layer): token_type_embeds = 0 position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) - inputs_embeds = self.w(input_ids, mode='embedding') - # x = embedded.unsqueeze(0) if len(input_ids.shape)<2 else embedded + if inputs_embeds is None: + inputs_embeds = self.w(input_ids, mode='embedding') seq_len = input_shape[-1] mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0) @@ -374,6 +386,10 @@ CTRL_INPUTS_DOCSTRING = r""" Inputs: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. """ @add_start_docstrings("The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.", @@ -384,7 +400,7 @@ class TFCTRLModel(TFCTRLPreTrainedModel): **last_hidden_state**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the last layer of the model. **past**: - list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + list of ``tf.Tensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``: that contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `past` input) to speed up sequential decoding. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) @@ -446,7 +462,7 @@ class TFCTRLLMHeadModel(TFCTRLPreTrainedModel): **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). 
**past**: - list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + list of ``tf.Tensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``: that contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `past` input) to speed up sequential decoding. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) @@ -476,6 +492,9 @@ class TFCTRLLMHeadModel(TFCTRLPreTrainedModel): self.lm_head = TFCTRLLMHead(config, self.transformer.w, name="lm_head") + def get_output_embeddings(self): + return self.lm_head.input_embeddings + def call(self, inputs, **kwargs): transformer_outputs = self.transformer(inputs, **kwargs) hidden_states = transformer_outputs[0] diff --git a/transformers/modeling_tf_distilbert.py b/transformers/modeling_tf_distilbert.py index 188394816ed..afd88d7ebf8 100644 --- a/transformers/modeling_tf_distilbert.py +++ b/transformers/modeling_tf_distilbert.py @@ -37,7 +37,8 @@ logger = logging.getLogger(__name__) TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP = { 'distilbert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-tf_model.h5", - 'distilbert-base-uncased-distilled-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-distilled-squad-tf_model.h5" + 'distilbert-base-uncased-distilled-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-distilled-squad-tf_model.h5", + 'distilbert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-multilingual-cased-tf_model.h5", } @@ -96,7 +97,7 @@ class TFEmbeddings(tf.keras.layers.Layer): initializer=get_initializer(self.initializer_range)) super(TFEmbeddings, self).build(input_shape) - def call(self, inputs, mode="embedding", training=False): + def call(self, inputs, inputs_embeds=None, mode="embedding", training=False): """Get token embeddings of inputs. 
Args: inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids) @@ -112,13 +113,13 @@ class TFEmbeddings(tf.keras.layers.Layer): https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24 """ if mode == "embedding": - return self._embedding(inputs, training=training) + return self._embedding(inputs, inputs_embeds=inputs_embeds, training=training) elif mode == "linear": return self._linear(inputs) else: raise ValueError("mode {} is not valid.".format(mode)) - def _embedding(self, inputs, training=False): + def _embedding(self, inputs, inputs_embeds=None, training=False): """ Parameters ---------- @@ -136,14 +137,19 @@ class TFEmbeddings(tf.keras.layers.Layer): else: input_ids, position_ids = inputs - seq_length = tf.shape(input_ids)[1] + if input_ids is not None: + seq_length = shape_list(input_ids)[1] + else: + seq_length = shape_list(inputs_embeds)[1] + if position_ids is None: position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :] - word_embeddings = tf.gather(self.word_embeddings, input_ids) + if inputs_embeds is None: + inputs_embeds = tf.gather(self.word_embeddings, input_ids) position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim) - embeddings = word_embeddings + position_embeddings # (bs, max_seq_length, dim) + embeddings = inputs_embeds + position_embeddings # (bs, max_seq_length, dim) embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim) embeddings = self.dropout(embeddings, training=training) # (bs, max_seq_length, dim) return embeddings @@ -155,8 +161,8 @@ class TFEmbeddings(tf.keras.layers.Layer): Returns: float32 tensor with shape [batch_size, length, vocab_size]. """ - batch_size = tf.shape(inputs)[0] - length = tf.shape(inputs)[1] + batch_size = shape_list(inputs)[0] + length = shape_list(inputs)[1] x = tf.reshape(inputs, [-1, self.dim]) logits = tf.matmul(x, self.word_embeddings, transpose_b=True) @@ -398,28 +404,42 @@ class TFDistilBertMainLayer(tf.keras.layers.Layer): self.embeddings = TFEmbeddings(config, name="embeddings") # Embeddings self.transformer = TFTransformer(config, name="transformer") # Encoder + def get_input_embeddings(self): + return self.embeddings + def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError def _prune_heads(self, heads_to_prune): raise NotImplementedError - def call(self, inputs, attention_mask=None, head_mask=None, training=False): + def call(self, inputs, attention_mask=None, head_mask=None, inputs_embeds=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask head_mask = inputs[2] if len(inputs) > 2 else head_mask - assert len(inputs) <= 3, "Too many inputs." + inputs_embeds = inputs[3] if len(inputs) > 3 else inputs_embeds + assert len(inputs) <= 4, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') attention_mask = inputs.get('attention_mask', attention_mask) head_mask = inputs.get('head_mask', head_mask) - assert len(inputs) <= 3, "Too many inputs." + inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) + assert len(inputs) <= 4, "Too many inputs." 
else: input_ids = inputs + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + if attention_mask is None: - attention_mask = tf.ones(shape_list(input_ids)) # (bs, seq_length) + attention_mask = tf.ones(input_shape) # (bs, seq_length) attention_mask = tf.cast(attention_mask, dtype=tf.float32) # Prepare head mask if needed @@ -432,7 +452,7 @@ class TFDistilBertMainLayer(tf.keras.layers.Layer): else: head_mask = [None] * self.num_hidden_layers - embedding_output = self.embeddings(input_ids) # (bs, seq_length, dim) + embedding_output = self.embeddings(input_ids, inputs_embeds=inputs_embeds) # (bs, seq_length, dim) tfmr_output = self.transformer([embedding_output, attention_mask, head_mask], training=training) return tfmr_output # last-layer hidden-state, (all hidden_states), (all attentions) @@ -508,6 +528,10 @@ DISTILBERT_INPUTS_DOCSTRING = r""" Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. """ @add_start_docstrings("The bare DistilBERT encoder/transformer outputing raw hidden-states without any specific head on top.", @@ -609,6 +633,9 @@ class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel): self.vocab_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-12, name="vocab_layer_norm") self.vocab_projector = TFDistilBertLMHead(config, self.distilbert.embeddings, name="vocab_projector") + def get_output_embeddings(self): + return self.vocab_projector.input_embeddings + def call(self, inputs, **kwargs): distilbert_output = self.distilbert(inputs, **kwargs) @@ -677,6 +704,53 @@ class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel): return outputs # logits, (hidden_states), (attentions) +@add_start_docstrings("""DistilBert Model with a token classification head on top (a linear layer on top of + the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, + DISTILBERT_START_DOCSTRING, DISTILBERT_INPUTS_DOCSTRING) +class TFDistilBertForTokenClassification(TFDistilBertPreTrainedModel): + r""" + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` + Classification scores (before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
+ **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + Examples:: + import tensorflow as tf + from transformers import DistilBertTokenizer, TFDistilBertForTokenClassification + tokenizer = DistilBertTokenizer.from_pretrained('bert-base-uncased') + model = TFDistilBertForTokenClassification.from_pretrained('bert-base-uncased') + input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 + outputs = model(input_ids) + scores = outputs[0] + """ + def __init__(self, config, *inputs, **kwargs): + super(TFDistilBertForTokenClassification, self).__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + + self.distilbert = TFDistilBertMainLayer(config, name='distilbert') + self.dropout = tf.keras.layers.Dropout(config.dropout) + self.classifier = tf.keras.layers.Dense(config.num_labels, + kernel_initializer=get_initializer(config.initializer_range), + name='classifier') + + def call(self, inputs, **kwargs): + outputs = self.distilbert(inputs, **kwargs) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output, training=kwargs.get('training', False)) + logits = self.classifier(sequence_output) + + outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here + + return outputs # scores, (hidden_states), (attentions) + + @add_start_docstrings("""DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, DISTILBERT_START_DOCSTRING, DISTILBERT_INPUTS_DOCSTRING) diff --git a/transformers/modeling_tf_gpt2.py b/transformers/modeling_tf_gpt2.py index 4188b273ba7..c738e5e8e38 100644 --- a/transformers/modeling_tf_gpt2.py +++ b/transformers/modeling_tf_gpt2.py @@ -92,7 +92,7 @@ class TFAttention(tf.keras.layers.Layer): # q, k, v have shape [batch, heads, sequence, features] w = tf.matmul(q, k, transpose_b=True) if self.scale: - dk = tf.cast(tf.shape(k)[-1], tf.float32) # scale attention_scores + dk = tf.cast(shape_list(k)[-1], tf.float32) # scale attention_scores w = w / tf.math.sqrt(dk) # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst. 
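For the new token-classification heads, the returned `scores` are per-token logits. A short sketch of turning them into label ids; the checkpoint name is illustrative and its classification head is freshly initialised, so the ids only become meaningful after fine-tuning on a tagging task such as NER:

```python
import tensorflow as tf
from transformers import DistilBertTokenizer, TFDistilBertForTokenClassification

tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
model = TFDistilBertForTokenClassification.from_pretrained('distilbert-base-uncased')

input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
scores = model(input_ids)[0]                       # (1, sequence_length, num_labels)

# One predicted label id per input token.
predicted_label_ids = tf.argmax(scores, axis=-1)
print(predicted_label_ids.numpy())
```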
@@ -219,6 +219,9 @@ class TFGPT2MainLayer(tf.keras.layers.Layer): name='h_._{}'.format(i)) for i in range(config.n_layer)] self.ln_f = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_f') + def get_input_embeddings(self): + return self.wte + def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError @@ -228,7 +231,7 @@ class TFGPT2MainLayer(tf.keras.layers.Layer): """ raise NotImplementedError - def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, training=False): + def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] past = inputs[1] if len(inputs) > 1 else past @@ -236,7 +239,8 @@ class TFGPT2MainLayer(tf.keras.layers.Layer): token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids position_ids = inputs[4] if len(inputs) > 4 else position_ids head_mask = inputs[5] if len(inputs) > 5 else head_mask - assert len(inputs) <= 6, "Too many inputs." + inputs_embeds = inputs[6] if len(inputs) > 6 else inputs_embeds + assert len(inputs) <= 7, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') past = inputs.get('past', past) @@ -244,17 +248,28 @@ class TFGPT2MainLayer(tf.keras.layers.Layer): token_type_ids = inputs.get('token_type_ids', token_type_ids) position_ids = inputs.get('position_ids', position_ids) head_mask = inputs.get('head_mask', head_mask) - assert len(inputs) <= 6, "Too many inputs." + inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) + assert len(inputs) <= 7, "Too many inputs." else: input_ids = inputs + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + if past is None: past_length = 0 past = [None] * len(self.h) else: past_length = shape_list(past[0][0])[-2] if position_ids is None: - position_ids = tf.range(past_length, shape_list(input_ids)[-1] + past_length, dtype=tf.int32)[tf.newaxis, :] + position_ids = tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32)[tf.newaxis, :] if attention_mask is not None: # We create a 3D attention mask from a 2D tensor mask. @@ -286,11 +301,10 @@ class TFGPT2MainLayer(tf.keras.layers.Layer): head_mask = [None] * self.num_hidden_layers # head_mask = tf.constant([0] * self.num_hidden_layers) - input_shape = shape_list(input_ids) - input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) - inputs_embeds = self.wte(input_ids, mode='embedding') + if inputs_embeds is None: + inputs_embeds = self.wte(input_ids, mode='embedding') position_embeds = self.wpe(position_ids) if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) @@ -408,6 +422,10 @@ GPT2_INPUTS_DOCSTRING = r""" Inputs: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. 
+ **inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. """ @add_start_docstrings("The bare GPT2 Model transformer outputing raw hidden-states without any specific head on top.", @@ -418,7 +436,7 @@ class TFGPT2Model(TFGPT2PreTrainedModel): **last_hidden_state**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, hidden_size)`` Sequence of hidden-states at the last layer of the model. **past**: - list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + list of ``tf.Tensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``: that contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `past` input) to speed up sequential decoding. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) @@ -458,7 +476,7 @@ class TFGPT2LMHeadModel(TFGPT2PreTrainedModel): **prediction_scores**: `tf.Tensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **past**: - list of `tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + list of `tf.Tensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``: that contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `past` input) to speed up sequential decoding. **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) @@ -486,6 +504,9 @@ class TFGPT2LMHeadModel(TFGPT2PreTrainedModel): super(TFGPT2LMHeadModel, self).__init__(config, *inputs, **kwargs) self.transformer = TFGPT2MainLayer(config, name='transformer') + def get_output_embeddings(self): + return self.transformer.wte + def call(self, inputs, **kwargs): transformer_outputs = self.transformer(inputs, **kwargs) hidden_states = transformer_outputs[0] @@ -514,7 +535,7 @@ class TFGPT2DoubleHeadsModel(TFGPT2PreTrainedModel): **mc_prediction_scores**: `tf.Tensor`` of shape ``(batch_size, num_choices)`` Prediction scores of the multiplechoice classification head (scores for each choice before SoftMax). **past**: - list of `tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + list of `tf.Tensor`` (one for each layer) of shape ``(2, batch_size, num_heads, sequence_length, embed_size_per_head)``: that contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `past` input) to speed up sequential decoding. 
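The corrected `past` shape documentation describes the cached key/value tensors used for fast sequential decoding: feed the full context once, then only the newest token on each step. A rough sketch of that loop (the greedy decoding itself is illustrative, not part of this diff):

```python
import tensorflow as tf
from transformers import GPT2Tokenizer, TFGPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = TFGPT2LMHeadModel.from_pretrained('gpt2')

generated = tf.constant([tokenizer.encode("The Manhattan bridge")])
past = None   # becomes a list of (2, batch, num_heads, seq_len, embed_size_per_head) tensors

for _ in range(20):
    # First step feeds the whole context; afterwards only the last token is needed,
    # because `past` already caches the keys/values for everything before it.
    inputs = generated if past is None else generated[:, -1:]
    prediction_scores, past = model(inputs, past=past)[:2]
    next_token = tf.argmax(prediction_scores[:, -1, :], axis=-1, output_type=tf.int32)[:, None]
    generated = tf.concat([generated, next_token], axis=1)

print(tokenizer.decode(generated[0].numpy().tolist()))
```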
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) @@ -556,7 +577,10 @@ class TFGPT2DoubleHeadsModel(TFGPT2PreTrainedModel): self.transformer = TFGPT2MainLayer(config, name='transformer') self.multiple_choice_head = TFSequenceSummary(config, initializer_range=config.initializer_range, name='multiple_choice_head') - def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, mc_token_ids=None, training=False): + def get_output_embeddings(self): + return self.transformer.wte + + def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, mc_token_ids=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] past = inputs[1] if len(inputs) > 1 else past @@ -564,8 +588,9 @@ class TFGPT2DoubleHeadsModel(TFGPT2PreTrainedModel): token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids position_ids = inputs[4] if len(inputs) > 4 else position_ids head_mask = inputs[5] if len(inputs) > 5 else head_mask - mc_token_ids = inputs[6] if len(inputs) > 6 else mc_token_ids - assert len(inputs) <= 7, "Too many inputs." + inputs_embeds = inputs[6] if len(inputs) > 6 else inputs_embeds + mc_token_ids = inputs[7] if len(inputs) > 7 else mc_token_ids + assert len(inputs) <= 8, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') past = inputs.get('past', past) @@ -573,21 +598,25 @@ class TFGPT2DoubleHeadsModel(TFGPT2PreTrainedModel): token_type_ids = inputs.get('token_type_ids', token_type_ids) position_ids = inputs.get('position_ids', position_ids) head_mask = inputs.get('head_mask', head_mask) + inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) mc_token_ids = inputs.get('mc_token_ids', mc_token_ids) - assert len(inputs) <= 7, "Too many inputs." + assert len(inputs) <= 8, "Too many inputs." 
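With `inputs_embeds` inserted before it, `mc_token_ids` is now the last entry in the positional input list of the double-heads model. A sketch of how the two heads are fed; the choice strings are made up, padding and attention masking are simplified, and the multiple-choice head of a plain `gpt2` checkpoint is untrained:

```python
import tensorflow as tf
from transformers import GPT2Tokenizer, TFGPT2DoubleHeadsModel

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = TFGPT2DoubleHeadsModel.from_pretrained('gpt2')

choices = ["Hello, my dog is cute", "Hello, my cat is cute"]
encoded = [tokenizer.encode(choice) for choice in choices]
max_len = max(len(ids) for ids in encoded)
padded = [ids + [0] * (max_len - len(ids)) for ids in encoded]

input_ids = tf.constant(padded)[None, :, :]                       # (batch=1, num_choices=2, seq_len)
# mc_token_ids points at the token whose hidden state feeds the multiple-choice head,
# here simply the last real token of each choice.
mc_token_ids = tf.constant([[len(ids) - 1 for ids in encoded]])   # (batch=1, num_choices=2)

lm_prediction_scores, mc_prediction_scores = model(input_ids, mc_token_ids=mc_token_ids)[:2]
```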
else: input_ids = inputs - input_shapes = shape_list(input_ids) + if input_ids is not None: + input_shapes = shape_list(input_ids) + else: + input_shapes = shape_list(inputs_embeds)[:-1] seq_length = input_shapes[-1] - flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) + flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None - flat_inputs = [flat_input_ids, past, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask] + flat_inputs = [flat_input_ids, past, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, inputs_embeds] transformer_outputs = self.transformer(flat_inputs, training=training) hidden_states = transformer_outputs[0] diff --git a/transformers/modeling_tf_openai.py b/transformers/modeling_tf_openai.py index 747c5171fd3..dac3b175908 100644 --- a/transformers/modeling_tf_openai.py +++ b/transformers/modeling_tf_openai.py @@ -98,7 +98,7 @@ class TFAttention(tf.keras.layers.Layer): # q, k, v have shape [batch, heads, sequence, features] w = tf.matmul(q, k, transpose_b=True) if self.scale: - dk = tf.cast(tf.shape(k)[-1], tf.float32) # scale attention_scores + dk = tf.cast(shape_list(k)[-1], tf.float32) # scale attention_scores w = w / tf.math.sqrt(dk) # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst. @@ -217,6 +217,9 @@ class TFOpenAIGPTMainLayer(tf.keras.layers.Layer): scale=True, name='h_._{}'.format(i)) for i in range(config.n_layer)] + def get_input_embeddings(self): + return self.tokens_embed + def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError @@ -226,26 +229,38 @@ class TFOpenAIGPTMainLayer(tf.keras.layers.Layer): """ raise NotImplementedError - def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, training=False): + def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids position_ids = inputs[3] if len(inputs) > 3 else position_ids head_mask = inputs[4] if len(inputs) > 4 else head_mask - assert len(inputs) <= 5, "Too many inputs." + inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds + assert len(inputs) <= 6, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') attention_mask = inputs.get('attention_mask', attention_mask) token_type_ids = inputs.get('token_type_ids', token_type_ids) position_ids = inputs.get('position_ids', position_ids) head_mask = inputs.get('head_mask', head_mask) - assert len(inputs) <= 5, "Too many inputs." + inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) + assert len(inputs) <= 6, "Too many inputs." 
else: input_ids = inputs + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + if position_ids is None: - position_ids = tf.range(shape_list(input_ids)[-1], dtype=tf.int32)[tf.newaxis, :] + position_ids = tf.range(input_shape[-1], dtype=tf.int32)[tf.newaxis, :] if attention_mask is not None: # We create a 3D attention mask from a 2D tensor mask. @@ -277,11 +292,10 @@ class TFOpenAIGPTMainLayer(tf.keras.layers.Layer): head_mask = [None] * self.num_hidden_layers # head_mask = tf.constant([0] * self.num_hidden_layers) - input_shape = shape_list(input_ids) - input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) - inputs_embeds = self.tokens_embed(input_ids, mode='embedding') + if inputs_embeds is None: + inputs_embeds = self.tokens_embed(input_ids, mode='embedding') position_embeds = self.positions_embed(position_ids) if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) @@ -389,6 +403,10 @@ OPENAI_GPT_INPUTS_DOCSTRING = r""" Inputs: Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. 
""" @add_start_docstrings("The bare OpenAI GPT transformer model outputing raw hidden-states without any specific head on top.", @@ -458,6 +476,9 @@ class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel): super(TFOpenAIGPTLMHeadModel, self).__init__(config, *inputs, **kwargs) self.transformer = TFOpenAIGPTMainLayer(config, name='transformer') + def get_output_embeddings(self): + return self.transformer.tokens_embed + def call(self, inputs, **kwargs): transformer_outputs = self.transformer(inputs, **kwargs) hidden_states = transformer_outputs[0] @@ -520,36 +541,44 @@ class TFOpenAIGPTDoubleHeadsModel(TFOpenAIGPTPreTrainedModel): self.transformer = TFOpenAIGPTMainLayer(config, name='transformer') self.multiple_choice_head = TFSequenceSummary(config, initializer_range=config.initializer_range, name='multiple_choice_head') - def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, mc_token_ids=None, training=False): + def get_output_embeddings(self): + return self.transformer.tokens_embed + + def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, mc_token_ids=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids position_ids = inputs[3] if len(inputs) > 3 else position_ids head_mask = inputs[4] if len(inputs) > 4 else head_mask - mc_token_ids = inputs[5] if len(inputs) > 5 else mc_token_ids - assert len(inputs) <= 6, "Too many inputs." + inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds + mc_token_ids = inputs[6] if len(inputs) > 6 else mc_token_ids + assert len(inputs) <= 7, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') attention_mask = inputs.get('attention_mask', attention_mask) token_type_ids = inputs.get('token_type_ids', token_type_ids) position_ids = inputs.get('position_ids', position_ids) head_mask = inputs.get('head_mask', head_mask) + inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) mc_token_ids = inputs.get('mc_token_ids', mc_token_ids) - assert len(inputs) <= 6, "Too many inputs." + assert len(inputs) <= 7, "Too many inputs." 
else: input_ids = inputs - input_shapes = shape_list(input_ids) + if input_ids is not None: + input_shapes = shape_list(input_ids) + else: + input_shapes = shape_list(inputs_embeds)[:-1] seq_length = input_shapes[-1] - flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) + flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None - flat_inputs = [flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask] + flat_inputs = [flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, inputs_embeds] transformer_outputs = self.transformer(flat_inputs, training=training) hidden_states = transformer_outputs[0] diff --git a/transformers/modeling_tf_pytorch_utils.py b/transformers/modeling_tf_pytorch_utils.py index 88ce4d46102..aa74fcc10ea 100644 --- a/transformers/modeling_tf_pytorch_utils.py +++ b/transformers/modeling_tf_pytorch_utils.py @@ -118,6 +118,9 @@ def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, a new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') + # DialoGPT format + if key == 'lm_head.decoder.weight': + new_key = 'lm_head.weight' if new_key: old_keys.append(key) new_keys.append(new_key) diff --git a/transformers/modeling_tf_roberta.py b/transformers/modeling_tf_roberta.py index 244c83f2b3f..954279f873f 100644 --- a/transformers/modeling_tf_roberta.py +++ b/transformers/modeling_tf_roberta.py @@ -24,7 +24,7 @@ import numpy as np import tensorflow as tf from .configuration_roberta import RobertaConfig -from .modeling_tf_utils import TFPreTrainedModel, get_initializer +from .modeling_tf_utils import TFPreTrainedModel, get_initializer, shape_list from .file_utils import add_start_docstrings from .modeling_tf_bert import TFBertEmbeddings, TFBertMainLayer, gelu, gelu_new @@ -48,13 +48,17 @@ class TFRobertaEmbeddings(TFBertEmbeddings): def _embedding(self, inputs, training=False): """Applies embedding based on inputs tensor.""" - input_ids, position_ids, token_type_ids = inputs + input_ids, position_ids, token_type_ids, inputs_embeds = inputs + + if input_ids is not None: + seq_length = shape_list(input_ids)[1] + else: + seq_length = shape_list(inputs_embeds)[1] - seq_length = tf.shape(input_ids)[1] if position_ids is None: position_ids = tf.range(self.padding_idx+1, seq_length+self.padding_idx+1, dtype=tf.int32)[tf.newaxis, :] - return super(TFRobertaEmbeddings, self)._embedding([input_ids, position_ids, token_type_ids], training=training) + return super(TFRobertaEmbeddings, self)._embedding([input_ids, position_ids, token_type_ids, inputs_embeds], training=training) class TFRobertaMainLayer(TFBertMainLayer): @@ -65,21 +69,8 @@ class TFRobertaMainLayer(TFBertMainLayer): super(TFRobertaMainLayer, self).__init__(config, **kwargs) self.embeddings = TFRobertaEmbeddings(config, name='embeddings') - def call(self, inputs, **kwargs): - # Check that input_ids starts with control token - if isinstance(inputs, (tuple, list)): - input_ids = inputs[0] - elif isinstance(inputs, dict): - input_ids = inputs.get('input_ids') - else: - input_ids = inputs - - if tf.not_equal(tf.reduce_sum(input_ids[:, 0]), 0): - tf.print("A 
sequence with no special tokens has been passed to the RoBERTa model. " - "This model requires special tokens in order to work. " - "Please specify add_special_tokens=True in your encoding.") - - return super(TFRobertaMainLayer, self).call(inputs, **kwargs) + def get_input_embeddings(self): + return self.embeddings class TFRobertaPreTrainedModel(TFPreTrainedModel): @@ -173,6 +164,10 @@ ROBERTA_INPUTS_DOCSTRING = r""" Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. """ @add_start_docstrings("The bare RoBERTa Model transformer outputing raw hidden-states without any specific head on top.", @@ -292,6 +287,9 @@ class TFRobertaForMaskedLM(TFRobertaPreTrainedModel): self.roberta = TFRobertaMainLayer(config, name="roberta") self.lm_head = TFRobertaLMHead(config, self.roberta.embeddings, name="lm_head") + def get_output_embeddings(self): + return self.lm_head.decoder + def call(self, inputs, **kwargs): outputs = self.roberta(inputs, **kwargs) @@ -371,3 +369,54 @@ class TFRobertaForSequenceClassification(TFRobertaPreTrainedModel): outputs = (logits,) + outputs[2:] return outputs # logits, (hidden_states), (attentions) + + +@add_start_docstrings("""RoBERTa Model with a token classification head on top (a linear layer on top of + the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, + ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING) +class TFRobertaForTokenClassification(TFRobertaPreTrainedModel): + r""" + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **scores**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` + Classification scores (before SoftMax). + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``Numpy array`` or ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``Numpy array`` or ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
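The `modeling_tf_pytorch_utils.py` hunk above adds a rename for the DialoGPT checkpoint format when PyTorch weights are converted to TF 2.0. A hedged sketch of where that matters; the local path below is purely hypothetical and assumes a directory containing `config.json` and `pytorch_model.bin`:

```python
from transformers import TFGPT2LMHeadModel

# Loading a DialoGPT-style PyTorch checkpoint into the TF 2.0 GPT-2 model.
# During conversion the PyTorch key 'lm_head.decoder.weight' is renamed to
# 'lm_head.weight' so it lines up with the TF variable naming.
model = TFGPT2LMHeadModel.from_pretrained('./DialoGPT-small', from_pt=True)
```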
+ + Examples:: + + import tensorflow as tf + from transformers import RobertaTokenizer, TFRobertaForTokenClassification + + tokenizer = RobertaTokenizer.from_pretrained('roberta-base') + model = TFRobertaForTokenClassification.from_pretrained('roberta-base') + input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1 + outputs = model(input_ids) + scores = outputs[0] + + """ + def __init__(self, config, *inputs, **kwargs): + super(TFRobertaForTokenClassification, self).__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + + self.roberta = TFRobertaMainLayer(config, name='roberta') + self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + self.classifier = tf.keras.layers.Dense(config.num_labels, + kernel_initializer=get_initializer(config.initializer_range), + name='classifier') + + def call(self, inputs, **kwargs): + outputs = self.roberta(inputs, **kwargs) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output, training=kwargs.get('training', False)) + logits = self.classifier(sequence_output) + + outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here + + return outputs # scores, (hidden_states), (attentions) diff --git a/transformers/modeling_tf_transfo_xl.py b/transformers/modeling_tf_transfo_xl.py index a3e403ce06d..fd325e218e7 100644 --- a/transformers/modeling_tf_transfo_xl.py +++ b/transformers/modeling_tf_transfo_xl.py @@ -337,7 +337,7 @@ class TFAdaptiveEmbedding(tf.keras.layers.Layer): emb_i = tf.einsum('id,de->ie', emb_i, self.emb_projs[i]) mask_idx = tf.cast(tf.where(mask_i), dtype=tf.int64) - emb_flat += tf.scatter_nd(mask_idx, emb_i, tf.cast(tf.shape(emb_flat), dtype=tf.int64)) + emb_flat += tf.scatter_nd(mask_idx, emb_i, tf.cast(shape_list(emb_flat), dtype=tf.int64)) embed_shape = shape_list(inp) + [self.d_proj] embed = tf.reshape(emb_flat, embed_shape) @@ -413,6 +413,9 @@ class TFTransfoXLMainLayer(tf.keras.layers.Layer): name='r_r_bias') super(TFTransfoXLMainLayer, self).build(input_shape) + def get_input_embeddings(self): + return self.word_emb + def _resize_token_embeddings(self, new_num_tokens): return self.word_emb @@ -427,11 +430,11 @@ class TFTransfoXLMainLayer(tf.keras.layers.Layer): def _prune_heads(self, heads): raise NotImplementedError - def init_mems(self, data): + def init_mems(self, bsz): if self.mem_len > 0: mems = [] for i in range(self.n_layer): - empty = tf.zeros([self.mem_len, shape_list(data)[1], self.d_model]) + empty = tf.zeros([self.mem_len, bsz, self.d_model]) mems.append(empty) return mems @@ -461,28 +464,37 @@ class TFTransfoXLMainLayer(tf.keras.layers.Layer): return new_mems - def call(self, inputs, mems=None, head_mask=None, training=False): + def call(self, inputs, mems=None, head_mask=None, inputs_embeds=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] mems = inputs[1] if len(inputs) > 1 else mems head_mask = inputs[2] if len(inputs) > 2 else head_mask - assert len(inputs) <= 3, "Too many inputs." + inputs_embeds = inputs[3] if len(inputs) > 3 else inputs_embeds + assert len(inputs) <= 4, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') mems = inputs.get('mems', mems) head_mask = inputs.get('head_mask', head_mask) - assert len(inputs) <= 3, "Too many inputs." + inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) + assert len(inputs) <= 4, "Too many inputs." 
else: input_ids = inputs # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library # so we transpose here from shape [bsz, len] to shape [len, bsz] - input_ids = tf.transpose(input_ids, perm=(1, 0)) + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_ids = tf.transpose(input_ids, perm=(1, 0)) + qlen, bsz = shape_list(input_ids) + elif inputs_embeds is not None: + inputs_embeds = tf.transpose(inputs_embeds, perm=(1, 0, 2)) + qlen, bsz = shape_list(inputs_embeds)[:2] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") if mems is None: - mems = self.init_mems(input_ids) - - qlen, bsz = shape_list(input_ids) + mems = self.init_mems(bsz) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head @@ -494,7 +506,10 @@ class TFTransfoXLMainLayer(tf.keras.layers.Layer): else: head_mask = [None] * self.n_layer - word_emb = self.word_emb(input_ids) + if inputs_embeds is not None: + word_emb = inputs_embeds + else: + word_emb = self.word_emb(input_ids) mlen = shape_list(mems[0])[0] if mems is not None else 0 klen = mlen + qlen @@ -626,6 +641,10 @@ TRANSFO_XL_INPUTS_DOCSTRING = r""" Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. """ @add_start_docstrings("The bare Bert Model transformer outputing raw hidden-states without any specific head on top.", @@ -716,28 +735,33 @@ class TFTransfoXLLMHeadModel(TFTransfoXLPreTrainedModel): def reset_length(self, tgt_len, ext_len, mem_len): self.transformer.reset_length(tgt_len, ext_len, mem_len) - def init_mems(self, data): - return self.transformer.init_mems(data) + def init_mems(self, bsz): + return self.transformer.init_mems(bsz) - def call(self, inputs, mems=None, head_mask=None, labels=None, training=False): + def call(self, inputs, mems=None, head_mask=None, inputs_embeds=None, labels=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] mems = inputs[1] if len(inputs) > 1 else mems head_mask = inputs[2] if len(inputs) > 2 else head_mask - labels = inputs[3] if len(inputs) > 3 else labels - assert len(inputs) <= 4, "Too many inputs." + inputs_embeds = inputs[3] if len(inputs) > 3 else inputs_embeds + labels = inputs[4] if len(inputs) > 4 else labels + assert len(inputs) <= 5, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') mems = inputs.get('mems', mems) head_mask = inputs.get('head_mask', head_mask) + inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) labels = inputs.get('labels', labels) - assert len(inputs) <= 4, "Too many inputs." + assert len(inputs) <= 5, "Too many inputs." 
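# For illustration: with the new `inputs_embeds` slot inserted before `labels`, the usual TF calling
# conventions for TFTransfoXLLMHeadModel line up as sketched here (assumes `model`, `input_ids`,
# `mems` and `labels` are already defined; not an exhaustive list of accepted inputs):
#     model(input_ids)                                                  # single tensor
#     model([input_ids, mems, None, None, labels])                      # positional list, labels now at index 4
#     model({'input_ids': input_ids, 'mems': mems, 'labels': labels})   # dict of named inputs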
else: input_ids = inputs - bsz, tgt_len = shape_list(input_ids)[:2] + if input_ids is not None: + bsz, tgt_len = shape_list(input_ids)[:2] + else: + bsz, tgt_len = shape_list(inputs_embeds)[:2] - transformer_outputs = self.transformer([input_ids, mems, head_mask], training=training) + transformer_outputs = self.transformer([input_ids, mems, head_mask, inputs_embeds], training=training) last_hidden = transformer_outputs[0] pred_hid = last_hidden[:, -tgt_len:] diff --git a/transformers/modeling_tf_transfo_xl_utilities.py b/transformers/modeling_tf_transfo_xl_utilities.py index d7666a650e1..e6a6dfe686d 100644 --- a/transformers/modeling_tf_transfo_xl_utilities.py +++ b/transformers/modeling_tf_transfo_xl_utilities.py @@ -105,7 +105,7 @@ class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer): @staticmethod def _gather_logprob(logprob, target): - lp_size = tf.shape(logprob) + lp_size = shape_list(logprob) r = tf.range(lp_size[0]) idx = tf.stack([r, target], 1) return tf.gather_nd(logprob, idx) @@ -159,7 +159,7 @@ class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer): cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: - loss += tf.scatter_nd(mask_idx, -cur_logprob, tf.cast(tf.shape(loss), dtype=tf.int64)) + loss += tf.scatter_nd(mask_idx, -cur_logprob, tf.cast(shape_list(loss), dtype=tf.int64)) out = tf.concat(out, axis=-1) if target is not None: diff --git a/transformers/modeling_tf_utils.py b/transformers/modeling_tf_utils.py index a96e2765fd8..d9a93af21b9 100644 --- a/transformers/modeling_tf_utils.py +++ b/transformers/modeling_tf_utils.py @@ -24,7 +24,8 @@ import os import tensorflow as tf from .configuration_utils import PretrainedConfig -from .file_utils import cached_path, WEIGHTS_NAME, TF_WEIGHTS_NAME, TF2_WEIGHTS_NAME +from .file_utils import (TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_NAME, + cached_path, hf_bucket_url, is_remote_url) from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model logger = logging.getLogger(__name__) @@ -35,7 +36,7 @@ class TFPreTrainedModel(tf.keras.Model): r""" Base class for all TF models. :class:`~transformers.TFPreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models - as well as a few methods commons to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads. + as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads. Class attributes (overridden by derived classes): - ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture. @@ -51,7 +52,15 @@ class TFPreTrainedModel(tf.keras.Model): config_class = None pretrained_model_archive_map = {} base_model_prefix = "" - dummy_inputs = tf.constant(DUMMY_INPUTS) # dummy inputs to build the network + + @property + def dummy_inputs(self): + """ Dummy inputs to build the network. 
+ + Returns: + tf.Tensor with dummy inputs + """ + return tf.constant(DUMMY_INPUTS) def __init__(self, config, *inputs, **kwargs): super(TFPreTrainedModel, self).__init__(*inputs, **kwargs) @@ -65,6 +74,21 @@ class TFPreTrainedModel(tf.keras.Model): # Save config in model self.config = config + def get_input_embeddings(self): + """ Get model's input embeddings + """ + base_model = getattr(self, self.base_model_prefix, self) + if base_model is not self: + return base_model.get_input_embeddings() + else: + raise NotImplementedError + + def get_output_embeddings(self): + """ Get model's output embeddings + Return None if the model doesn't have output embeddings + """ + return None # Overwrite for models with output embeddings + def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None): """ Build a resized Embedding Variable from a provided token Embedding Module. Increasing the size will add newly initialized vectors at the end @@ -153,6 +177,7 @@ class TFPreTrainedModel(tf.keras.Model): pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. + - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `PyTorch state_dict save file` (e.g. `./pt_model/pytorch_model.bin`). In this case, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the PyTorch checkpoint in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards. @@ -176,6 +201,9 @@ class TFPreTrainedModel(tf.keras.Model): force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. + resume_download: (`optional`) boolean, default False: + Do not delete incompletely received file. Attempt to resume the download if such a file exists. + proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request.
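A usage sketch for the loading options documented above (identifier names, ``resume_download`` and ``proxies``; illustrative only, and it assumes the ``dbmdz/bert-base-german-cased`` identifier also hosts TF 2.0 weights and that the proxy address is only a placeholder):

    from transformers import TFBertModel

    # community-uploaded identifier, resumable download and a per-protocol proxy
    model = TFBertModel.from_pretrained('dbmdz/bert-base-german-cased',
                                        resume_download=True,
                                        proxies={'http': 'foo.bar:3128'})
    embedding_layer = model.get_input_embeddings()   # new base-class accessor; delegates to the main layer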
@@ -201,6 +229,7 @@ class TFPreTrainedModel(tf.keras.Model): cache_dir = kwargs.pop('cache_dir', None) from_pt = kwargs.pop('from_pt', False) force_download = kwargs.pop('force_download', False) + resume_download = kwargs.pop('resume_download', False) proxies = kwargs.pop('proxies', None) # Load config @@ -209,6 +238,7 @@ class TFPreTrainedModel(tf.keras.Model): pretrained_model_name_or_path, *model_args, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, + resume_download=resume_download, **kwargs ) else: @@ -229,14 +259,19 @@ class TFPreTrainedModel(tf.keras.Model): raise EnvironmentError("Error no file named {} found in directory {} or `from_pt` set to False".format( [WEIGHTS_NAME, TF2_WEIGHTS_NAME], pretrained_model_name_or_path)) - elif os.path.isfile(pretrained_model_name_or_path): + elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path + elif os.path.isfile(pretrained_model_name_or_path + ".index"): + archive_file = pretrained_model_name_or_path + ".index" else: - raise EnvironmentError("Error file {} not found".format(pretrained_model_name_or_path)) + archive_file = hf_bucket_url(pretrained_model_name_or_path, postfix=TF2_WEIGHTS_NAME) + if from_pt: + raise EnvironmentError("Loading a TF model from a PyTorch checkpoint is not supported when using a model identifier name.") # redirect to the cache, if necessary try: - resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies) + resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download, + resume_download=resume_download, proxies=proxies) except EnvironmentError as e: if pretrained_model_name_or_path in cls.pretrained_model_archive_map: logger.error( @@ -439,7 +474,7 @@ class TFSequenceSummary(tf.keras.layers.Layer): elif self.summary_type == 'first': output = hidden_states[:, 0] elif self.summary_type == 'mean': - output = tf.mean(hidden_states, axis=1) + output = tf.reduce_mean(hidden_states, axis=1) elif self.summary_type == 'cls_index': hidden_shape = shape_list(hidden_states) # e.g. [batch, num choices, seq length, hidden dims] if cls_index is None: @@ -477,10 +512,10 @@ def shape_list(x): return [dynamic[i] if s is None else s for i, s in enumerate(static)] def get_initializer(initializer_range=0.02): - """Creates a `tf.initializers.truncated_normal` with the given range. - Args: - initializer_range: float, initializer range for stddev. - Returns: - TruncatedNormal initializer with stddev = `initializer_range`. - """ - return tf.keras.initializers.TruncatedNormal(stddev=initializer_range) + """Creates a `tf.initializers.truncated_normal` with the given range. + Args: + initializer_range: float, initializer range for stddev. + Returns: + TruncatedNormal initializer with stddev = `initializer_range`. 
+ """ + return tf.keras.initializers.TruncatedNormal(stddev=initializer_range) diff --git a/transformers/modeling_tf_xlm.py b/transformers/modeling_tf_xlm.py index 84de1517ee3..6f11b0537df 100644 --- a/transformers/modeling_tf_xlm.py +++ b/transformers/modeling_tf_xlm.py @@ -84,7 +84,8 @@ def get_masks(slen, lengths, causal, padding_mask=None, dtype=tf.float32): attn_mask = mask # sanity check - assert shape_list(mask) == [bs, slen] + # assert shape_list(mask) == [bs, slen] + tf.debugging.assert_equal(shape_list(mask), [bs, slen]) assert causal is False or shape_list(attn_mask) == [bs, slen, slen] mask = tf.cast(mask, dtype=dtype) @@ -276,6 +277,9 @@ class TFXLMMainLayer(tf.keras.layers.Layer): self.prune_heads({int(layer): list(map(int, heads))}) + def get_input_embeddings(self): + return self.embeddings + def _resize_token_embeddings(self, new_num_tokens): raise NotImplementedError @@ -287,7 +291,7 @@ class TFXLMMainLayer(tf.keras.layers.Layer): raise NotImplementedError def call(self, inputs, attention_mask=None, langs=None, token_type_ids=None, - position_ids=None, lengths=None, cache=None, head_mask=None, + position_ids=None, lengths=None, cache=None, head_mask=None, inputs_embeds=None, training=False): # removed: src_enc=None, src_len=None if isinstance(inputs, (tuple, list)): input_ids = inputs[0] @@ -298,7 +302,8 @@ class TFXLMMainLayer(tf.keras.layers.Layer): lengths = inputs[5] if len(inputs) > 5 else lengths cache = inputs[6] if len(inputs) > 6 else cache head_mask = inputs[7] if len(inputs) > 7 else head_mask - assert len(inputs) <= 8, "Too many inputs." + inputs_embeds = inputs[8] if len(inputs) > 8 else inputs_embeds + assert len(inputs) <= 9, "Too many inputs." elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') attention_mask = inputs.get('attention_mask', attention_mask) @@ -308,17 +313,30 @@ class TFXLMMainLayer(tf.keras.layers.Layer): lengths = inputs.get('lengths', lengths) cache = inputs.get('cache', cache) head_mask = inputs.get('head_mask', head_mask) - assert len(inputs) <= 8, "Too many inputs." + inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) + assert len(inputs) <= 9, "Too many inputs." 
else: input_ids = inputs + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + bs, slen = shape_list(input_ids) + elif inputs_embeds is not None: + bs, slen = shape_list(inputs_embeds)[:2] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + if lengths is None: - lengths = tf.reduce_sum(tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=tf.int32), axis=1) + if input_ids is not None: + lengths = tf.reduce_sum(tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=tf.int32), axis=1) + else: + lengths = tf.convert_to_tensor([slen]*bs, tf.int32) # mask = input_ids != self.pad_index # check inputs - bs, slen = shape_list(input_ids) - assert shape_list(lengths)[0] == bs + # assert shape_list(lengths)[0] == bs + tf.debugging.assert_equal(shape_list(lengths)[0], bs) # assert lengths.max().item() <= slen # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0 # assert (src_enc is None) == (src_len is None) @@ -335,12 +353,14 @@ class TFXLMMainLayer(tf.keras.layers.Layer): if position_ids is None: position_ids = tf.expand_dims(tf.range(slen), axis=0) else: - assert shape_list(position_ids) == [bs, slen] # (slen, bs) + # assert shape_list(position_ids) == [bs, slen] # (slen, bs) + tf.debugging.assert_equal(shape_list(position_ids), [bs, slen]) # position_ids = position_ids.transpose(0, 1) # langs if langs is not None: - assert shape_list(langs) == [bs, slen] # (slen, bs) + # assert shape_list(langs) == [bs, slen] # (slen, bs) + tf.debugging.assert_equal(shape_list(langs), [bs, slen]) # langs = langs.transpose(0, 1) # Prepare head mask if needed @@ -354,7 +374,7 @@ class TFXLMMainLayer(tf.keras.layers.Layer): head_mask = [None] * self.n_layers # do not recompute cached elements - if cache is not None: + if cache is not None and input_ids is not None: _slen = slen - cache['slen'] input_ids = input_ids[:, -_slen:] position_ids = position_ids[:, -_slen:] @@ -364,8 +384,10 @@ class TFXLMMainLayer(tf.keras.layers.Layer): attn_mask = attn_mask[:, -_slen:] # embeddings - tensor = self.embeddings(input_ids) - tensor = tensor + self.position_embeddings(position_ids) + if inputs_embeds is None: + inputs_embeds = self.embeddings(input_ids) + + tensor = inputs_embeds + self.position_embeddings(position_ids) if langs is not None and self.use_lang_emb: tensor = tensor + self.lang_embeddings(langs) if token_type_ids is not None: @@ -526,6 +548,10 @@ XLM_INPUTS_DOCSTRING = r""" Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. 
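A short usage sketch for the new ``inputs_embeds`` input on the TF side (illustrative only; it assumes the ``xlm-mlm-en-2048`` shortcut provides TF 2.0 weights and that the shared embedding layer can be called directly to perform the lookup):

    import tensorflow as tf
    from transformers import XLMTokenizer, TFXLMModel

    tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
    model = TFXLMModel.from_pretrained('xlm-mlm-en-2048')

    input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
    inputs_embeds = model.get_input_embeddings()(input_ids)   # same lookup the model performs internally
    outputs = model({'inputs_embeds': inputs_embeds})         # input_ids deliberately omitted
    last_hidden_states = outputs[0]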
""" @add_start_docstrings("The bare XLM Model transformer outputing raw hidden-states without any specific head on top.", @@ -633,6 +659,8 @@ class TFXLMWithLMHeadModel(TFXLMPreTrainedModel): self.transformer = TFXLMMainLayer(config, name='transformer') self.pred_layer = TFXLMPredLayer(config, self.transformer.embeddings, name='pred_layer_._proj') + def get_output_embeddings(self): + return self.pred_layer.input_embeddings def call(self, inputs, **kwargs): transformer_outputs = self.transformer(inputs, **kwargs) diff --git a/transformers/modeling_tf_xlnet.py b/transformers/modeling_tf_xlnet.py index 8a25be78c11..759b57d8351 100644 --- a/transformers/modeling_tf_xlnet.py +++ b/transformers/modeling_tf_xlnet.py @@ -112,8 +112,7 @@ class TFXLNetRelativeAttention(tf.keras.layers.Layer): def prune_heads(self, heads): raise NotImplementedError - @staticmethod - def rel_shift(x, klen=-1): + def rel_shift(self, x, klen=-1): """perform relative shift to form the relative attention score.""" x_size = shape_list(x) @@ -135,7 +134,7 @@ class TFXLNetRelativeAttention(tf.keras.layers.Layer): # position based attention score bd = tf.einsum('ibnd,jbnd->ijbn', q_head + self.r_r_bias, k_head_r) - bd = self.rel_shift(bd, klen=ac.shape[1]) + bd = self.rel_shift(bd, klen=shape_list(ac)[1]) # segment based attention score if seg_mat is None: @@ -192,7 +191,7 @@ class TFXLNetRelativeAttention(tf.keras.layers.Layer): if g is not None: ###### Two-stream attention with relative positional encoding. # content based attention score - if mems is not None and mems.shape.ndims > 1: + if mems is not None and len(shape_list(mems)) > 1: cat = tf.concat([mems, h], axis=0) else: cat = h @@ -252,7 +251,7 @@ class TFXLNetRelativeAttention(tf.keras.layers.Layer): else: ###### Multi-head attention with relative positional encoding - if mems is not None and mems.shape.ndims > 1: + if mems is not None and len(shape_list(mems)) > 1: cat = tf.concat([mems, h], axis=0) else: cat = h @@ -371,6 +370,9 @@ class TFXLNetMainLayer(tf.keras.layers.Layer): self.layer = [TFXLNetLayer(config, name='layer_._{}'.format(i)) for i in range(config.n_layer)] self.dropout = tf.keras.layers.Dropout(config.dropout) + def get_input_embeddings(self): + return self.word_embedding + def build(self, input_shape): initializer = get_initializer(self.initializer_range) self.mask_emb = self.add_weight(shape=(1, 1, self.d_model), @@ -484,7 +486,7 @@ class TFXLNetMainLayer(tf.keras.layers.Layer): return pos_emb def call(self, inputs, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, - token_type_ids=None, input_mask=None, head_mask=None, training=False): + token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, training=False): if isinstance(inputs, (tuple, list)): input_ids = inputs[0] attention_mask = inputs[1] if len(inputs) > 1 else attention_mask @@ -494,7 +496,8 @@ class TFXLNetMainLayer(tf.keras.layers.Layer): token_type_ids = inputs[5] if len(inputs) > 5 else token_type_ids input_mask = inputs[6] if len(inputs) > 6 else input_mask head_mask = inputs[7] if len(inputs) > 7 else head_mask - assert len(inputs) <= 8, "Too many inputs." + inputs_embeds = inputs[8] if len(inputs) > 8 else inputs_embeds + assert len(inputs) <= 9, "Too many inputs." 
elif isinstance(inputs, dict): input_ids = inputs.get('input_ids') attention_mask = inputs.get('attention_mask', attention_mask) @@ -504,7 +507,8 @@ class TFXLNetMainLayer(tf.keras.layers.Layer): token_type_ids = inputs.get('token_type_ids', token_type_ids) input_mask = inputs.get('input_mask', input_mask) head_mask = inputs.get('head_mask', head_mask) - assert len(inputs) <= 8, "Too many inputs." + inputs_embeds = inputs.get('inputs_embeds', inputs_embeds) + assert len(inputs) <= 9, "Too many inputs." else: input_ids = inputs @@ -512,14 +516,23 @@ class TFXLNetMainLayer(tf.keras.layers.Layer): # but we want a unified interface in the library with the batch size on the first dimension # so we move here the first dimension (batch) to the end - input_ids = tf.transpose(input_ids, perm=(1, 0)) + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_ids = tf.transpose(input_ids, perm=(1, 0)) + qlen, bsz = shape_list(input_ids)[:2] + elif inputs_embeds is not None: + inputs_embeds = tf.transpose(inputs_embeds, perm=(1, 0, 2)) + qlen, bsz = shape_list(inputs_embeds)[:2] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + token_type_ids = tf.transpose(token_type_ids, perm=(1, 0)) if token_type_ids is not None else None input_mask = tf.transpose(input_mask, perm=(1, 0)) if input_mask is not None else None attention_mask = tf.transpose(attention_mask, perm=(1, 0)) if attention_mask is not None else None perm_mask = tf.transpose(perm_mask, perm=(1, 2, 0)) if perm_mask is not None else None target_mapping = tf.transpose(target_mapping, perm=(1, 2, 0)) if target_mapping is not None else None - qlen, bsz = shape_list(input_ids)[:2] mlen = shape_list(mems[0])[0] if mems is not None and mems[0] is not None else 0 klen = mlen + qlen @@ -551,7 +564,7 @@ class TFXLNetMainLayer(tf.keras.layers.Layer): if data_mask is not None: # all mems can be attended to - mems_mask = tf.zeros([tf.shape(data_mask)[0], mlen, bsz], + mems_mask = tf.zeros([shape_list(data_mask)[0], mlen, bsz], dtype=dtype_float) data_mask = tf.concat([mems_mask, data_mask], axis=1) if attn_mask is None: @@ -570,10 +583,13 @@ class TFXLNetMainLayer(tf.keras.layers.Layer): non_tgt_mask = None ##### Word embeddings and prepare h & g hidden states - word_emb_k = self.word_embedding(input_ids) + if inputs_embeds is not None: + word_emb_k = inputs_embeds + else: + word_emb_k = self.word_embedding(input_ids) output_h = self.dropout(word_emb_k, training=training) if target_mapping is not None: - word_emb_q = tf.tile(self.mask_emb, [tf.shape(target_mapping)[0], bsz, 1]) + word_emb_q = tf.tile(self.mask_emb, [shape_list(target_mapping)[0], bsz, 1]) # else: # We removed the inp_q input which was same as target mapping # inp_q_ext = inp_q[:, :, None] # word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k @@ -762,6 +778,10 @@ XLNET_INPUTS_DOCSTRING = r""" Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. 
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. """ @add_start_docstrings("The bare XLNet Model transformer outputing raw hidden-states without any specific head on top.", @@ -850,6 +870,9 @@ class TFXLNetLMHeadModel(TFXLNetPreTrainedModel): self.transformer = TFXLNetMainLayer(config, name='transformer') self.lm_loss = TFXLNetLMHead(config, self.transformer.word_embedding, name='lm_loss') + def get_output_embeddings(self): + return self.lm_loss.input_embeddings + def call(self, inputs, **kwargs): transformer_outputs = self.transformer(inputs, **kwargs) hidden_state = transformer_outputs[0] @@ -915,6 +938,59 @@ class TFXLNetForSequenceClassification(TFXLNetPreTrainedModel): return outputs # return logits, (mems), (hidden states), (attentions) +@add_start_docstrings("""XLNet Model with a token classification head on top (a linear layer on top of + the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, + XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING) +class TFXLNetForTokenClassification(TFXLNetPreTrainedModel): + r""" + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **scores**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` + Classification scores (before SoftMax). + **mems**: (`optional`, returned when ``config.mem_len > 0``) + list of ``tf.Tensor`` (one for each layer): + that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model + if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context. + See details in the docstring of the `mems` input above. + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
+ + Examples:: + + import tensorflow as tf + from transformers import XLNetTokenizer, TFXLNetForTokenClassification + + tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased') + model = TFXLNetForTokenClassification.from_pretrained('xlnet-large-cased') + input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1 + outputs = model(input_ids) + scores = outputs[0] + + """ + def __init__(self, config, *inputs, **kwargs): + super(TFXLNetForTokenClassification, self).__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + + self.transformer = TFXLNetMainLayer(config, name='transformer') + self.classifier = tf.keras.layers.Dense(config.num_labels, + kernel_initializer=get_initializer(config.initializer_range), + name='classifier') + + def call(self, inputs, **kwargs): + transformer_outputs = self.transformer(inputs, **kwargs) + output = transformer_outputs[0] + + logits = self.classifier(output) + + outputs = (logits,) + transformer_outputs[1:] # Keep mems, hidden states, attentions if they are present + + return outputs # return logits, (mems), (hidden states), (attentions) + + # @add_start_docstrings("""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of # the hidden-states output to compute `span start logits` and `span end logits`). """, # XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING) diff --git a/transformers/modeling_transfo_xl.py b/transformers/modeling_transfo_xl.py index 6d430e18044..a6a82f0dfed 100644 --- a/transformers/modeling_transfo_xl.py +++ b/transformers/modeling_transfo_xl.py @@ -553,6 +553,10 @@ TRANSFO_XL_INPUTS_DOCSTRING = r""" Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix.
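The PyTorch side accepts the same ``inputs_embeds`` argument, and ``init_mems`` now only needs the batch size; a short sketch (illustrative only, assuming the ``transfo-xl-wt103`` shortcut weights):

    import torch
    from transformers import TransfoXLTokenizer, TransfoXLModel

    tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
    model = TransfoXLModel.from_pretrained('transfo-xl-wt103')

    input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
    inputs_embeds = model.get_input_embeddings()(input_ids)   # adaptive embedding lookup
    outputs = model(inputs_embeds=inputs_embeds)               # mems are initialised from the batch size alone
    last_hidden_states, mems = outputs[:2]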
""" @add_start_docstrings("The bare Bert Model transformer outputting raw hidden-states without any specific head on top.", @@ -639,9 +643,12 @@ class TransfoXLModel(TransfoXLPreTrainedModel): self.init_weights() - def _resize_token_embeddings(self, new_num_tokens): + def get_input_embeddings(self): return self.word_emb + def set_input_embeddings(self, new_embeddings): + self.word_emb = new_embeddings + def backward_compatible(self): self.sample_softmax = -1 @@ -654,12 +661,12 @@ class TransfoXLModel(TransfoXLPreTrainedModel): logger.info("Head pruning is not implemented for Transformer-XL model") pass - def init_mems(self, data): + def init_mems(self, bsz): if self.mem_len > 0: mems = [] param = next(self.parameters()) for i in range(self.n_layer): - empty = torch.zeros(self.mem_len, data.size(1), self.config.d_model, + empty = torch.zeros(self.mem_len, bsz, self.config.d_model, dtype=param.dtype, device=param.device) mems.append(empty) @@ -690,15 +697,22 @@ class TransfoXLModel(TransfoXLPreTrainedModel): return new_mems - def forward(self, input_ids, mems=None, head_mask=None): + def forward(self, input_ids=None, mems=None, head_mask=None, inputs_embeds=None): # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library # so we transpose here from shape [bsz, len] to shape [len, bsz] - input_ids = input_ids.transpose(0, 1).contiguous() + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_ids = input_ids.transpose(0, 1).contiguous() + qlen, bsz = input_ids.size() + elif inputs_embeds is not None: + inputs_embeds = inputs_embeds.transpose(0, 1).contiguous() + qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") if mems is None: - mems = self.init_mems(input_ids) - - qlen, bsz = input_ids.size() + mems = self.init_mems(bsz) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head @@ -715,7 +729,10 @@ class TransfoXLModel(TransfoXLPreTrainedModel): else: head_mask = [None] * self.n_layer - word_emb = self.word_emb(input_ids) + if inputs_embeds is not None: + word_emb = inputs_embeds + else: + word_emb = self.word_emb(input_ids) mlen = mems[0].size(0) if mems is not None else 0 klen = mlen + qlen @@ -826,7 +843,6 @@ class TransfoXLLMHeadModel(TransfoXLPreTrainedModel): self.crit = ProjectedAdaptiveLogSoftmax(config.n_token, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val) self.init_weights() - self.tie_weights() def tie_weights(self): """ @@ -858,14 +874,18 @@ class TransfoXLLMHeadModel(TransfoXLPreTrainedModel): def reset_length(self, tgt_len, ext_len, mem_len): self.transformer.reset_length(tgt_len, ext_len, mem_len) - def init_mems(self, data): - return self.transformer.init_mems(data) + def init_mems(self, bsz): + return self.transformer.init_mems(bsz) - def forward(self, input_ids, mems=None, head_mask=None, labels=None): - bsz = input_ids.size(0) - tgt_len = input_ids.size(1) + def forward(self, input_ids=None, mems=None, head_mask=None, inputs_embeds=None, labels=None): + if input_ids is not None: + bsz, tgt_len = input_ids.size(0), input_ids.size(1) + elif inputs_embeds is not None: + bsz, tgt_len = inputs_embeds.size(0), inputs_embeds.size(1) + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") - transformer_outputs = self.transformer(input_ids, 
mems=mems, head_mask=head_mask) + transformer_outputs = self.transformer(input_ids, mems=mems, head_mask=head_mask, inputs_embeds=inputs_embeds) last_hidden = transformer_outputs[0] pred_hid = last_hidden[:, -tgt_len:] diff --git a/transformers/modeling_utils.py b/transformers/modeling_utils.py index d082137d5db..676f3559867 100644 --- a/transformers/modeling_utils.py +++ b/transformers/modeling_utils.py @@ -31,7 +31,8 @@ from torch.nn import CrossEntropyLoss from torch.nn import functional as F from .configuration_utils import PretrainedConfig -from .file_utils import cached_path, WEIGHTS_NAME, TF_WEIGHTS_NAME, TF2_WEIGHTS_NAME +from .file_utils import (TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_NAME, + cached_path, hf_bucket_url, is_remote_url) logger = logging.getLogger(__name__) @@ -53,7 +54,7 @@ class PreTrainedModel(nn.Module): r""" Base class for all models. :class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models - as well as a few methods commons to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads. + as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads. Class attributes (overridden by derived classes): - ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture. @@ -83,6 +84,94 @@ class PreTrainedModel(nn.Module): # Save config in model self.config = config + @property + def base_model(self): + return getattr(self, self.base_model_prefix, self) + + def get_input_embeddings(self): + """ Get model's input embeddings + """ + base_model = getattr(self, self.base_model_prefix, self) + if base_model is not self: + return base_model.get_input_embeddings() + else: + raise NotImplementedError + + def set_input_embeddings(self, value): + """ Set model's input embeddings + """ + base_model = getattr(self, self.base_model_prefix, self) + if base_model is not self: + base_model.set_input_embeddings(value) + else: + raise NotImplementedError + + def get_output_embeddings(self): + """ Get model's output embeddings + Return None if the model doesn't have output embeddings + """ + return None # Overwrite for models with output embeddings + + def tie_weights(self): + """ Make sure we are sharing the input and output embeddings. + Export to TorchScript can't handle parameter sharing so we are cloning them instead. 
+ """ + output_embeddings = self.get_output_embeddings() + if output_embeddings is not None: + self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) + + def _tie_or_clone_weights(self, output_embeddings, input_embeddings): + """ Tie or clone module weights depending of weither we are using TorchScript or not + """ + if self.config.torchscript: + output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone()) + else: + output_embeddings.weight = input_embeddings.weight + + if hasattr(output_embeddings, 'bias') and output_embeddings.bias is not None: + output_embeddings.bias.data = torch.nn.functional.pad( + output_embeddings.bias.data, + (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]), + 'constant', + 0 + ) + if hasattr(output_embeddings, 'out_features') and hasattr(input_embeddings, 'num_embeddings'): + output_embeddings.out_features = input_embeddings.num_embeddings + + def resize_token_embeddings(self, new_num_tokens=None): + """ Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size. + Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. + + Arguments: + + new_num_tokens: (`optional`) int: + New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. + If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model. + + Return: ``torch.nn.Embeddings`` + Pointer to the input tokens Embeddings Module of the model + """ + base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed + model_embeds = base_model._resize_token_embeddings(new_num_tokens) + if new_num_tokens is None: + return model_embeds + + # Update base model and current model config + self.config.vocab_size = new_num_tokens + base_model.vocab_size = new_num_tokens + + # Tie weights again if needed + if hasattr(self, 'tie_weights'): + self.tie_weights() + + return model_embeds + + def _resize_token_embeddings(self, new_num_tokens): + old_embeddings = self.get_input_embeddings() + new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) + self.set_input_embeddings(new_embeddings) + return self.get_input_embeddings() + def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None): """ Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly initialized vectors at the end @@ -117,50 +206,6 @@ class PreTrainedModel(nn.Module): return new_embeddings - def _tie_or_clone_weights(self, first_module, second_module): - """ Tie or clone module weights depending of weither we are using TorchScript or not - """ - if self.config.torchscript: - first_module.weight = nn.Parameter(second_module.weight.clone()) - else: - first_module.weight = second_module.weight - - if hasattr(first_module, 'bias') and first_module.bias is not None: - first_module.bias.data = torch.nn.functional.pad( - first_module.bias.data, - (0, first_module.weight.shape[0] - first_module.bias.shape[0]), - 'constant', - 0 - ) - - def resize_token_embeddings(self, new_num_tokens=None): - """ Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size. - Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. 
- - Arguments: - - new_num_tokens: (`optional`) int: - New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. - If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model. - - Return: ``torch.nn.Embeddings`` - Pointer to the input tokens Embeddings Module of the model - """ - base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed - model_embeds = base_model._resize_token_embeddings(new_num_tokens) - if new_num_tokens is None: - return model_embeds - - # Update base model and current model config - self.config.vocab_size = new_num_tokens - base_model.vocab_size = new_num_tokens - - # Tie weights again if needed - if hasattr(self, 'tie_weights'): - self.tie_weights() - - return model_embeds - def init_weights(self): """ Initialize and prunes weights if needed. """ # Initialize weights @@ -170,6 +215,9 @@ class PreTrainedModel(nn.Module): if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) + # Tie weights if needed + self.tie_weights() + def prune_heads(self, heads_to_prune): """ Prunes heads of the base model. @@ -178,14 +226,12 @@ class PreTrainedModel(nn.Module): heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`). E.g. {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2. """ - base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed - # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads for layer, heads in heads_to_prune.items(): union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads) self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON - base_model._prune_heads(heads_to_prune) + self.base_model._prune_heads(heads_to_prune) def save_pretrained(self, save_directory): """ Save a model and its configuration file to a directory, so that it @@ -193,7 +239,7 @@ class PreTrainedModel(nn.Module): """ assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved" - # Only save the model it-self if we are using distributed training + # Only save the model itself if we are using distributed training model_to_save = self.module if hasattr(self, 'module') else self # Save configuration file @@ -220,6 +266,7 @@ class PreTrainedModel(nn.Module): pretrained_model_name_or_path: either: - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``. + - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``. - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. - None if you are both providing the configuration and state dictionary (resp. 
with keyword arguments ``config`` and ``state_dict``) @@ -246,6 +293,9 @@ class PreTrainedModel(nn.Module): force_download: (`optional`) boolean, default False: Force to (re-)download the model weights and configuration files and override the cached versions if they exists. + resume_download: (`optional`) boolean, default False: + Do not delete incompletely received file. Attempt to resume the download if such a file exists. + proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. @@ -270,11 +320,17 @@ class PreTrainedModel(nn.Module): model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ + if pretrained_model_name_or_path is not None and ( + "albert" in pretrained_model_name_or_path and "v2" in pretrained_model_name_or_path): + logger.warning("There is currently an upstream reproducibility issue with ALBERT v2 models. Please see " + + "https://github.com/google-research/google-research/issues/119 for more information.") + config = kwargs.pop('config', None) state_dict = kwargs.pop('state_dict', None) cache_dir = kwargs.pop('cache_dir', None) from_tf = kwargs.pop('from_tf', False) force_download = kwargs.pop('force_download', False) + resume_download = kwargs.pop('resume_download', False) proxies = kwargs.pop('proxies', None) output_loading_info = kwargs.pop('output_loading_info', False) @@ -284,6 +340,8 @@ class PreTrainedModel(nn.Module): pretrained_model_name_or_path, *model_args, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, + resume_download=resume_download, + proxies=proxies, **kwargs ) else: @@ -307,15 +365,21 @@ class PreTrainedModel(nn.Module): raise EnvironmentError("Error no file named {} found in directory {} or `from_tf` set to False".format( [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"], pretrained_model_name_or_path)) - elif os.path.isfile(pretrained_model_name_or_path): + elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path - else: - assert from_tf, "Error finding file {}, no file or TF 1.X checkpoint found".format(pretrained_model_name_or_path) + elif os.path.isfile(pretrained_model_name_or_path + ".index"): + assert from_tf, "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format( + pretrained_model_name_or_path + ".index") archive_file = pretrained_model_name_or_path + ".index" + else: + archive_file = hf_bucket_url(pretrained_model_name_or_path, postfix=WEIGHTS_NAME) + if from_tf: + raise EnvironmentError("Loading a PyTorch model from a TF checkpoint is not supported when using a model identifier name.") # redirect to the cache, if necessary try: - resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies) + resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download, + proxies=proxies, resume_download=resume_download) except EnvironmentError: if pretrained_model_name_or_path in cls.pretrained_model_archive_map: msg = "Couldn't reach server at '{}' to download pretrained weights.".format( @@ -371,6 +435,8 @@ class PreTrainedModel(nn.Module): new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') + if key == 'lm_head.decoder.weight': +
new_key = 'lm_head.weight' if new_key: old_keys.append(key) new_keys.append(new_key) @@ -383,6 +449,8 @@ class PreTrainedModel(nn.Module): if metadata is not None: state_dict._metadata = metadata + # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants + # so we need to apply the function recursively. def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( @@ -680,7 +748,7 @@ class SequenceSummary(nn.Module): def __init__(self, config): super(SequenceSummary, self).__init__() - self.summary_type = config.summary_type if hasattr(config, 'summary_use_proj') else 'last' + self.summary_type = config.summary_type if hasattr(config, 'summary_type') else 'last' if self.summary_type == 'attn': # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 diff --git a/transformers/modeling_xlm.py b/transformers/modeling_xlm.py index b29e7215564..257f0da394b 100644 --- a/transformers/modeling_xlm.py +++ b/transformers/modeling_xlm.py @@ -73,15 +73,15 @@ def get_masks(slen, lengths, causal, padding_mask=None): """ Generate hidden states mask, and optionally an attention mask. """ - bs = lengths.size(0) + alen = torch.arange(slen, dtype=torch.long, device=lengths.device) if padding_mask is not None: mask = padding_mask else: assert lengths.max().item() <= slen - alen = torch.arange(slen, dtype=torch.long, device=lengths.device) mask = alen < lengths[:, None] # attention mask is the same as mask, or triangular inferior attention (causal) + bs = lengths.size(0) if causal: attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= alen[None, :, None] else: @@ -311,6 +311,10 @@ XLM_INPUTS_DOCSTRING = r""" Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. """ @add_start_docstrings("The bare XLM Model transformer outputting raw hidden-states without any specific head on top.", @@ -407,10 +411,12 @@ class XLMModel(XLMPreTrainedModel): self.init_weights() - def _resize_token_embeddings(self, new_num_tokens): - self.embeddings = self._get_resized_embeddings(self.embeddings, new_num_tokens) + def get_input_embeddings(self): return self.embeddings + def set_input_embeddings(self, new_embeddings): + self.embeddings = new_embeddings + def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} @@ -419,14 +425,21 @@ class XLMModel(XLMPreTrainedModel): for layer, heads in heads_to_prune.items(): self.attentions[layer].prune_heads(heads) - def forward(self, input_ids, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, - lengths=None, cache=None, head_mask=None): # removed: src_enc=None, src_len=None + def forward(self, input_ids=None, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, + lengths=None, cache=None, head_mask=None, inputs_embeds=None): # removed: src_enc=None, src_len=None + if input_ids is not None: + bs, slen = input_ids.size() + else: + bs, slen = inputs_embeds.size()[:-1] + if lengths is None: - lengths = (input_ids != self.pad_index).sum(dim=1).long() + if input_ids is not None: + lengths = (input_ids != self.pad_index).sum(dim=1).long() + else: + lengths = torch.LongTensor([slen]*bs) # mask = input_ids != self.pad_index # check inputs - bs, slen = input_ids.size() assert lengths.size(0) == bs assert lengths.max().item() <= slen # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0 @@ -440,10 +453,12 @@ class XLMModel(XLMPreTrainedModel): # if self.is_decoder and src_enc is not None: # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None] + device = input_ids.device if input_ids is not None else inputs_embeds.device + # position_ids if position_ids is None: - position_ids = input_ids.new((slen,)).long() - position_ids = torch.arange(slen, out=position_ids).unsqueeze(0) + position_ids = torch.arange(slen, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0).expand((bs, slen)) else: assert position_ids.size() == (bs, slen) # (slen, bs) # position_ids = position_ids.transpose(0, 1) @@ -469,7 +484,7 @@ class XLMModel(XLMPreTrainedModel): head_mask = [None] * self.n_layers # do not recompute cached elements - if cache is not None: + if cache is not None and input_ids is not None: _slen = slen - cache['slen'] input_ids = input_ids[:, -_slen:] position_ids = position_ids[:, -_slen:] @@ -479,8 +494,10 @@ class XLMModel(XLMPreTrainedModel): attn_mask = attn_mask[:, -_slen:] # embeddings - tensor = self.embeddings(input_ids) - tensor = tensor + self.position_embeddings(position_ids).expand_as(tensor) + if inputs_embeds is None: + inputs_embeds = self.embeddings(input_ids) + + tensor = inputs_embeds + self.position_embeddings(position_ids).expand_as(inputs_embeds) if langs is not None and self.use_lang_emb: tensor = tensor + self.lang_embeddings(langs) if token_type_ids is not None: @@ -618,15 +635,12 @@ class XLMWithLMHeadModel(XLMPreTrainedModel): self.pred_layer = XLMPredLayer(config) self.init_weights() - self.tie_weights() - def tie_weights(self): - """ Make sure we are sharing the embeddings - """ - self._tie_or_clone_weights(self.pred_layer.proj, self.transformer.embeddings) + def get_output_embeddings(self): + return self.pred_layer.proj - def forward(self, input_ids, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, - lengths=None, cache=None, head_mask=None, labels=None): + def forward(self, input_ids=None, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, + lengths=None, cache=None, head_mask=None, inputs_embeds=None, labels=None): transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, @@ -634,7 +648,8 @@ class XLMWithLMHeadModel(XLMPreTrainedModel): position_ids=position_ids, 
lengths=lengths, cache=cache, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) output = transformer_outputs[0] outputs = self.pred_layer(output, labels) @@ -686,8 +701,8 @@ class XLMForSequenceClassification(XLMPreTrainedModel): self.init_weights() - def forward(self, input_ids, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, - lengths=None, cache=None, head_mask=None, labels=None): + def forward(self, input_ids=None, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, + lengths=None, cache=None, head_mask=None, inputs_embeds=None, labels=None): transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, @@ -695,7 +710,8 @@ class XLMForSequenceClassification(XLMPreTrainedModel): position_ids=position_ids, lengths=lengths, cache=cache, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) output = transformer_outputs[0] logits = self.sequence_summary(output) @@ -769,8 +785,8 @@ class XLMForQuestionAnsweringSimple(XLMPreTrainedModel): self.init_weights() - def forward(self, input_ids, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, - lengths=None, cache=None, head_mask=None, start_positions=None, end_positions=None): + def forward(self, input_ids=None, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, + lengths=None, cache=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None): transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, langs=langs, @@ -778,7 +794,8 @@ class XLMForQuestionAnsweringSimple(XLMPreTrainedModel): position_ids=position_ids, lengths=lengths, cache=cache, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) sequence_output = transformer_outputs[0] @@ -864,8 +881,8 @@ class XLMForQuestionAnswering(XLMPreTrainedModel): self.init_weights() - def forward(self, input_ids, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, - lengths=None, cache=None, head_mask=None, start_positions=None, end_positions=None, + def forward(self, input_ids=None, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, + lengths=None, cache=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, is_impossible=None, cls_index=None, p_mask=None): transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, @@ -874,7 +891,8 @@ class XLMForQuestionAnswering(XLMPreTrainedModel): position_ids=position_ids, lengths=lengths, cache=cache, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) output = transformer_outputs[0] diff --git a/transformers/modeling_xlnet.py b/transformers/modeling_xlnet.py index e191ebadd09..225e5b059b3 100644 --- a/transformers/modeling_xlnet.py +++ b/transformers/modeling_xlnet.py @@ -558,6 +558,10 @@ XLNET_INPUTS_DOCSTRING = r""" Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. 
""" @add_start_docstrings("The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.", @@ -579,6 +583,7 @@ class XLNetModel(XLNetPreTrainedModel): **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + When ``target_mapping is not None``, the attentions outputs are a list of 2-tuple of ``torch.FloatTensor``. Examples:: @@ -611,10 +616,12 @@ class XLNetModel(XLNetPreTrainedModel): self.init_weights() - def _resize_token_embeddings(self, new_num_tokens): - self.word_embedding = self._get_resized_embeddings(self.word_embedding, new_num_tokens) + def get_input_embeddings(self): return self.word_embedding + def set_input_embeddings(self, new_embeddings): + self.word_embedding = new_embeddings + def _prune_heads(self, heads_to_prune): raise NotImplementedError @@ -710,19 +717,29 @@ class XLNetModel(XLNetPreTrainedModel): pos_emb = pos_emb.to(next(self.parameters())) return pos_emb - def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, - token_type_ids=None, input_mask=None, head_mask=None): + def forward(self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, + token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None): # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end # but we want a unified interface in the library with the batch size on the first dimension # so we move here the first dimension (batch) to the end - input_ids = input_ids.transpose(0, 1).contiguous() + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_ids = input_ids.transpose(0, 1).contiguous() + qlen, bsz = input_ids.shape[0], input_ids.shape[1] + elif inputs_embeds is not None: + inputs_embeds.transpose(0, 1).contiguous() + qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None - qlen, bsz = input_ids.shape[0], input_ids.shape[1] + mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0 klen = mlen + qlen @@ -775,7 +792,10 @@ class XLNetModel(XLNetPreTrainedModel): non_tgt_mask = None ##### Word embeddings and prepare h & g hidden states - word_emb_k = self.word_embedding(input_ids) + if inputs_embeds is not None: + word_emb_k = inputs_embeds + else: + word_emb_k = self.word_embedding(input_ids) output_h = self.dropout(word_emb_k) if target_mapping is not None: word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1) @@ -859,7 +879,11 @@ class XLNetModel(XLNetPreTrainedModel): hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states) outputs = outputs + 
(hidden_states,) if self.output_attentions: - attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions) + if target_mapping is not None: + # when target_mapping is provided, there are 2-tuple of attentions + attentions = tuple(tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions) + else: + attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions) outputs = outputs + (attentions,) return outputs # outputs, (new_mems), (hidden_states), (attentions) @@ -894,6 +918,7 @@ class XLNetLMHeadModel(XLNetPreTrainedModel): **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + When ``target_mapping is not None``, the attentions outputs are a list of 2-tuple of ``torch.FloatTensor``. Examples:: @@ -918,15 +943,12 @@ class XLNetLMHeadModel(XLNetPreTrainedModel): self.lm_loss = nn.Linear(config.d_model, config.n_token, bias=True) self.init_weights() - self.tie_weights() - def tie_weights(self): - """ Make sure we are sharing the embeddings - """ - self._tie_or_clone_weights(self.lm_loss, self.transformer.word_embedding) + def get_output_embeddings(self): + return self.lm_loss - def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, - token_type_ids=None, input_mask=None, head_mask=None, labels=None): + def forward(self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, + token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, labels=None): transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, mems=mems, @@ -934,7 +956,8 @@ class XLNetLMHeadModel(XLNetPreTrainedModel): target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) logits = self.lm_loss(transformer_outputs[0]) @@ -978,6 +1001,7 @@ class XLNetForSequenceClassification(XLNetPreTrainedModel): **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + When ``target_mapping is not None``, the attentions outputs are a list of 2-tuple of ``torch.FloatTensor``. 
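With the change just above, each element of the returned attentions becomes a 2-tuple of tensors (one per attention stream) whenever ``target_mapping`` is passed. A rough sketch of exercising that path with the LM head; the checkpoint name is illustrative and ``output_attentions`` is assumed to be enabled through the config::

    import torch
    from transformers import XLNetConfig, XLNetLMHeadModel, XLNetTokenizer

    tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
    config = XLNetConfig.from_pretrained('xlnet-base-cased', output_attentions=True)
    model = XLNetLMHeadModel.from_pretrained('xlnet-base-cased', config=config)

    input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very cute")).unsqueeze(0)
    perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]))
    perm_mask[:, :, -1] = 1.0               # no token may attend to the last position
    target_mapping = torch.zeros((1, 1, input_ids.shape[1]))
    target_mapping[0, 0, -1] = 1.0          # predict only the last token

    outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
    next_token_logits = outputs[0]          # shape (1, 1, vocab_size): one prediction per target
    attentions = outputs[-1]                # last element when output_attentions is enabled
    assert isinstance(attentions[0], tuple)  # a pair of tensors per layer, one per attention stream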
Examples:: @@ -999,8 +1023,8 @@ class XLNetForSequenceClassification(XLNetPreTrainedModel): self.init_weights() - def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, - token_type_ids=None, input_mask=None, head_mask=None, labels=None): + def forward(self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, + token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, labels=None): transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, mems=mems, @@ -1008,7 +1032,8 @@ class XLNetForSequenceClassification(XLNetPreTrainedModel): target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) output = transformer_outputs[0] output = self.sequence_summary(output) @@ -1028,6 +1053,106 @@ class XLNetForSequenceClassification(XLNetPreTrainedModel): return outputs # return (loss), logits, (mems), (hidden states), (attentions) +@add_start_docstrings("""XLNet Model with a token classification head on top (a linear layer on top of + the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, + XLNET_START_DOCSTRING, + XLNET_INPUTS_DOCSTRING) +class XLNetForTokenClassification(XLNetPreTrainedModel): + r""" + Inputs: + **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Indices of input sequence tokens in the vocabulary. + **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Segment token indices to indicate first and second portions of the inputs. + Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token. + **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: + Mask to avoid performing attention on padding token indices. + Mask values selected in ``[0, 1]``: + ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. + **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: + Mask to nullify selected heads of the self-attention modules. + Mask values selected in ``[0, 1]``: + ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. + **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: + Labels for computing the token classification loss. + Indices should be in ``[0, ..., config.num_labels - 1]``. + + Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: + **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: + Classification loss. + **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)`` + Classification scores (before SoftMax).
+ **mems**: (`optional`, returned when ``config.mem_len > 0``) + list of ``torch.FloatTensor`` (one for each layer): + that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model + if config.mem_len > 0 else tuple of None. Can be used to speed up sequential decoding and attend to longer context. + See details in the docstring of the `mems` input above. + **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) + list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) + of shape ``(batch_size, sequence_length, hidden_size)``: + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + **attentions**: (`optional`, returned when ``config.output_attentions=True``) + list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + + Examples:: + + tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased') + model = XLNetForTokenClassification.from_pretrained('xlnet-large-cased') + input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 + labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1 + outputs = model(input_ids, labels=labels) + scores = outputs[0] + + """ + def __init__(self, config): + super(XLNetForTokenClassification, self).__init__(config) + self.num_labels = config.num_labels + + self.transformer = XLNetModel(config) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + self.init_weights() + + def forward(self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, + token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, labels=None): + + outputs = self.transformer(input_ids, + attention_mask=attention_mask, + mems=mems, + perm_mask=perm_mask, + target_mapping=target_mapping, + token_type_ids=token_type_ids, + input_mask=input_mask, + head_mask=head_mask, + inputs_embeds=inputs_embeds) + + sequence_output = outputs[0] + + logits = self.classifier(sequence_output) + + outputs = (logits,) + outputs[1:] # Keep mems, hidden states, attentions if they are present + if labels is not None: + loss_fct = CrossEntropyLoss() + # Only keep active parts of the loss + if attention_mask is not None: + active_loss = attention_mask.view(-1) == 1 + active_logits = logits.view(-1, self.num_labels)[active_loss] + active_labels = labels.view(-1)[active_loss] + loss = loss_fct(active_logits, active_labels) + else: + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + outputs = (loss,) + outputs + + return outputs # return (loss), logits, (mems), (hidden states), (attentions) + + +@add_start_docstrings("""XLNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RACE/SWAG tasks. """, XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING) @@ -1050,6 +1175,10 @@ class XLNetForMultipleChoice(XLNetPreTrainedModel): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
+ **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``: + Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension @@ -1073,6 +1202,7 @@ class XLNetForMultipleChoice(XLNetPreTrainedModel): **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + When ``target_mapping is not None``, the attentions outputs are a list of 2-tuple of ``torch.FloatTensor``. Examples:: @@ -1094,9 +1224,9 @@ class XLNetForMultipleChoice(XLNetPreTrainedModel): self.init_weights() - def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None, + def forward(self, input_ids=None, token_type_ids=None, input_mask=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, - labels=None, head_mask=None): + labels=None, head_mask=None, inputs_embeds=None): num_choices = input_ids.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) @@ -1107,7 +1237,7 @@ class XLNetForMultipleChoice(XLNetPreTrainedModel): transformer_outputs = self.transformer(flat_input_ids, token_type_ids=flat_token_type_ids, input_mask=flat_input_mask, attention_mask=flat_attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, - head_mask=head_mask) + head_mask=head_mask, inputs_embeds=inputs_embeds) output = transformer_outputs[0] @@ -1158,6 +1288,7 @@ class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel): **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + When ``target_mapping is not None``, the attentions outputs are a list of 2-tuple of ``torch.FloatTensor``. 
Examples:: @@ -1179,8 +1310,8 @@ class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel): self.init_weights() - def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, - token_type_ids=None, input_mask=None, head_mask=None, + def forward(self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, + token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None): outputs = self.transformer(input_ids, @@ -1190,7 +1321,8 @@ class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel): target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) sequence_output = outputs[0] @@ -1271,6 +1403,7 @@ class XLNetForQuestionAnswering(XLNetPreTrainedModel): **attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. + When ``target_mapping is not None``, the attentions outputs are a list of 2-tuple of ``torch.FloatTensor``. Examples:: @@ -1295,8 +1428,8 @@ class XLNetForQuestionAnswering(XLNetPreTrainedModel): self.init_weights() - def forward(self, input_ids, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, - token_type_ids=None, input_mask=None, head_mask=None, + def forward(self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, + token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, is_impossible=None, cls_index=None, p_mask=None,): transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, @@ -1305,7 +1438,8 @@ class XLNetForQuestionAnswering(XLNetPreTrainedModel): target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, - head_mask=head_mask) + head_mask=head_mask, + inputs_embeds=inputs_embeds) hidden_states = transformer_outputs[0] start_logits = self.start_logits(hidden_states, p_mask=p_mask) diff --git a/transformers/optimization.py b/transformers/optimization.py index 39dc7a50ff1..99e6cc75e40 100644 --- a/transformers/optimization.py +++ b/transformers/optimization.py @@ -23,85 +23,65 @@ from torch.optim.lr_scheduler import LambdaLR logger = logging.getLogger(__name__) -class ConstantLRSchedule(LambdaLR): - """ Constant learning rate schedule. + +def get_constant_schedule(optimizer, last_epoch=-1): + """ Create a schedule with a constant learning rate. """ - def __init__(self, optimizer, last_epoch=-1): - super(ConstantLRSchedule, self).__init__(optimizer, lambda _: 1.0, last_epoch=last_epoch) + return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch) -class WarmupConstantSchedule(LambdaLR): - """ Linear warmup and then constant. - Linearly increases learning rate schedule from 0 to 1 over `warmup_steps` training steps. - Keeps learning rate schedule equal to 1. after warmup_steps. +def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1): + """ Create a schedule with a constant learning rate preceded by a warmup + period during which the learning rate increases linearly between 0 and 1. 
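The class-based schedules being removed in this file are replaced by factory functions that return plain ``LambdaLR`` objects. A minimal sketch of wiring the new API into a training loop, using the linear variant introduced a little further down; ``model`` and ``train_dataloader`` are assumed to exist and the hyper-parameters are illustrative::

    from transformers.optimization import AdamW, get_linear_schedule_with_warmup

    optimizer = AdamW(model.parameters(), lr=5e-5)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=100, num_training_steps=1000)

    for batch in train_dataloader:
        loss = model(**batch)[0]
        loss.backward()
        optimizer.step()
        scheduler.step()        # advance the learning rate once per optimizer step
        optimizer.zero_grad()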
""" - def __init__(self, optimizer, warmup_steps, last_epoch=-1): - self.warmup_steps = warmup_steps - super(WarmupConstantSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch) - - def lr_lambda(self, step): - if step < self.warmup_steps: - return float(step) / float(max(1.0, self.warmup_steps)) + def lr_lambda(current_step): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1.0, num_warmup_steps)) return 1. + return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) -class WarmupLinearSchedule(LambdaLR): - """ Linear warmup and then linear decay. - Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps. - Linearly decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps. + +def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): + """ Create a schedule with a learning rate that decreases linearly after + linearly increasing during a warmup period. """ - def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1): - self.warmup_steps = warmup_steps - self.t_total = t_total - super(WarmupLinearSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch) + def lr_lambda(current_step): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))) - def lr_lambda(self, step): - if step < self.warmup_steps: - return float(step) / float(max(1, self.warmup_steps)) - return max(0.0, float(self.t_total - step) / float(max(1.0, self.t_total - self.warmup_steps))) + return LambdaLR(optimizer, lr_lambda, last_epoch) -class WarmupCosineSchedule(LambdaLR): - """ Linear warmup and then cosine decay. - Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps. - Decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps following a cosine curve. - If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup. +def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=.5, last_epoch=-1): + """ Create a schedule with a learning rate that decreases following the + values of the cosine function between 0 and `pi * cycles` after a warmup + period during which it increases linearly between 0 and 1. """ - def __init__(self, optimizer, warmup_steps, t_total, cycles=.5, last_epoch=-1): - self.warmup_steps = warmup_steps - self.t_total = t_total - self.cycles = cycles - super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch) + def lr_lambda(current_step): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) + return max(0., 0.5 * (1. + math.cos(math.pi * float(num_cycles) * 2. * progress))) - def lr_lambda(self, step): - if step < self.warmup_steps: - return float(step) / float(max(1.0, self.warmup_steps)) - # progress after warmup - progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps)) - return max(0.0, 0.5 * (1. + math.cos(math.pi * float(self.cycles) * 2.0 * progress))) + return LambdaLR(optimizer, lr_lambda, last_epoch) -class WarmupCosineWithHardRestartsSchedule(LambdaLR): - """ Linear warmup and then cosine cycles with hard restarts. 
- Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps. - If `cycles` (default=1.) is different from default, learning rate follows `cycles` times a cosine decaying - learning rate (with hard restarts). +def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=1., last_epoch=-1): + """ Create a schedule with a learning rate that decreases following the + values of the cosine function with several hard restarts, after a warmup + period during which it increases linearly between 0 and 1. """ - def __init__(self, optimizer, warmup_steps, t_total, cycles=1., last_epoch=-1): - self.warmup_steps = warmup_steps - self.t_total = t_total - self.cycles = cycles - super(WarmupCosineWithHardRestartsSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch) - - def lr_lambda(self, step): - if step < self.warmup_steps: - return float(step) / float(max(1, self.warmup_steps)) - # progress after warmup - progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps)) - if progress >= 1.0: - return 0.0 - return max(0.0, 0.5 * (1. + math.cos(math.pi * ((float(self.cycles) * progress) % 1.0)))) + def lr_lambda(current_step): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) + if progress >= 1.: + return 0. + return max(0., 0.5 * (1. + math.cos(math.pi * ((float(num_cycles) * progress) % 1.)))) + return LambdaLR(optimizer, lr_lambda, last_epoch) class AdamW(Optimizer): diff --git a/transformers/optimization_tf.py b/transformers/optimization_tf.py new file mode 100644 index 00000000000..c5fa248083c --- /dev/null +++ b/transformers/optimization_tf.py @@ -0,0 +1,254 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functions and classes related to optimization (weight updates).""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import re + +import tensorflow as tf + + +class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule): + """Applys a warmup schedule on a given learning rate decay schedule.""" + + def __init__( + self, + initial_learning_rate, + decay_schedule_fn, + warmup_steps, + power=1.0, + name=None): + super(WarmUp, self).__init__() + self.initial_learning_rate = initial_learning_rate + self.warmup_steps = warmup_steps + self.power = power + self.decay_schedule_fn = decay_schedule_fn + self.name = name + + def __call__(self, step): + with tf.name_scope(self.name or 'WarmUp') as name: + # Implements polynomial warmup. i.e., if global_step < warmup_steps, the + # learning rate will be `global_step/num_warmup_steps * init_lr`. 
+ global_step_float = tf.cast(step, tf.float32) + warmup_steps_float = tf.cast(self.warmup_steps, tf.float32) + warmup_percent_done = global_step_float / warmup_steps_float + warmup_learning_rate = ( + self.initial_learning_rate * + tf.math.pow(warmup_percent_done, self.power)) + return tf.cond(global_step_float < warmup_steps_float, + lambda: warmup_learning_rate, + lambda: self.decay_schedule_fn(step), + name=name) + + def get_config(self): + return { + 'initial_learning_rate': self.initial_learning_rate, + 'decay_schedule_fn': self.decay_schedule_fn, + 'warmup_steps': self.warmup_steps, + 'power': self.power, + 'name': self.name + } + + +def create_optimizer(init_lr, num_train_steps, num_warmup_steps): + """Creates an optimizer with learning rate schedule.""" + # Implements linear decay of the learning rate. + learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay( + initial_learning_rate=init_lr, + decay_steps=num_train_steps, + end_learning_rate=0.0) + if num_warmup_steps: + learning_rate_fn = WarmUp(initial_learning_rate=init_lr, + decay_schedule_fn=learning_rate_fn, + warmup_steps=num_warmup_steps) + optimizer = AdamWeightDecay( + learning_rate=learning_rate_fn, + weight_decay_rate=0.01, + beta_1=0.9, + beta_2=0.999, + epsilon=1e-6, + exclude_from_weight_decay=['layer_norm', 'bias']) + return optimizer + + +class AdamWeightDecay(tf.keras.optimizers.Adam): + """Adam enables L2 weight decay and clip_by_global_norm on gradients. + + Just adding the square of the weights to the loss function is *not* the + correct way of using L2 regularization/weight decay with Adam, since that will + interact with the m and v parameters in strange ways. + + Instead we want to decay the weights in a manner that doesn't interact with + the m/v parameters. This is equivalent to adding the square of the weights to + the loss with plain (non-momentum) SGD.
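As a rough usage sketch, ``create_optimizer`` above composes ``PolynomialDecay``, the ``WarmUp`` wrapper and ``AdamWeightDecay`` and can be plugged into an ordinary Keras setup; ``model`` and ``train_dataset`` are assumed to exist and the step counts and learning rate are illustrative::

    from transformers.optimization_tf import create_optimizer

    # linear decay over 1000 steps, with the first 100 steps spent in warmup
    optimizer = create_optimizer(init_lr=3e-5, num_train_steps=1000, num_warmup_steps=100)

    # layer-norm and bias variables are excluded from weight decay by default
    model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy')
    model.fit(train_dataset, epochs=2)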
+ """ + + def __init__(self, + learning_rate=0.001, + beta_1=0.9, + beta_2=0.999, + epsilon=1e-7, + amsgrad=False, + weight_decay_rate=0.0, + include_in_weight_decay=None, + exclude_from_weight_decay=None, + name='AdamWeightDecay', + **kwargs): + super(AdamWeightDecay, self).__init__( + learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs) + self.weight_decay_rate = weight_decay_rate + self._include_in_weight_decay = include_in_weight_decay + self._exclude_from_weight_decay = exclude_from_weight_decay + + @classmethod + def from_config(cls, config): + """Creates an optimizer from its config with WarmUp custom object.""" + custom_objects = {'WarmUp': WarmUp} + return super(AdamWeightDecay, cls).from_config( + config, custom_objects=custom_objects) + + def _prepare_local(self, var_device, var_dtype, apply_state): + super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, + apply_state) + apply_state['weight_decay_rate'] = tf.constant( + self.weight_decay_rate, name='adam_weight_decay_rate') + + def _decay_weights_op(self, var, learning_rate, apply_state): + do_decay = self._do_use_weight_decay(var.name) + if do_decay: + return var.assign_sub( + learning_rate * var * + apply_state['weight_decay_rate'], + use_locking=self._use_locking) + return tf.no_op() + + def apply_gradients(self, grads_and_vars, clip_norm, name=None): + grads, tvars = list(zip(*grads_and_vars)) + (grads, _) = tf.clip_by_global_norm(grads, clip_norm=clip_norm) + return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars)) + + def _get_lr(self, var_device, var_dtype, apply_state): + """Retrieves the learning rate with the given state.""" + if apply_state is None: + return self._decayed_lr_t[var_dtype], {} + + apply_state = apply_state or {} + coefficients = apply_state.get((var_device, var_dtype)) + if coefficients is None: + coefficients = self._fallback_apply_state(var_device, var_dtype) + apply_state[(var_device, var_dtype)] = coefficients + + return coefficients['lr_t'], dict(apply_state=apply_state) + + def _resource_apply_dense(self, grad, var, apply_state=None): + lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state) + decay = self._decay_weights_op(var, lr_t, apply_state) + with tf.control_dependencies([decay]): + return super(AdamWeightDecay, self)._resource_apply_dense( + grad, var, **kwargs) + + def _resource_apply_sparse(self, grad, var, indices, apply_state=None): + lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state) + decay = self._decay_weights_op(var, lr_t, apply_state) + with tf.control_dependencies([decay]): + return super(AdamWeightDecay, self)._resource_apply_sparse( + grad, var, indices, **kwargs) + + def get_config(self): + config = super(AdamWeightDecay, self).get_config() + config.update({ + 'weight_decay_rate': self.weight_decay_rate, + }) + return config + + def _do_use_weight_decay(self, param_name): + """Whether to use L2 weight decay for `param_name`.""" + if self.weight_decay_rate == 0: + return False + + if self._include_in_weight_decay: + for r in self._include_in_weight_decay: + if re.search(r, param_name) is not None: + return True + + if self._exclude_from_weight_decay: + for r in self._exclude_from_weight_decay: + if re.search(r, param_name) is not None: + return False + return True + + +## Inspired from https://github.com/OpenNMT/OpenNMT-tf/blob/master/opennmt/optimizers/utils.py +class GradientAccumulator(object): + """Distribution strategies-aware gradient accumulation utility.""" + + def __init__(self): + 
"""Initializes the accumulator.""" + self._gradients = [] + self._accum_steps = tf.Variable( + initial_value=0, + dtype=tf.int64, + trainable=False, + aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA) + + @property + def step(self): + """Number of accumulated steps.""" + return self._accum_steps.value() + + @property + def gradients(self): + """The accumulated gradients.""" + return list(gradient.value() if gradient is not None else gradient for gradient in self._get_replica_gradients()) + + def __call__(self, gradients): + """Accumulates :obj:`gradients`.""" + if not self._gradients: + self._gradients.extend([tf.Variable(tf.zeros_like(gradient), trainable=False) if gradient is not None else gradient for gradient in gradients]) + + if len(gradients) != len(self._gradients): + raise ValueError("Expected %s gradients, but got %d" % (len(self._gradients), len(gradients))) + + for accum_gradient, gradient in zip(self._get_replica_gradients(), gradients): + if accum_gradient is not None: + accum_gradient.assign_add(gradient) + + self._accum_steps.assign_add(1) + + def reset(self): + """Resets the accumulated gradients.""" + if self._gradients: + self._accum_steps.assign(0) + + for gradient in self._get_replica_gradients(): + if gradient is not None: + gradient.assign(tf.zeros_like(gradient)) + + def _get_replica_gradients(self): + if tf.distribute.has_strategy(): + # In a replica context, we want to accumulate gradients on each replica + # without synchronization, so we directly assign the value of the + # current replica. + replica_context = tf.distribute.get_replica_context() + + if replica_context is None or tf.distribute.get_strategy().num_replicas_in_sync == 1: + return self._gradients + + return (gradient.device_map.select_for_current_replica(gradient.values, replica_context) for gradient in self._gradients) + else: + return self._gradients diff --git a/transformers/tests/conftest.py b/transformers/tests/conftest.py deleted file mode 100644 index 841ebc8df9e..00000000000 --- a/transformers/tests/conftest.py +++ /dev/null @@ -1,19 +0,0 @@ -# content of conftest.py - -import pytest - - -def pytest_addoption(parser): - parser.addoption( - "--runslow", action="store_true", default=False, help="run slow tests" - ) - - -def pytest_collection_modifyitems(config, items): - if config.getoption("--runslow"): - # --runslow given in cli: do not skip slow tests - return - skip_slow = pytest.mark.skip(reason="need --runslow option to run") - for item in items: - if "slow" in item.keywords: - item.add_marker(skip_slow) diff --git a/transformers/tests/fixtures/spiece.model b/transformers/tests/fixtures/spiece.model new file mode 100644 index 00000000000..c91b8acfa56 Binary files /dev/null and b/transformers/tests/fixtures/spiece.model differ diff --git a/transformers/tests/hf_api_test.py b/transformers/tests/hf_api_test.py new file mode 100644 index 00000000000..92d41b6dffc --- /dev/null +++ b/transformers/tests/hf_api_test.py @@ -0,0 +1,102 @@ +# coding=utf-8 +# Copyright 2019-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import, division, print_function + +import os +import six +import time +import unittest + +from transformers.hf_api import HfApi, S3Obj, PresignedUrl, HfFolder, HTTPError + +USER = "__DUMMY_TRANSFORMERS_USER__" +PASS = "__DUMMY_TRANSFORMERS_PASS__" +FILE_KEY = "Test-{}.txt".format(int(time.time())) +FILE_PATH = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "fixtures/input.txt" +) + + + +class HfApiCommonTest(unittest.TestCase): + _api = HfApi(endpoint="https://moon-staging.huggingface.co") + + +class HfApiLoginTest(HfApiCommonTest): + def test_login_invalid(self): + with self.assertRaises(HTTPError): + self._api.login(username=USER, password="fake") + + def test_login_valid(self): + token = self._api.login(username=USER, password=PASS) + self.assertIsInstance(token, six.string_types) + + +class HfApiEndpointsTest(HfApiCommonTest): + @classmethod + def setUpClass(cls): + """ + Share this valid token in all tests below. + """ + cls._token = cls._api.login(username=USER, password=PASS) + + def test_whoami(self): + user = self._api.whoami(token=self._token) + self.assertEqual(user, USER) + + def test_presign(self): + urls = self._api.presign(token=self._token, filename=FILE_KEY) + self.assertIsInstance(urls, PresignedUrl) + self.assertEqual(urls.type, "text/plain") + + def test_presign_and_upload(self): + access_url = self._api.presign_and_upload( + token=self._token, filename=FILE_KEY, filepath=FILE_PATH + ) + self.assertIsInstance(access_url, six.string_types) + + def test_list_objs(self): + objs = self._api.list_objs(token=self._token) + self.assertIsInstance(objs, list) + if len(objs) > 0: + o = objs[-1] + self.assertIsInstance(o, S3Obj) + + + +class HfFolderTest(unittest.TestCase): + def test_token_workflow(self): + """ + Test the whole token save/get/delete workflow, + with the desired behavior with respect to non-existent tokens. + """ + token = "token-{}".format(int(time.time())) + HfFolder.save_token(token) + self.assertEqual( + HfFolder.get_token(), + token + ) + HfFolder.delete_token() + HfFolder.delete_token() + # ^^ not an error, we test that the + # second call does not fail. + self.assertEqual( + HfFolder.get_token(), + None + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/transformers/tests/modeling_albert_test.py b/transformers/tests/modeling_albert_test.py new file mode 100644 index 00000000000..a14d66ae8f7 --- /dev/null +++ b/transformers/tests/modeling_albert_test.py @@ -0,0 +1,240 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
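The ``hf_api`` tests above exercise a login, presign/upload and list flow. A rough sketch of the same calls outside the test suite, assuming the client's default endpoint; the credentials and file names are placeholders::

    from transformers.hf_api import HfApi, HfFolder

    api = HfApi()
    token = api.login(username="my-username", password="my-password")  # placeholder credentials
    HfFolder.save_token(token)  # cache the token locally, as the tests do

    api.presign_and_upload(token=token, filename="my-weights.bin", filepath="./my-weights.bin")
    for obj in api.list_objs(token=token):
        print(obj)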
+from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +import shutil + +from transformers import is_torch_available + +from .modeling_common_test import (CommonTestCases, ids_tensor) +from .configuration_common_test import ConfigTester +from .utils import require_torch, slow, torch_device + +if is_torch_available(): + from transformers import (AlbertConfig, AlbertModel, AlbertForMaskedLM, + AlbertForSequenceClassification, AlbertForQuestionAnswering, + ) + from transformers.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP + + +@require_torch +class AlbertModelTest(CommonTestCases.CommonModelTester): + + all_model_classes = (AlbertModel, AlbertForMaskedLM) if is_torch_available() else () + + class AlbertModelTester(object): + + def __init__(self, + parent, + batch_size=13, + seq_length=7, + is_training=True, + use_input_mask=True, + use_token_type_ids=True, + use_labels=True, + vocab_size=99, + embedding_size=16, + hidden_size=36, + num_hidden_layers=6, + num_hidden_groups=6, + num_attention_heads=6, + intermediate_size=37, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=16, + type_sequence_label_size=2, + initializer_range=0.02, + num_labels=3, + num_choices=4, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_input_mask = use_input_mask + self.use_token_type_ids = use_token_type_ids + self.use_labels = use_labels + self.vocab_size = vocab_size + self.embedding_size = embedding_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.type_sequence_label_size = type_sequence_label_size + self.initializer_range = initializer_range + self.num_labels = num_labels + self.num_choices = num_choices + self.scope = scope + self.num_hidden_groups = num_hidden_groups + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + + input_mask = None + if self.use_input_mask: + input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) + + token_type_ids = None + if self.use_token_type_ids: + token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) + + sequence_labels = None + token_labels = None + choice_labels = None + if self.use_labels: + sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) + token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) + choice_labels = ids_tensor([self.batch_size], self.num_choices) + + config = AlbertConfig( + vocab_size_or_config_json_file=self.vocab_size, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + hidden_act=self.hidden_act, + hidden_dropout_prob=self.hidden_dropout_prob, + attention_probs_dropout_prob=self.attention_probs_dropout_prob, + max_position_embeddings=self.max_position_embeddings, + type_vocab_size=self.type_vocab_size, + 
initializer_range=self.initializer_range, + num_hidden_groups=self.num_hidden_groups) + + return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + + def check_loss_output(self, result): + self.parent.assertListEqual( + list(result["loss"].size()), + []) + + def create_and_check_albert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + model = AlbertModel(config=config) + model.to(torch_device) + model.eval() + sequence_output, pooled_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) + sequence_output, pooled_output = model(input_ids, token_type_ids=token_type_ids) + sequence_output, pooled_output = model(input_ids) + + result = { + "sequence_output": sequence_output, + "pooled_output": pooled_output, + } + self.parent.assertListEqual( + list(result["sequence_output"].size()), + [self.batch_size, self.seq_length, self.hidden_size]) + self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, self.hidden_size]) + + + def create_and_check_albert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + model = AlbertForMaskedLM(config=config) + model.to(torch_device) + model.eval() + loss, prediction_scores = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels) + result = { + "loss": loss, + "prediction_scores": prediction_scores, + } + self.parent.assertListEqual( + list(result["prediction_scores"].size()), + [self.batch_size, self.seq_length, self.vocab_size]) + self.check_loss_output(result) + + def create_and_check_albert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + model = AlbertForQuestionAnswering(config=config) + model.to(torch_device) + model.eval() + loss, start_logits, end_logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, + start_positions=sequence_labels, end_positions=sequence_labels) + result = { + "loss": loss, + "start_logits": start_logits, + "end_logits": end_logits, + } + self.parent.assertListEqual( + list(result["start_logits"].size()), + [self.batch_size, self.seq_length]) + self.parent.assertListEqual( + list(result["end_logits"].size()), + [self.batch_size, self.seq_length]) + self.check_loss_output(result) + + + def create_and_check_albert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + config.num_labels = self.num_labels + model = AlbertForSequenceClassification(config) + model.to(torch_device) + model.eval() + loss, logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) + result = { + "loss": loss, + "logits": logits, + } + self.parent.assertListEqual( + list(result["logits"].size()), + [self.batch_size, self.num_labels]) + self.check_loss_output(result) + + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + (config, input_ids, token_type_ids, input_mask, + sequence_labels, token_labels, choice_labels) = config_and_inputs + inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} + return config, inputs_dict + + def setUp(self): + self.model_tester = AlbertModelTest.AlbertModelTester(self) + self.config_tester = ConfigTester(self, 
config_class=AlbertConfig, hidden_size=37) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_albert_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_albert_model(*config_and_inputs) + + def test_for_masked_lm(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_albert_for_masked_lm(*config_and_inputs) + + def test_for_question_answering(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_albert_for_question_answering(*config_and_inputs) + + def test_for_sequence_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_albert_for_sequence_classification(*config_and_inputs) + + @slow + def test_model_from_pretrained(self): + cache_dir = "/tmp/transformers_test/" + for model_name in list(ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: + model = AlbertModel.from_pretrained(model_name, cache_dir=cache_dir) + shutil.rmtree(cache_dir) + self.assertIsNotNone(model) + +if __name__ == "__main__": + unittest.main() diff --git a/transformers/tests/modeling_auto_test.py b/transformers/tests/modeling_auto_test.py index af1de29cce8..871a262fe8c 100644 --- a/transformers/tests/modeling_auto_test.py +++ b/transformers/tests/modeling_auto_test.py @@ -18,11 +18,12 @@ from __future__ import print_function import unittest import shutil -import pytest import logging from transformers import is_torch_available +from .utils import require_torch, slow, SMALL_MODEL_IDENTIFIER + if is_torch_available(): from transformers import (AutoConfig, BertConfig, AutoModel, BertModel, @@ -33,11 +34,11 @@ if is_torch_available(): from .modeling_common_test import (CommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester -else: - pytestmark = pytest.mark.skip("Require Torch") +@require_torch class AutoModelTest(unittest.TestCase): + @slow def test_model_from_pretrained(self): logging.basicConfig(level=logging.INFO) for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: @@ -52,6 +53,7 @@ class AutoModelTest(unittest.TestCase): for value in loading_info.values(): self.assertEqual(len(value), 0) + @slow def test_lmhead_model_from_pretrained(self): logging.basicConfig(level=logging.INFO) for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: @@ -64,6 +66,7 @@ class AutoModelTest(unittest.TestCase): self.assertIsNotNone(model) self.assertIsInstance(model, BertForMaskedLM) + @slow def test_sequence_classification_model_from_pretrained(self): logging.basicConfig(level=logging.INFO) for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: @@ -76,6 +79,7 @@ class AutoModelTest(unittest.TestCase): self.assertIsNotNone(model) self.assertIsInstance(model, BertForSequenceClassification) + @slow def test_question_answering_model_from_pretrained(self): logging.basicConfig(level=logging.INFO) for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: @@ -88,6 +92,11 @@ class AutoModelTest(unittest.TestCase): self.assertIsNotNone(model) self.assertIsInstance(model, BertForQuestionAnswering) + def test_from_pretrained_identifier(self): + logging.basicConfig(level=logging.INFO) + model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER) + self.assertIsInstance(model, BertForMaskedLM) + if __name__ == "__main__": unittest.main() diff --git 
a/transformers/tests/modeling_bert_test.py b/transformers/tests/modeling_bert_test.py index 633c97e263d..539f66cd3fc 100644 --- a/transformers/tests/modeling_bert_test.py +++ b/transformers/tests/modeling_bert_test.py @@ -18,28 +18,27 @@ from __future__ import print_function import unittest import shutil -import pytest from transformers import is_torch_available -from .modeling_common_test import (CommonTestCases, ids_tensor) +from .modeling_common_test import (CommonTestCases, ids_tensor, floats_tensor) from .configuration_common_test import ConfigTester +from .utils import require_torch, slow, torch_device if is_torch_available(): from transformers import (BertConfig, BertModel, BertForMaskedLM, - BertForNextSentencePrediction, BertForPreTraining, - BertForQuestionAnswering, BertForSequenceClassification, - BertForTokenClassification, BertForMultipleChoice) + BertForNextSentencePrediction, BertForPreTraining, + BertForQuestionAnswering, BertForSequenceClassification, + BertForTokenClassification, BertForMultipleChoice) from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP -else: - pytestmark = pytest.mark.skip("Require Torch") +@require_torch class BertModelTest(CommonTestCases.CommonModelTester): all_model_classes = (BertModel, BertForMaskedLM, BertForNextSentencePrediction, - BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, - BertForTokenClassification) if is_torch_available() else () + BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, + BertForTokenClassification) if is_torch_available() else () class BertModelTester(object): @@ -66,7 +65,7 @@ class BertModelTest(CommonTestCases.CommonModelTester): num_labels=3, num_choices=4, scope=None, - ): + ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length @@ -120,10 +119,20 @@ class BertModelTest(CommonTestCases.CommonModelTester): attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, + is_decoder=False, initializer_range=self.initializer_range) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + def prepare_config_and_inputs_for_decoder(self): + config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = self.prepare_config_and_inputs() + + config.is_decoder = True + encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) + encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) + + return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask + def check_loss_output(self, result): self.parent.assertListEqual( list(result["loss"].size()), @@ -131,6 +140,7 @@ class BertModelTest(CommonTestCases.CommonModelTester): def create_and_check_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = BertModel(config=config) + model.to(torch_device) model.eval() sequence_output, pooled_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) sequence_output, pooled_output = model(input_ids, token_type_ids=token_type_ids) @@ -145,9 +155,26 @@ class BertModelTest(CommonTestCases.CommonModelTester): [self.batch_size, self.seq_length, self.hidden_size]) self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, 
self.hidden_size]) + def create_and_check_bert_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask): + model = BertModel(config) + model.to(torch_device) + model.eval() + sequence_output, pooled_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask) + sequence_output, pooled_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states) + sequence_output, pooled_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) + + result = { + "sequence_output": sequence_output, + "pooled_output": pooled_output, + } + self.parent.assertListEqual( + list(result["sequence_output"].size()), + [self.batch_size, self.seq_length, self.hidden_size]) + self.parent.assertListEqual(list(result["pooled_output"].size()), [self.batch_size, self.hidden_size]) def create_and_check_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = BertForMaskedLM(config=config) + model.to(torch_device) model.eval() loss, prediction_scores = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels) result = { @@ -159,8 +186,24 @@ class BertModelTest(CommonTestCases.CommonModelTester): [self.batch_size, self.seq_length, self.vocab_size]) self.check_loss_output(result) + def create_and_check_bert_model_for_masked_lm_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask): + model = BertForMaskedLM(config=config) + model.to(torch_device) + model.eval() + loss, prediction_scores = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask) + loss, prediction_scores = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels, encoder_hidden_states=encoder_hidden_states) + result = { + "loss": loss, + "prediction_scores": prediction_scores, + } + self.parent.assertListEqual( + list(result["prediction_scores"].size()), + [self.batch_size, self.seq_length, self.vocab_size]) + self.check_loss_output(result) + def create_and_check_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = BertForNextSentencePrediction(config=config) + model.to(torch_device) model.eval() loss, seq_relationship_score = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, next_sentence_label=sequence_labels) result = { @@ -172,9 +215,9 @@ class BertModelTest(CommonTestCases.CommonModelTester): [self.batch_size, 2]) self.check_loss_output(result) - def create_and_check_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = BertForPreTraining(config=config) + model.to(torch_device) model.eval() loss, prediction_scores, seq_relationship_score = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels, next_sentence_label=sequence_labels) @@ -191,9 +234,9 @@ class BertModelTest(CommonTestCases.CommonModelTester): 
[self.batch_size, 2]) self.check_loss_output(result) - def create_and_check_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = BertForQuestionAnswering(config=config) + model.to(torch_device) model.eval() loss, start_logits, end_logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels) @@ -210,10 +253,10 @@ class BertModelTest(CommonTestCases.CommonModelTester): [self.batch_size, self.seq_length]) self.check_loss_output(result) - def create_and_check_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): config.num_labels = self.num_labels model = BertForSequenceClassification(config) + model.to(torch_device) model.eval() loss, logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) result = { @@ -225,10 +268,10 @@ class BertModelTest(CommonTestCases.CommonModelTester): [self.batch_size, self.num_labels]) self.check_loss_output(result) - def create_and_check_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): config.num_labels = self.num_labels model = BertForTokenClassification(config=config) + model.to(torch_device) model.eval() loss, logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) result = { @@ -240,10 +283,10 @@ class BertModelTest(CommonTestCases.CommonModelTester): [self.batch_size, self.seq_length, self.num_labels]) self.check_loss_output(result) - def create_and_check_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): config.num_choices = self.num_choices model = BertForMultipleChoice(config=config) + model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() @@ -261,7 +304,6 @@ class BertModelTest(CommonTestCases.CommonModelTester): [self.batch_size, self.num_choices]) self.check_loss_output(result) - def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, token_type_ids, input_mask, @@ -280,10 +322,18 @@ class BertModelTest(CommonTestCases.CommonModelTester): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bert_model(*config_and_inputs) + def test_bert_model_as_decoder(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() + self.model_tester.create_and_check_bert_model_as_decoder(*config_and_inputs) + def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bert_for_masked_lm(*config_and_inputs) + def test_for_masked_lm_decoder(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() + self.model_tester.create_and_check_bert_model_for_masked_lm_as_decoder(*config_and_inputs) + def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bert_for_multiple_choice(*config_and_inputs) @@ -308,7 +358,7 @@ class BertModelTest(CommonTestCases.CommonModelTester): config_and_inputs 
= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bert_for_token_classification(*config_and_inputs) - @pytest.mark.slow + @slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: @@ -316,5 +366,6 @@ class BertModelTest(CommonTestCases.CommonModelTester): shutil.rmtree(cache_dir) self.assertIsNotNone(model) + if __name__ == "__main__": unittest.main() diff --git a/transformers/tests/modeling_common_test.py b/transformers/tests/modeling_common_test.py index 1c8b1584c73..80d5d95455f 100644 --- a/transformers/tests/modeling_common_test.py +++ b/transformers/tests/modeling_common_test.py @@ -27,19 +27,18 @@ import uuid import unittest import logging -import pytest from transformers import is_torch_available +from .utils import require_torch, slow, torch_device + if is_torch_available(): import torch import numpy as np - from transformers import (PretrainedConfig, PreTrainedModel, + from transformers import (AdaptiveEmbedding, PretrainedConfig, PreTrainedModel, BertModel, BertConfig, BERT_PRETRAINED_MODEL_ARCHIVE_MAP, GPT2LMHeadModel, GPT2Config, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP) -else: - pytestmark = pytest.mark.skip("Require Torch") if sys.version_info[0] == 2: import cPickle as pickle @@ -65,6 +64,7 @@ def _config_zero_init(config): class CommonTestCases: + @require_torch class CommonModelTester(unittest.TestCase): model_tester = None @@ -79,6 +79,7 @@ class CommonTestCases: for model_class in self.all_model_classes: model = model_class(config) + model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**inputs_dict) @@ -86,12 +87,13 @@ class CommonTestCases: with TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname) + model.to(torch_device) with torch.no_grad(): after_outputs = model(**inputs_dict) # Make sure we don't have nans - out_1 = after_outputs[0].numpy() - out_2 = outputs[0].numpy() + out_1 = after_outputs[0].cpu().numpy() + out_2 = outputs[0].cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) @@ -113,6 +115,7 @@ class CommonTestCases: for model_class in self.all_model_classes: model = model_class(config) + model.to(torch_device) model.eval() first, second = model(inputs_dict["input_ids"])[0], model(inputs_dict["input_ids"])[0] self.assertEqual(first.ne(second).sum().item(), 0) @@ -125,6 +128,7 @@ class CommonTestCases: config.output_attentions = True config.output_hidden_states = False model = model_class(config) + model.to(torch_device) model.eval() outputs = model(**inputs_dict) attentions = outputs[-1] @@ -142,6 +146,7 @@ class CommonTestCases: config.output_attentions = True config.output_hidden_states = True model = model_class(config) + model.to(torch_device) model.eval() outputs = model(**inputs_dict) self.assertEqual(out_len+1, len(outputs)) @@ -181,6 +186,7 @@ class CommonTestCases: configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) + model.to(torch_device) model.eval() inputs = inputs_dict['input_ids'] # Let's keep only input_ids @@ -201,7 +207,10 @@ class CommonTestCases: except ValueError: self.fail("Couldn't load module.") + model.to(torch_device) model.eval() + + loaded_model.to(torch_device) loaded_model.eval() model_params = model.parameters() @@ -228,11 +237,12 @@ class CommonTestCases: configs_no_init = 
_config_zero_init(config) # To be sure we have no Nan for model_class in self.all_model_classes: model = model_class(config=configs_no_init) + model.to(torch_device) model.eval() # Prepare head_mask # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior) - head_mask = torch.ones(self.model_tester.num_hidden_layers, self.model_tester.num_attention_heads) + head_mask = torch.ones(self.model_tester.num_hidden_layers, self.model_tester.num_attention_heads, device=torch_device) head_mask[0, 0] = 0 head_mask[-1, :-1] = 0 head_mask.requires_grad_(requires_grad=True) @@ -282,6 +292,7 @@ class CommonTestCases: config.output_attentions = True config.output_hidden_states = False model = model_class(config=config) + model.to(torch_device) model.eval() heads_to_prune = {0: list(range(1, self.model_tester.num_attention_heads)), -1: [0]} @@ -310,6 +321,7 @@ class CommonTestCases: config.output_attentions = True config.output_hidden_states = False model = model_class(config=config) + model.to(torch_device) model.eval() heads_to_prune = {0: list(range(1, self.model_tester.num_attention_heads)), -1: [0]} @@ -319,6 +331,7 @@ class CommonTestCases: os.makedirs(directory) model.save_pretrained(directory) model = model_class.from_pretrained(directory) + model.to(torch_device) outputs = model(**inputs_dict) attentions = outputs[-1] @@ -346,6 +359,7 @@ class CommonTestCases: config.pruned_heads = heads_to_prune model = model_class(config=config) + model.to(torch_device) model.eval() outputs = model(**inputs_dict) @@ -372,6 +386,7 @@ class CommonTestCases: config.pruned_heads = heads_to_prune model = model_class(config=config) + model.to(torch_device) model.eval() outputs = model(**inputs_dict) @@ -388,6 +403,7 @@ class CommonTestCases: os.makedirs(directory) model.save_pretrained(directory) model = model_class.from_pretrained(directory) + model.to(torch_device) shutil.rmtree(directory) outputs = model(**inputs_dict) @@ -419,6 +435,7 @@ class CommonTestCases: config.output_hidden_states = True config.output_attentions = False model = model_class(config) + model.to(torch_device) model.eval() outputs = model(**inputs_dict) hidden_states = outputs[-1] @@ -463,6 +480,21 @@ class CommonTestCases: self.assertTrue(models_equal) + def test_model_common_attributes(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + self.assertIsInstance( + model.get_input_embeddings(), + (torch.nn.Embedding, AdaptiveEmbedding) + ) + model.set_input_embeddings(torch.nn.Embedding(10, 10)) + x = model.get_output_embeddings() + self.assertTrue( + x is None or isinstance(x, torch.nn.Linear) + ) + def test_tie_model_weights(self): if not self.test_torchscript: return @@ -477,11 +509,11 @@ class CommonTestCases: return equal for model_class in self.all_model_classes: - if not hasattr(model_class, 'tie_weights'): - continue - config.torchscript = True model_not_tied = model_class(config) + if model_not_tied.get_output_embeddings() is None: + continue + params_not_tied = list(model_not_tied.parameters()) config_tied = copy.deepcopy(config) @@ -516,6 +548,20 @@ class CommonTestCases: # self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape) # self.assertTrue(check_same_values(model.transformer.wte, model.lm_head)) + def test_inputs_embeds(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + input_ids = 
inputs_dict["input_ids"] + del inputs_dict["input_ids"] + + for model_class in self.all_model_classes: + model = model_class(config) + model.to(torch_device) + model.eval() + + wte = model.get_input_embeddings() + inputs_dict["inputs_embeds"] = wte(input_ids) + outputs = model(**inputs_dict) + class GPTModelTester(CommonModelTester): @@ -600,6 +646,7 @@ class CommonTestCases: def create_and_check_base_model(self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids): model = self.base_model_class(config) + model.to(torch_device) model.eval() outputs = model(input_ids, position_ids, token_type_ids) @@ -615,6 +662,7 @@ class CommonTestCases: def create_and_check_lm_head(self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids): model = self.lm_head_model_class(config) + model.to(torch_device) model.eval() outputs = model(input_ids, position_ids, token_type_ids, lm_labels) loss, lm_logits = outputs[:2] @@ -631,6 +679,7 @@ class CommonTestCases: mc_labels, lm_labels, mc_token_ids): for model_class in self.all_model_classes: model = model_class(config) + model.to(torch_device) model.eval() outputs = model(input_ids) presents = outputs[-1] @@ -643,6 +692,7 @@ class CommonTestCases: def create_and_check_double_heads(self, config, input_ids, token_type_ids, position_ids, mc_labels, lm_labels, mc_token_ids): model = self.double_head_model_class(config) + model.to(torch_device) model.eval() outputs = model(input_ids, mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels, token_type_ids=token_type_ids, position_ids=position_ids) @@ -688,6 +738,7 @@ class CommonTestCases: config_and_inputs = self.prepare_config_and_inputs() self.create_and_check_presents(*config_and_inputs) + @slow def run_slow_tests(self): self.create_and_check_model_from_pretrained() @@ -741,10 +792,28 @@ def ids_tensor(shape, vocab_size, rng=None, name=None): for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) - return torch.tensor(data=values, dtype=torch.long).view(shape).contiguous() + return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous() +def floats_tensor(shape, scale=1.0, rng=None, name=None): + """Creates a random float32 tensor of the shape within the vocab size.""" + if rng is None: + rng = global_rng + + total_dims = 1 + for dim in shape: + total_dims *= dim + + values = [] + for _ in range(total_dims): + values.append(rng.random() * scale) + + return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous() + + +@require_torch class ModelUtilsTest(unittest.TestCase): + @slow def test_model_from_pretrained(self): logging.basicConfig(level=logging.INFO) for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: diff --git a/transformers/tests/modeling_ctrl_test.py b/transformers/tests/modeling_ctrl_test.py index 47ff8d8d51f..8c14578a5c4 100644 --- a/transformers/tests/modeling_ctrl_test.py +++ b/transformers/tests/modeling_ctrl_test.py @@ -16,7 +16,6 @@ from __future__ import division from __future__ import print_function import unittest -import pytest import shutil import pdb @@ -25,13 +24,13 @@ from transformers import is_torch_available if is_torch_available(): from transformers import (CTRLConfig, CTRLModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP, CTRLLMHeadModel) -else: - pytestmark = pytest.mark.skip("Require Torch") from .modeling_common_test import (CommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import 
require_torch, slow, torch_device +@require_torch class CTRLModelTest(CommonTestCases.CommonModelTester): all_model_classes = (CTRLModel, CTRLLMHeadModel) if is_torch_available() else () @@ -140,6 +139,7 @@ class CTRLModelTest(CommonTestCases.CommonModelTester): def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = CTRLModel(config=config) + model.to(torch_device) model.eval() model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask) @@ -157,6 +157,7 @@ class CTRLModelTest(CommonTestCases.CommonModelTester): def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = CTRLLMHeadModel(config) + model.to(torch_device) model.eval() loss, lm_logits, _ = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) @@ -202,7 +203,7 @@ class CTRLModelTest(CommonTestCases.CommonModelTester): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) - @pytest.mark.slow + @slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in list(CTRL_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: diff --git a/transformers/tests/modeling_distilbert_test.py b/transformers/tests/modeling_distilbert_test.py index 937d03396d5..4b8f64327dd 100644 --- a/transformers/tests/modeling_distilbert_test.py +++ b/transformers/tests/modeling_distilbert_test.py @@ -17,20 +17,20 @@ from __future__ import division from __future__ import print_function import unittest -import pytest from transformers import is_torch_available if is_torch_available(): from transformers import (DistilBertConfig, DistilBertModel, DistilBertForMaskedLM, + DistilBertForTokenClassification, DistilBertForQuestionAnswering, DistilBertForSequenceClassification) -else: - pytestmark = pytest.mark.skip("Require Torch") from .modeling_common_test import (CommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import require_torch, slow, torch_device +@require_torch class DistilBertModelTest(CommonTestCases.CommonModelTester): all_model_classes = (DistilBertModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, @@ -125,6 +125,7 @@ class DistilBertModelTest(CommonTestCases.CommonModelTester): def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels): model = DistilBertModel(config=config) + model.to(torch_device) model.eval() (sequence_output,) = model(input_ids, input_mask) (sequence_output,) = model(input_ids) @@ -138,6 +139,7 @@ class DistilBertModelTest(CommonTestCases.CommonModelTester): def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels): model = DistilBertForMaskedLM(config=config) + model.to(torch_device) model.eval() loss, prediction_scores = model(input_ids, attention_mask=input_mask, masked_lm_labels=token_labels) result = { @@ -151,6 +153,7 @@ class DistilBertModelTest(CommonTestCases.CommonModelTester): def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels): model = DistilBertForQuestionAnswering(config=config) + model.to(torch_device) model.eval() loss, start_logits, end_logits = model(input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels) result = { @@ -169,6 +172,7 @@ class 
DistilBertModelTest(CommonTestCases.CommonModelTester): def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels): config.num_labels = self.num_labels model = DistilBertForSequenceClassification(config) + model.to(torch_device) model.eval() loss, logits = model(input_ids, attention_mask=input_mask, labels=sequence_labels) result = { @@ -180,6 +184,22 @@ class DistilBertModelTest(CommonTestCases.CommonModelTester): [self.batch_size, self.num_labels]) self.check_loss_output(result) + def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels): + config.num_labels = self.num_labels + model = DistilBertForTokenClassification(config=config) + model.to(torch_device) + model.eval() + + loss, logits = model(input_ids, attention_mask=input_mask, labels=token_labels) + result = { + "loss": loss, + "logits": logits, + } + self.parent.assertListEqual( + list(result["logits"].size()), + [self.batch_size, self.seq_length, self.num_labels]) + self.check_loss_output(result) + def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs @@ -209,7 +229,11 @@ class DistilBertModelTest(CommonTestCases.CommonModelTester): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs) - # @pytest.mark.slow + def test_for_token_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs) + + # @slow # def test_model_from_pretrained(self): # cache_dir = "/tmp/transformers_test/" # for model_name in list(DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: diff --git a/transformers/tests/modeling_encoder_decoder_test.py b/transformers/tests/modeling_encoder_decoder_test.py new file mode 100644 index 00000000000..64e86df8f5a --- /dev/null +++ b/transformers/tests/modeling_encoder_decoder_test.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# Copyright 2018 The Hugging Face Inc. Team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import unittest + +from transformers import is_torch_available +from .utils import require_torch, slow + +if is_torch_available(): + from transformers import BertModel, BertForMaskedLM, Model2Model + from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP + + +@require_torch +class EncoderDecoderModelTest(unittest.TestCase): + @slow + def test_model2model_from_pretrained(self): + logging.basicConfig(level=logging.INFO) + for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: + model = Model2Model.from_pretrained(model_name) + self.assertIsInstance(model.encoder, BertModel) + self.assertIsInstance(model.decoder, BertForMaskedLM) + self.assertEqual(model.decoder.config.is_decoder, True) + self.assertEqual(model.encoder.config.is_decoder, False) + + def test_model2model_from_pretrained_not_bert(self): + logging.basicConfig(level=logging.INFO) + with self.assertRaises(ValueError): + _ = Model2Model.from_pretrained('roberta') + + with self.assertRaises(ValueError): + _ = Model2Model.from_pretrained('distilbert') + + with self.assertRaises(ValueError): + _ = Model2Model.from_pretrained('does-not-exist') + + +if __name__ == "__main__": + unittest.main() diff --git a/transformers/tests/modeling_gpt2_test.py b/transformers/tests/modeling_gpt2_test.py index 4263e51bc97..ecaa2a4bd09 100644 --- a/transformers/tests/modeling_gpt2_test.py +++ b/transformers/tests/modeling_gpt2_test.py @@ -17,7 +17,6 @@ from __future__ import division from __future__ import print_function import unittest -import pytest import shutil from transformers import is_torch_available @@ -25,13 +24,13 @@ from transformers import is_torch_available if is_torch_available(): from transformers import (GPT2Config, GPT2Model, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP, GPT2LMHeadModel, GPT2DoubleHeadsModel) -else: - pytestmark = pytest.mark.skip("Require Torch") from .modeling_common_test import (CommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import require_torch, slow, torch_device +@require_torch class GPT2ModelTest(CommonTestCases.CommonModelTester): all_model_classes = (GPT2Model, GPT2LMHeadModel, GPT2DoubleHeadsModel) if is_torch_available() else () @@ -136,6 +135,7 @@ class GPT2ModelTest(CommonTestCases.CommonModelTester): def create_and_check_gpt2_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPT2Model(config=config) + model.to(torch_device) model.eval() model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask) @@ -153,6 +153,7 @@ class GPT2ModelTest(CommonTestCases.CommonModelTester): def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args): model = GPT2LMHeadModel(config) + model.to(torch_device) model.eval() loss, lm_logits, _ = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) @@ -171,6 +172,7 @@ class GPT2ModelTest(CommonTestCases.CommonModelTester): def create_and_check_double_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args): model = GPT2DoubleHeadsModel(config) + model.to(torch_device) model.eval() @@ -235,7 +237,7 @@ class GPT2ModelTest(CommonTestCases.CommonModelTester): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs) - @pytest.mark.slow + @slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in 
list(GPT2_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: diff --git a/transformers/tests/modeling_openai_test.py b/transformers/tests/modeling_openai_test.py index 33218288a02..8e4d13438d4 100644 --- a/transformers/tests/modeling_openai_test.py +++ b/transformers/tests/modeling_openai_test.py @@ -17,7 +17,6 @@ from __future__ import division from __future__ import print_function import unittest -import pytest import shutil from transformers import is_torch_available @@ -25,13 +24,13 @@ from transformers import is_torch_available if is_torch_available(): from transformers import (OpenAIGPTConfig, OpenAIGPTModel, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel) -else: - pytestmark = pytest.mark.skip("Require Torch") from .modeling_common_test import (CommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import require_torch, slow, torch_device +@require_torch class OpenAIGPTModelTest(CommonTestCases.CommonModelTester): all_model_classes = (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel) if is_torch_available() else () @@ -124,6 +123,7 @@ class OpenAIGPTModelTest(CommonTestCases.CommonModelTester): def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args): model = OpenAIGPTModel(config=config) + model.to(torch_device) model.eval() model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask) @@ -139,6 +139,7 @@ class OpenAIGPTModelTest(CommonTestCases.CommonModelTester): def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args): model = OpenAIGPTLMHeadModel(config) + model.to(torch_device) model.eval() loss, lm_logits = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) @@ -157,6 +158,7 @@ class OpenAIGPTModelTest(CommonTestCases.CommonModelTester): def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args): model = OpenAIGPTDoubleHeadsModel(config) + model.to(torch_device) model.eval() loss, lm_logits, mc_logits = model(input_ids, token_type_ids=token_type_ids, lm_labels=input_ids) @@ -203,7 +205,7 @@ class OpenAIGPTModelTest(CommonTestCases.CommonModelTester): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs) - @pytest.mark.slow + @slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in list(OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: diff --git a/transformers/tests/modeling_roberta_test.py b/transformers/tests/modeling_roberta_test.py index 82e10da915d..7a3553b1647 100644 --- a/transformers/tests/modeling_roberta_test.py +++ b/transformers/tests/modeling_roberta_test.py @@ -18,21 +18,21 @@ from __future__ import print_function import unittest import shutil -import pytest from transformers import is_torch_available if is_torch_available(): import torch - from transformers import (RobertaConfig, RobertaModel, RobertaForMaskedLM, RobertaForSequenceClassification) + from transformers import (RobertaConfig, RobertaModel, RobertaForMaskedLM, + RobertaForSequenceClassification, RobertaForTokenClassification) from transformers.modeling_roberta import ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP -else: - pytestmark = pytest.mark.skip("Require Torch") from .modeling_common_test import (CommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import require_torch, slow, torch_device +@require_torch 
class RobertaModelTest(CommonTestCases.CommonModelTester): all_model_classes = (RobertaForMaskedLM, RobertaModel) if is_torch_available() else () @@ -128,6 +128,7 @@ class RobertaModelTest(CommonTestCases.CommonModelTester): def create_and_check_roberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = RobertaModel(config=config) + model.to(torch_device) model.eval() sequence_output, pooled_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) sequence_output, pooled_output = model(input_ids, token_type_ids=token_type_ids) @@ -145,6 +146,7 @@ class RobertaModelTest(CommonTestCases.CommonModelTester): def create_and_check_roberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = RobertaForMaskedLM(config=config) + model.to(torch_device) model.eval() loss, prediction_scores = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels) result = { @@ -156,6 +158,23 @@ class RobertaModelTest(CommonTestCases.CommonModelTester): [self.batch_size, self.seq_length, self.vocab_size]) self.check_loss_output(result) + def create_and_check_roberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, + sequence_labels, token_labels, choice_labels): + config.num_labels = self.num_labels + model = RobertaForTokenClassification(config=config) + model.to(torch_device) + model.eval() + loss, logits = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, + labels=token_labels) + result = { + "loss": loss, + "logits": logits, + } + self.parent.assertListEqual( + list(result["logits"].size()), + [self.batch_size, self.seq_length, self.num_labels]) + self.check_loss_output(result) + def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, token_type_ids, input_mask, @@ -178,7 +197,7 @@ class RobertaModelTest(CommonTestCases.CommonModelTester): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_roberta_for_masked_lm(*config_and_inputs) - @pytest.mark.slow + @slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in list(ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: @@ -190,10 +209,10 @@ class RobertaModelTest(CommonTestCases.CommonModelTester): class RobertaModelIntegrationTest(unittest.TestCase): - @pytest.mark.slow + @slow def test_inference_masked_lm(self): model = RobertaForMaskedLM.from_pretrained('roberta-base') - + input_ids = torch.tensor([[ 0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) output = model(input_ids)[0] expected_shape = torch.Size((1, 11, 50265)) @@ -211,10 +230,10 @@ class RobertaModelIntegrationTest(unittest.TestCase): torch.allclose(output[:, :3, :3], expected_slice, atol=1e-3) ) - @pytest.mark.slow + @slow def test_inference_no_head(self): model = RobertaModel.from_pretrained('roberta-base') - + input_ids = torch.tensor([[ 0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) output = model(input_ids)[0] # compare the actual values for a slice. 
@@ -227,10 +246,10 @@ class RobertaModelIntegrationTest(unittest.TestCase): torch.allclose(output[:, :3, :3], expected_slice, atol=1e-3) ) - @pytest.mark.slow + @slow def test_inference_classification_head(self): model = RobertaForSequenceClassification.from_pretrained('roberta-large-mnli') - + input_ids = torch.tensor([[ 0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) output = model(input_ids)[0] expected_shape = torch.Size((1, 3)) diff --git a/transformers/tests/modeling_tf_albert_test.py b/transformers/tests/modeling_tf_albert_test.py new file mode 100644 index 00000000000..7d3325b70b5 --- /dev/null +++ b/transformers/tests/modeling_tf_albert_test.py @@ -0,0 +1,230 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest +import shutil +import sys + +from .modeling_tf_common_test import (TFCommonTestCases, ids_tensor) +from .configuration_common_test import ConfigTester +from .utils import require_tf, slow + +from transformers import AlbertConfig, is_tf_available + +if is_tf_available(): + import tensorflow as tf + from transformers.modeling_tf_albert import (TFAlbertModel, TFAlbertForMaskedLM, + TFAlbertForSequenceClassification, + TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP) + + +@require_tf +class TFAlbertModelTest(TFCommonTestCases.TFCommonModelTester): + + all_model_classes = ( + TFAlbertModel, + TFAlbertForMaskedLM, + TFAlbertForSequenceClassification + ) if is_tf_available() else () + + class TFAlbertModelTester(object): + + def __init__(self, + parent, + batch_size=13, + seq_length=7, + is_training=True, + use_input_mask=True, + use_token_type_ids=True, + use_labels=True, + vocab_size=99, + embedding_size=16, + hidden_size=32, + num_hidden_layers=5, + num_attention_heads=4, + intermediate_size=37, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=16, + type_sequence_label_size=2, + initializer_range=0.02, + num_labels=3, + num_choices=4, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_input_mask = use_input_mask + self.use_token_type_ids = use_token_type_ids + self.use_labels = use_labels + self.vocab_size = vocab_size + self.embedding_size = embedding_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.type_sequence_label_size = type_sequence_label_size + self.initializer_range = initializer_range + self.num_labels = num_labels + 
self.num_choices = num_choices + self.scope = scope + + def prepare_config_and_inputs(self): + input_ids = ids_tensor( + [self.batch_size, self.seq_length], self.vocab_size) + + input_mask = None + if self.use_input_mask: + input_mask = ids_tensor( + [self.batch_size, self.seq_length], vocab_size=2) + + token_type_ids = None + if self.use_token_type_ids: + token_type_ids = ids_tensor( + [self.batch_size, self.seq_length], self.type_vocab_size) + + sequence_labels = None + token_labels = None + choice_labels = None + if self.use_labels: + sequence_labels = ids_tensor( + [self.batch_size], self.type_sequence_label_size) + token_labels = ids_tensor( + [self.batch_size, self.seq_length], self.num_labels) + choice_labels = ids_tensor([self.batch_size], self.num_choices) + + config = AlbertConfig( + vocab_size_or_config_json_file=self.vocab_size, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + hidden_act=self.hidden_act, + hidden_dropout_prob=self.hidden_dropout_prob, + attention_probs_dropout_prob=self.attention_probs_dropout_prob, + max_position_embeddings=self.max_position_embeddings, + type_vocab_size=self.type_vocab_size, + initializer_range=self.initializer_range) + + return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + + def create_and_check_albert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + model = TFAlbertModel(config=config) + # inputs = {'input_ids': input_ids, + # 'attention_mask': input_mask, + # 'token_type_ids': token_type_ids} + # sequence_output, pooled_output = model(**inputs) + inputs = {'input_ids': input_ids, + 'attention_mask': input_mask, + 'token_type_ids': token_type_ids} + sequence_output, pooled_output = model(inputs) + + inputs = [input_ids, input_mask] + sequence_output, pooled_output = model(inputs) + + sequence_output, pooled_output = model(input_ids) + + result = { + "sequence_output": sequence_output.numpy(), + "pooled_output": pooled_output.numpy(), + } + self.parent.assertListEqual( + list(result["sequence_output"].shape), + [self.batch_size, self.seq_length, self.hidden_size]) + self.parent.assertListEqual(list(result["pooled_output"].shape), [ + self.batch_size, self.hidden_size]) + + def create_and_check_albert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + model = TFAlbertForMaskedLM(config=config) + inputs = {'input_ids': input_ids, + 'attention_mask': input_mask, + 'token_type_ids': token_type_ids} + prediction_scores, = model(inputs) + result = { + "prediction_scores": prediction_scores.numpy(), + } + self.parent.assertListEqual( + list(result["prediction_scores"].shape), + [self.batch_size, self.seq_length, self.vocab_size]) + + def create_and_check_albert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + config.num_labels = self.num_labels + model = TFAlbertForSequenceClassification(config=config) + inputs = {'input_ids': input_ids, + 'attention_mask': input_mask, + 'token_type_ids': token_type_ids} + logits, = model(inputs) + result = { + "logits": logits.numpy(), + } + self.parent.assertListEqual( + list(result["logits"].shape), + [self.batch_size, self.num_labels]) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + 
(config, input_ids, token_type_ids, input_mask, + sequence_labels, token_labels, choice_labels) = config_and_inputs + inputs_dict = {'input_ids': input_ids, + 'token_type_ids': token_type_ids, 'attention_mask': input_mask} + return config, inputs_dict + + def setUp(self): + self.model_tester = TFAlbertModelTest.TFAlbertModelTester(self) + self.config_tester = ConfigTester( + self, config_class=AlbertConfig, hidden_size=37) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_albert_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_albert_model(*config_and_inputs) + + def test_for_masked_lm(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_albert_for_masked_lm( + *config_and_inputs) + + def test_for_sequence_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_albert_for_sequence_classification( + *config_and_inputs) + + @slow + def test_model_from_pretrained(self): + cache_dir = "/tmp/transformers_test/" + # for model_name in list(TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: + for model_name in ['albert-base-uncased']: + model = TFAlbertModel.from_pretrained( + model_name, cache_dir=cache_dir) + shutil.rmtree(cache_dir) + self.assertIsNotNone(model) + + +if __name__ == "__main__": + unittest.main() diff --git a/transformers/tests/modeling_tf_auto_test.py b/transformers/tests/modeling_tf_auto_test.py index 2cda3abc1cd..7ab6eaa3d63 100644 --- a/transformers/tests/modeling_tf_auto_test.py +++ b/transformers/tests/modeling_tf_auto_test.py @@ -18,11 +18,12 @@ from __future__ import print_function import unittest import shutil -import pytest import logging from transformers import is_tf_available +from .utils import require_tf, slow, SMALL_MODEL_IDENTIFIER + if is_tf_available(): from transformers import (AutoConfig, BertConfig, TFAutoModel, TFBertModel, @@ -33,11 +34,11 @@ if is_tf_available(): from .modeling_common_test import (CommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester -else: - pytestmark = pytest.mark.skip("Require TensorFlow") +@require_tf class TFAutoModelTest(unittest.TestCase): + @slow def test_model_from_pretrained(self): import h5py self.assertTrue(h5py.version.hdf5_version.startswith("1.10")) @@ -53,6 +54,7 @@ class TFAutoModelTest(unittest.TestCase): self.assertIsNotNone(model) self.assertIsInstance(model, TFBertModel) + @slow def test_lmhead_model_from_pretrained(self): logging.basicConfig(level=logging.INFO) # for model_name in list(TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: @@ -65,6 +67,7 @@ class TFAutoModelTest(unittest.TestCase): self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForMaskedLM) + @slow def test_sequence_classification_model_from_pretrained(self): logging.basicConfig(level=logging.INFO) # for model_name in list(TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: @@ -77,6 +80,7 @@ class TFAutoModelTest(unittest.TestCase): self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForSequenceClassification) + @slow def test_question_answering_model_from_pretrained(self): logging.basicConfig(level=logging.INFO) # for model_name in list(TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: @@ -89,6 +93,11 @@ class TFAutoModelTest(unittest.TestCase): self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForQuestionAnswering) + def test_from_pretrained_identifier(self): + 
logging.basicConfig(level=logging.INFO) + model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, force_download=True) + self.assertIsInstance(model, TFBertForMaskedLM) + if __name__ == "__main__": unittest.main() diff --git a/transformers/tests/modeling_tf_bert_test.py b/transformers/tests/modeling_tf_bert_test.py index a1715d25684..d7a86fecb93 100644 --- a/transformers/tests/modeling_tf_bert_test.py +++ b/transformers/tests/modeling_tf_bert_test.py @@ -18,11 +18,11 @@ from __future__ import print_function import unittest import shutil -import pytest import sys from .modeling_tf_common_test import (TFCommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import require_tf, slow from transformers import BertConfig, is_tf_available @@ -36,10 +36,9 @@ if is_tf_available(): TFBertForTokenClassification, TFBertForQuestionAnswering, TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP) -else: - pytestmark = pytest.mark.skip("Require TensorFlow") +@require_tf class TFBertModelTest(TFCommonTestCases.TFCommonModelTester): all_model_classes = (TFBertModel, TFBertForMaskedLM, TFBertForNextSentencePrediction, @@ -131,10 +130,6 @@ class TFBertModelTest(TFCommonTestCases.TFCommonModelTester): def create_and_check_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): model = TFBertModel(config=config) - # inputs = {'input_ids': input_ids, - # 'attention_mask': input_mask, - # 'token_type_ids': token_type_ids} - # sequence_output, pooled_output = model(**inputs) inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} @@ -313,7 +308,7 @@ class TFBertModelTest(TFCommonTestCases.TFCommonModelTester): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bert_for_token_classification(*config_and_inputs) - @pytest.mark.slow + @slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" # for model_name in list(TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: diff --git a/transformers/tests/modeling_tf_common_test.py b/transformers/tests/modeling_tf_common_test.py index f636c428897..439360ba35b 100644 --- a/transformers/tests/modeling_tf_common_test.py +++ b/transformers/tests/modeling_tf_common_test.py @@ -25,18 +25,17 @@ import unittest import uuid import tempfile -import pytest import sys from transformers import is_tf_available, is_torch_available +from .utils import require_tf, slow + if is_tf_available(): import tensorflow as tf import numpy as np from transformers import TFPreTrainedModel # from transformers.modeling_bert import BertModel, BertConfig, BERT_PRETRAINED_MODEL_ARCHIVE_MAP -else: - pytestmark = pytest.mark.skip("Require TensorFlow") if sys.version_info[0] == 2: import cPickle as pickle @@ -62,6 +61,7 @@ def _config_zero_init(config): class TFCommonTestCases: + @require_tf class TFCommonModelTester(unittest.TestCase): model_tester = None @@ -164,7 +164,7 @@ class TFCommonTestCases: for model_class in self.all_model_classes: # Prepare our model model = model_class(config) - + # Let's load it from the disk to be sure we can use pretrained weights with TemporaryDirectory() as tmpdirname: outputs = model(inputs_dict) # build the model @@ -233,80 +233,6 @@ class TFCommonTestCases: self.model_tester.seq_length, self.model_tester.key_len if hasattr(self.model_tester, 'key_len') else self.model_tester.seq_length]) - def test_headmasking(self): - pass - # config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() - - # config.output_attentions = True - # config.output_hidden_states = True - # configs_no_init = _config_zero_init(config) # To be sure we have no Nan - # for model_class in self.all_model_classes: - # model = model_class(config=configs_no_init) - # model.eval() - - # # Prepare head_mask - # # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior) - # head_mask = torch.ones(self.model_tester.num_hidden_layers, self.model_tester.num_attention_heads) - # head_mask[0, 0] = 0 - # head_mask[-1, :-1] = 0 - # head_mask.requires_grad_(requires_grad=True) - # inputs = inputs_dict.copy() - # inputs['head_mask'] = head_mask - - # outputs = model(**inputs) - - # # Test that we can get a gradient back for importance score computation - # output = sum(t.sum() for t in outputs[0]) - # output = output.sum() - # output.backward() - # multihead_outputs = head_mask.grad - - # attentions = outputs[-1] - # hidden_states = outputs[-2] - - # # Remove Nan - - # self.assertIsNotNone(multihead_outputs) - # self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers) - # self.assertAlmostEqual( - # attentions[0][..., 0, :, :].flatten().sum().item(), 0.0) - # self.assertNotEqual( - # attentions[0][..., -1, :, :].flatten().sum().item(), 0.0) - # self.assertNotEqual( - # attentions[1][..., 0, :, :].flatten().sum().item(), 0.0) - # self.assertAlmostEqual( - # attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0) - # self.assertNotEqual( - # attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0) - - - def test_head_pruning(self): - pass - # if not self.test_pruning: - # return - - # config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - - # for model_class in self.all_model_classes: - # config.output_attentions = True - # config.output_hidden_states = False - # model = model_class(config=config) - # model.eval() - # heads_to_prune = {0: list(range(1, self.model_tester.num_attention_heads)), - # -1: [0]} - # model.prune_heads(heads_to_prune) - # outputs = model(**inputs_dict) - - # attentions = outputs[-1] - - # self.assertEqual( - # attentions[0].shape[-3], 1) - # self.assertEqual( - # attentions[1].shape[-3], self.model_tester.num_attention_heads) - # self.assertEqual( - # attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) - - def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() @@ -323,75 +249,14 @@ class TFCommonTestCases: list(hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size]) + def test_model_common_attributes(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - def test_resize_tokens_embeddings(self): - pass - # original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - # if not self.test_resize_embeddings: - # return - - # for model_class in self.all_model_classes: - # config = copy.deepcopy(original_config) - # model = model_class(config) - - # model_vocab_size = config.vocab_size - # # Retrieve the embeddings and clone theme - # model_embed = model.resize_token_embeddings(model_vocab_size) - # cloned_embeddings = model_embed.weight.clone() - - # # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size - # model_embed = model.resize_token_embeddings(model_vocab_size + 10) - # 
self.assertEqual(model.config.vocab_size, model_vocab_size + 10) - # # Check that it actually resizes the embeddings matrix - # self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) - - # # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size - # model_embed = model.resize_token_embeddings(model_vocab_size - 15) - # self.assertEqual(model.config.vocab_size, model_vocab_size - 15) - # # Check that it actually resizes the embeddings matrix - # self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) - - # # Check that adding and removing tokens has not modified the first part of the embedding matrix. - # models_equal = True - # for p1, p2 in zip(cloned_embeddings, model_embed.weight): - # if p1.data.ne(p2.data).sum() > 0: - # models_equal = False - - # self.assertTrue(models_equal) - - - def test_tie_model_weights(self): - pass - # config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - - # def check_same_values(layer_1, layer_2): - # equal = True - # for p1, p2 in zip(layer_1.weight, layer_2.weight): - # if p1.data.ne(p2.data).sum() > 0: - # equal = False - # return equal - - # for model_class in self.all_model_classes: - # if not hasattr(model_class, 'tie_weights'): - # continue - - # config.torchscript = True - # model_not_tied = model_class(config) - # params_not_tied = list(model_not_tied.parameters()) - - # config_tied = copy.deepcopy(config) - # config_tied.torchscript = False - # model_tied = model_class(config_tied) - # params_tied = list(model_tied.parameters()) - - # # Check that the embedding layer and decoding layer are the same in size and in value - # self.assertGreater(len(params_not_tied), len(params_tied)) - - # # Check that after resize they remain tied. - # model_tied.resize_token_embeddings(config.vocab_size + 10) - # params_tied_2 = list(model_tied.parameters()) - # self.assertGreater(len(params_not_tied), len(params_tied)) - # self.assertEqual(len(params_tied_2), len(params_tied)) + for model_class in self.all_model_classes: + model = model_class(config) + assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer) + x = model.get_output_embeddings() + assert x is None or isinstance(x, tf.keras.layers.Layer) def test_determinism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() @@ -401,6 +266,35 @@ class TFCommonTestCases: first, second = model(inputs_dict, training=False)[0], model(inputs_dict, training=False)[0] self.assertTrue(tf.math.equal(first, second).numpy().all()) + def test_inputs_embeds(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + input_ids = inputs_dict["input_ids"] + del inputs_dict["input_ids"] + + for model_class in self.all_model_classes: + model = model_class(config) + + wte = model.get_input_embeddings() + try: + x = wte(input_ids, mode="embedding") + except: + try: + x = wte([input_ids], mode="embedding") + except: + try: + x = wte([input_ids, None, None, None], mode="embedding") + except: + if hasattr(self.model_tester, "embedding_size"): + x = tf.ones(input_ids.shape + [self.model_tester.embedding_size], dtype=tf.dtypes.float32) + else: + x = tf.ones(input_ids.shape + [self.model_tester.hidden_size], dtype=tf.dtypes.float32) + # ^^ In our TF models, the input_embeddings can take slightly different forms, + # so we try a few of them. 
+ # We used to fall back to just synthetically creating a dummy tensor of ones: + # + inputs_dict["inputs_embeds"] = x + outputs = model(inputs_dict) + def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None): """Creates a random int32 tensor of the shape within the vocab size.""" @@ -422,29 +316,5 @@ def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None): return output -class TFModelUtilsTest(unittest.TestCase): - @pytest.mark.skipif('tensorflow' not in sys.modules, reason="requires TensorFlow") - def test_model_from_pretrained(self): - pass - # logging.basicConfig(level=logging.INFO) - # for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: - # config = BertConfig.from_pretrained(model_name) - # self.assertIsNotNone(config) - # self.assertIsInstance(config, PretrainedConfig) - - # model = BertModel.from_pretrained(model_name) - # model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True) - # self.assertIsNotNone(model) - # self.assertIsInstance(model, PreTrainedModel) - # for value in loading_info.values(): - # self.assertEqual(len(value), 0) - - # config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True) - # model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True) - # self.assertEqual(model.config.output_attentions, True) - # self.assertEqual(model.config.output_hidden_states, True) - # self.assertEqual(model.config, config) - - if __name__ == "__main__": unittest.main() diff --git a/transformers/tests/modeling_tf_ctrl_test.py b/transformers/tests/modeling_tf_ctrl_test.py index a57c882169f..0b421c20c93 100644 --- a/transformers/tests/modeling_tf_ctrl_test.py +++ b/transformers/tests/modeling_tf_ctrl_test.py @@ -18,11 +18,11 @@ from __future__ import print_function import unittest import shutil -import pytest import sys from .modeling_tf_common_test import (TFCommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import require_tf, slow from transformers import CTRLConfig, is_tf_available @@ -30,10 +30,9 @@ if is_tf_available(): import tensorflow as tf from transformers.modeling_tf_ctrl import (TFCTRLModel, TFCTRLLMHeadModel, TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP) -else: - pytestmark = pytest.mark.skip("Require TensorFlow") +@require_tf class TFCTRLModelTest(TFCommonTestCases.TFCommonModelTester): all_model_classes = (TFCTRLModel, TFCTRLLMHeadModel) if is_tf_available() else () @@ -188,7 +187,7 @@ class TFCTRLModelTest(TFCommonTestCases.TFCommonModelTester): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_lm_head(*config_and_inputs) - @pytest.mark.slow + @slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in list(TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: diff --git a/transformers/tests/modeling_tf_distilbert_test.py b/transformers/tests/modeling_tf_distilbert_test.py index e6d37959144..0ec45150cad 100644 --- a/transformers/tests/modeling_tf_distilbert_test.py +++ b/transformers/tests/modeling_tf_distilbert_test.py @@ -17,10 +17,10 @@ from __future__ import division from __future__ import print_function import unittest -import pytest from .modeling_tf_common_test import (TFCommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import require_tf, slow from transformers import DistilBertConfig, is_tf_available @@ -30,10 +30,9 @@ if is_tf_available(): 
TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification) -else: - pytestmark = pytest.mark.skip("Require TensorFlow") +@require_tf class TFDistilBertModelTest(TFCommonTestCases.TFCommonModelTester): all_model_classes = (TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, @@ -210,7 +209,7 @@ class TFDistilBertModelTest(TFCommonTestCases.TFCommonModelTester): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs) - # @pytest.mark.slow + # @slow # def test_model_from_pretrained(self): # cache_dir = "/tmp/transformers_test/" # for model_name in list(DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: diff --git a/transformers/tests/modeling_tf_gpt2_test.py b/transformers/tests/modeling_tf_gpt2_test.py index 76e9ee22983..e070b72e65d 100644 --- a/transformers/tests/modeling_tf_gpt2_test.py +++ b/transformers/tests/modeling_tf_gpt2_test.py @@ -18,11 +18,11 @@ from __future__ import print_function import unittest import shutil -import pytest import sys from .modeling_tf_common_test import (TFCommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import require_tf, slow from transformers import GPT2Config, is_tf_available @@ -31,10 +31,9 @@ if is_tf_available(): from transformers.modeling_tf_gpt2 import (TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel, TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP) -else: - pytestmark = pytest.mark.skip("Require TensorFlow") +@require_tf class TFGPT2ModelTest(TFCommonTestCases.TFCommonModelTester): all_model_classes = (TFGPT2Model, TFGPT2LMHeadModel, @@ -219,7 +218,7 @@ class TFGPT2ModelTest(TFCommonTestCases.TFCommonModelTester): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_gpt2_double_head(*config_and_inputs) - @pytest.mark.slow + @slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in list(TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: diff --git a/transformers/tests/modeling_tf_openai_gpt_test.py b/transformers/tests/modeling_tf_openai_gpt_test.py index d470c8862da..675e806c127 100644 --- a/transformers/tests/modeling_tf_openai_gpt_test.py +++ b/transformers/tests/modeling_tf_openai_gpt_test.py @@ -18,11 +18,11 @@ from __future__ import print_function import unittest import shutil -import pytest import sys from .modeling_tf_common_test import (TFCommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import require_tf, slow from transformers import OpenAIGPTConfig, is_tf_available @@ -31,10 +31,9 @@ if is_tf_available(): from transformers.modeling_tf_openai import (TFOpenAIGPTModel, TFOpenAIGPTLMHeadModel, TFOpenAIGPTDoubleHeadsModel, TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP) -else: - pytestmark = pytest.mark.skip("Require TensorFlow") +@require_tf class TFOpenAIGPTModelTest(TFCommonTestCases.TFCommonModelTester): all_model_classes = (TFOpenAIGPTModel, TFOpenAIGPTLMHeadModel, @@ -218,7 +217,7 @@ class TFOpenAIGPTModelTest(TFCommonTestCases.TFCommonModelTester): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_double_head(*config_and_inputs) - @pytest.mark.slow + @slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in list(TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: diff --git 
a/transformers/tests/modeling_tf_roberta_test.py b/transformers/tests/modeling_tf_roberta_test.py index 735c9aae27a..42440bf1b7b 100644 --- a/transformers/tests/modeling_tf_roberta_test.py +++ b/transformers/tests/modeling_tf_roberta_test.py @@ -18,10 +18,10 @@ from __future__ import print_function import unittest import shutil -import pytest from .modeling_tf_common_test import (TFCommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import require_tf, slow from transformers import RobertaConfig, is_tf_available @@ -30,11 +30,11 @@ if is_tf_available(): import numpy from transformers.modeling_tf_roberta import (TFRobertaModel, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, + TFRobertaForTokenClassification, TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP) -else: - pytestmark = pytest.mark.skip("Require TensorFlow") +@require_tf class TFRobertaModelTest(TFCommonTestCases.TFCommonModelTester): all_model_classes = (TFRobertaModel,TFRobertaForMaskedLM, @@ -154,6 +154,20 @@ class TFRobertaModelTest(TFCommonTestCases.TFCommonModelTester): list(result["prediction_scores"].shape), [self.batch_size, self.seq_length, self.vocab_size]) + def create_and_check_roberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): + config.num_labels = self.num_labels + model = TFRobertaForTokenClassification(config=config) + inputs = {'input_ids': input_ids, + 'attention_mask': input_mask, + 'token_type_ids': token_type_ids} + logits, = model(inputs) + result = { + "logits": logits.numpy(), + } + self.parent.assertListEqual( + list(result["logits"].shape), + [self.batch_size, self.seq_length, self.num_labels]) + def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, token_type_ids, input_mask, @@ -176,7 +190,7 @@ class TFRobertaModelTest(TFCommonTestCases.TFCommonModelTester): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_roberta_for_masked_lm(*config_and_inputs) - @pytest.mark.slow + @slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in list(TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: @@ -188,10 +202,10 @@ class TFRobertaModelTest(TFCommonTestCases.TFCommonModelTester): class TFRobertaModelIntegrationTest(unittest.TestCase): - @pytest.mark.slow + @slow def test_inference_masked_lm(self): model = TFRobertaForMaskedLM.from_pretrained('roberta-base') - + input_ids = tf.constant([[ 0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) output = model(input_ids)[0] expected_shape = [1, 11, 50265] @@ -209,10 +223,10 @@ class TFRobertaModelIntegrationTest(unittest.TestCase): numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-3) ) - @pytest.mark.slow + @slow def test_inference_no_head(self): model = TFRobertaModel.from_pretrained('roberta-base') - + input_ids = tf.constant([[ 0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) output = model(input_ids)[0] # compare the actual values for a slice. 
@@ -225,10 +239,10 @@ class TFRobertaModelIntegrationTest(unittest.TestCase): numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-3) ) - @pytest.mark.slow + @slow def test_inference_classification_head(self): model = TFRobertaForSequenceClassification.from_pretrained('roberta-large-mnli') - + input_ids = tf.constant([[ 0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) output = model(input_ids)[0] expected_shape = [1, 3] diff --git a/transformers/tests/modeling_tf_transfo_xl_test.py b/transformers/tests/modeling_tf_transfo_xl_test.py index 534fe396468..03e332bdc1e 100644 --- a/transformers/tests/modeling_tf_transfo_xl_test.py +++ b/transformers/tests/modeling_tf_transfo_xl_test.py @@ -19,10 +19,10 @@ from __future__ import print_function import unittest import random import shutil -import pytest from .modeling_tf_common_test import (TFCommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import require_tf, slow from transformers import TransfoXLConfig, is_tf_available @@ -31,10 +31,9 @@ if is_tf_available(): from transformers.modeling_tf_transfo_xl import (TFTransfoXLModel, TFTransfoXLLMHeadModel, TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP) -else: - pytestmark = pytest.mark.skip("Require TensorFlow") +@require_tf class TFTransfoXLModelTest(TFCommonTestCases.TFCommonModelTester): all_model_classes = (TFTransfoXLModel, TFTransfoXLLMHeadModel) if is_tf_available() else () @@ -204,7 +203,7 @@ class TFTransfoXLModelTest(TFCommonTestCases.TFCommonModelTester): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs) - @pytest.mark.slow + @slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in list(TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: diff --git a/transformers/tests/modeling_tf_xlm_test.py b/transformers/tests/modeling_tf_xlm_test.py index 1bd661bebf6..a680b703673 100644 --- a/transformers/tests/modeling_tf_xlm_test.py +++ b/transformers/tests/modeling_tf_xlm_test.py @@ -18,7 +18,6 @@ from __future__ import print_function import unittest import shutil -import pytest from transformers import is_tf_available @@ -29,13 +28,13 @@ if is_tf_available(): TFXLMForSequenceClassification, TFXLMForQuestionAnsweringSimple, TF_XLM_PRETRAINED_MODEL_ARCHIVE_MAP) -else: - pytestmark = pytest.mark.skip("Require TensorFlow") from .modeling_tf_common_test import (TFCommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import require_tf, slow +@require_tf class TFXLMModelTest(TFCommonTestCases.TFCommonModelTester): all_model_classes = (TFXLMModel, TFXLMWithLMHeadModel, @@ -251,7 +250,7 @@ class TFXLMModelTest(TFCommonTestCases.TFCommonModelTester): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs) - @pytest.mark.slow + @slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in list(TF_XLM_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: diff --git a/transformers/tests/modeling_tf_xlnet_test.py b/transformers/tests/modeling_tf_xlnet_test.py index 12a8fbe36f0..94864b86f2d 100644 --- a/transformers/tests/modeling_tf_xlnet_test.py +++ b/transformers/tests/modeling_tf_xlnet_test.py @@ -21,7 +21,6 @@ import unittest import json import random import shutil -import pytest from transformers import XLNetConfig, is_tf_available @@ -30,18 +29,21 @@ if 
is_tf_available(): from transformers.modeling_tf_xlnet import (TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, + TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple, TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP) -else: - pytestmark = pytest.mark.skip("Require TensorFlow") from .modeling_tf_common_test import (TFCommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import require_tf, slow + +@require_tf class TFXLNetModelTest(TFCommonTestCases.TFCommonModelTester): all_model_classes=(TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, + TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple) if is_tf_available() else () test_pruning = False @@ -258,6 +260,26 @@ class TFXLNetModelTest(TFCommonTestCases.TFCommonModelTester): list(list(mem.shape) for mem in result["mems_1"]), [[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers) + def create_and_check_xlnet_for_token_classification(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, + target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels): + config.num_labels = input_ids_1.shape[1] + model = TFXLNetForTokenClassification(config) + inputs = {'input_ids': input_ids_1, + 'attention_mask': input_mask, + # 'token_type_ids': token_type_ids + } + logits, mems_1 = model(inputs) + result = { + "mems_1": [mem.numpy() for mem in mems_1], + "logits": logits.numpy(), + } + self.parent.assertListEqual( + list(result["logits"].shape), + [self.batch_size, self.seq_length, config.num_labels]) + self.parent.assertListEqual( + list(list(mem.shape) for mem in result["mems_1"]), + [[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers) + def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, @@ -282,19 +304,23 @@ class TFXLNetModelTest(TFCommonTestCases.TFCommonModelTester): def test_xlnet_lm_head(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_xlnet_lm_head(*config_and_inputs) + self.model_tester.create_and_check_xlnet_lm_head(*config_and_inputs) def test_xlnet_sequence_classif(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlnet_sequence_classif(*config_and_inputs) + def test_xlnet_token_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_xlnet_for_token_classification(*config_and_inputs) + def test_xlnet_qa(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlnet_qa(*config_and_inputs) - @pytest.mark.slow + @slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in list(TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: diff --git a/transformers/tests/modeling_transfo_xl_test.py b/transformers/tests/modeling_transfo_xl_test.py index f7b913da5b1..647dd3724d9 100644 --- a/transformers/tests/modeling_transfo_xl_test.py +++ b/transformers/tests/modeling_transfo_xl_test.py @@ -19,7 +19,6 @@ from __future__ import print_function import unittest import random import shutil -import pytest from transformers import is_torch_available @@ -27,12 +26,13 @@ if is_torch_available(): import torch 
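Throughout these test modules the old module-level `pytestmark = pytest.mark.skip(...)` guard and the `@pytest.mark.slow` marker are replaced by decorators defined in the new `transformers/tests/utils.py`. A minimal sketch of how a test module opts into them, assuming it lives inside `transformers/tests` so the relative import resolves; the class and test names are made up for illustration, while `TransfoXLModel`, `torch_device` and the decorators are taken from the surrounding diff:

import unittest

from transformers import is_torch_available

if is_torch_available():
    from transformers import TransfoXLModel

from .utils import require_torch, slow, torch_device


@require_torch        # the whole class is skipped when PyTorch is not installed
class ExampleTransfoXLTest(unittest.TestCase):

    @slow              # only runs when the RUN_SLOW environment variable is truthy
    def test_from_pretrained(self):
        model = TransfoXLModel.from_pretrained("transfo-xl-wt103")
        model.to(torch_device)   # "cuda" only when USE_CUDA is set, otherwise "cpu"
        model.eval()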
from transformers import (TransfoXLConfig, TransfoXLModel, TransfoXLLMHeadModel) from transformers.modeling_transfo_xl import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP -else: - pytestmark = pytest.mark.skip("Require Torch") from .modeling_common_test import (CommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import require_torch, slow, torch_device + +@require_torch class TransfoXLModelTest(CommonTestCases.CommonModelTester): all_model_classes = (TransfoXLModel, TransfoXLLMHeadModel) if is_torch_available() else () @@ -111,6 +111,7 @@ class TransfoXLModelTest(CommonTestCases.CommonModelTester): def create_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels): model = TransfoXLModel(config) + model.to(torch_device) model.eval() hidden_states_1, mems_1 = model(input_ids_1) @@ -140,6 +141,7 @@ class TransfoXLModelTest(CommonTestCases.CommonModelTester): def create_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels): model = TransfoXLLMHeadModel(config) + model.to(torch_device) model.eval() lm_logits_1, mems_1 = model(input_ids_1) @@ -204,7 +206,7 @@ class TransfoXLModelTest(CommonTestCases.CommonModelTester): output_result = self.model_tester.create_transfo_xl_lm_head(*config_and_inputs) self.model_tester.check_transfo_xl_lm_head_output(output_result) - @pytest.mark.slow + @slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in list(TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: diff --git a/transformers/tests/modeling_xlm_test.py b/transformers/tests/modeling_xlm_test.py index 0133febb581..f6b980767c9 100644 --- a/transformers/tests/modeling_xlm_test.py +++ b/transformers/tests/modeling_xlm_test.py @@ -18,7 +18,6 @@ from __future__ import print_function import unittest import shutil -import pytest from transformers import is_torch_available @@ -26,13 +25,13 @@ if is_torch_available(): from transformers import (XLMConfig, XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple) from transformers.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_MAP -else: - pytestmark = pytest.mark.skip("Require Torch") from .modeling_common_test import (CommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import require_torch, slow, torch_device +@require_torch class XLMModelTest(CommonTestCases.CommonModelTester): all_model_classes = (XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, @@ -148,6 +147,7 @@ class XLMModelTest(CommonTestCases.CommonModelTester): def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, input_mask): model = XLMModel(config=config) + model.to(torch_device) model.eval() outputs = model(input_ids, lengths=input_lengths, langs=token_type_ids) outputs = model(input_ids, langs=token_type_ids) @@ -163,6 +163,7 @@ class XLMModelTest(CommonTestCases.CommonModelTester): def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, input_mask): model = XLMWithLMHeadModel(config) + model.to(torch_device) model.eval() loss, logits = model(input_ids, token_type_ids=token_type_ids, labels=token_labels) @@ -182,6 +183,7 @@ class XLMModelTest(CommonTestCases.CommonModelTester): def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, 
is_impossible_labels, input_mask): model = XLMForQuestionAnsweringSimple(config) + model.to(torch_device) model.eval() outputs = model(input_ids) @@ -206,6 +208,7 @@ class XLMModelTest(CommonTestCases.CommonModelTester): def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, input_mask): model = XLMForQuestionAnswering(config) + model.to(torch_device) model.eval() outputs = model(input_ids) @@ -260,6 +263,7 @@ class XLMModelTest(CommonTestCases.CommonModelTester): def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, input_mask): model = XLMForSequenceClassification(config) + model.to(torch_device) model.eval() (logits,) = model(input_ids) @@ -312,7 +316,7 @@ class XLMModelTest(CommonTestCases.CommonModelTester): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs) - @pytest.mark.slow + @slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in list(XLM_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: diff --git a/transformers/tests/modeling_xlnet_test.py b/transformers/tests/modeling_xlnet_test.py index d97ea6a425d..56b6bb3f4dd 100644 --- a/transformers/tests/modeling_xlnet_test.py +++ b/transformers/tests/modeling_xlnet_test.py @@ -21,24 +21,25 @@ import unittest import json import random import shutil -import pytest from transformers import is_torch_available if is_torch_available(): import torch - from transformers import (XLNetConfig, XLNetModel, XLNetLMHeadModel, XLNetForSequenceClassification, XLNetForQuestionAnswering) + from transformers import (XLNetConfig, XLNetModel, XLNetLMHeadModel, XLNetForSequenceClassification, + XLNetForTokenClassification, XLNetForQuestionAnswering) from transformers.modeling_xlnet import XLNET_PRETRAINED_MODEL_ARCHIVE_MAP -else: - pytestmark = pytest.mark.skip("Require Torch") from .modeling_common_test import (CommonTestCases, ids_tensor) from .configuration_common_test import ConfigTester +from .utils import require_torch, slow, torch_device + +@require_torch class XLNetModelTest(CommonTestCases.CommonModelTester): - all_model_classes=(XLNetModel, XLNetLMHeadModel, + all_model_classes=(XLNetModel, XLNetLMHeadModel, XLNetForTokenClassification, XLNetForSequenceClassification, XLNetForQuestionAnswering) if is_torch_available() else () test_pruning = False @@ -99,18 +100,20 @@ class XLNetModelTest(CommonTestCases.CommonModelTester): input_mask = ids_tensor([self.batch_size, self.seq_length], 2).float() input_ids_q = ids_tensor([self.batch_size, self.seq_length + 1], self.vocab_size) - perm_mask = torch.zeros(self.batch_size, self.seq_length + 1, self.seq_length + 1, dtype=torch.float) + perm_mask = torch.zeros(self.batch_size, self.seq_length + 1, self.seq_length + 1, dtype=torch.float, device=torch_device) perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token - target_mapping = torch.zeros(self.batch_size, 1, self.seq_length + 1, dtype=torch.float) + target_mapping = torch.zeros(self.batch_size, 1, self.seq_length + 1, dtype=torch.float, device=torch_device) target_mapping[:, 0, -1] = 1.0 # predict last token sequence_labels = None lm_labels = None is_impossible_labels = None + token_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) sequence_labels = ids_tensor([self.batch_size], 
self.type_sequence_label_size) is_impossible_labels = ids_tensor([self.batch_size], 2).float() + token_labels = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) config = XLNetConfig( vocab_size_or_config_json_file=self.vocab_size, @@ -129,15 +132,16 @@ class XLNetModelTest(CommonTestCases.CommonModelTester): num_labels=self.type_sequence_label_size) return (config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, - target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels) + target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, token_labels) def set_seed(self): random.seed(self.seed) torch.manual_seed(self.seed) def create_and_check_xlnet_base_model(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, - target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels): + target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, token_labels): model = XLNetModel(config) + model.to(torch_device) model.eval() _, _ = model(input_ids_1, input_mask=input_mask) @@ -152,6 +156,7 @@ class XLNetModelTest(CommonTestCases.CommonModelTester): config.mem_len = 0 model = XLNetModel(config) + model.to(torch_device) model.eval() no_mems_outputs = model(input_ids_1) self.parent.assertEqual(len(no_mems_outputs), 1) @@ -163,9 +168,23 @@ class XLNetModelTest(CommonTestCases.CommonModelTester): list(list(mem.size()) for mem in result["mems_1"]), [[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers) + def create_and_check_xlnet_base_model_with_att_output(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, + target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, token_labels): + model = XLNetModel(config) + model.to(torch_device) + model.eval() + + _, _, attentions = model(input_ids_1, target_mapping=target_mapping) + + self.parent.assertEqual(len(attentions), config.n_layer) + self.parent.assertIsInstance(attentions[0], tuple) + self.parent.assertEqual(len(attentions[0]), 2) + self.parent.assertTrue(attentions[0][0].shape, attentions[0][0].shape) + def create_and_check_xlnet_lm_head(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, - target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels): + target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, token_labels): model = XLNetLMHeadModel(config) + model.to(torch_device) model.eval() loss_1, all_logits_1, mems_1 = model(input_ids_1, token_type_ids=segment_ids, labels=lm_labels) @@ -204,8 +223,9 @@ class XLNetModelTest(CommonTestCases.CommonModelTester): [[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers) def create_and_check_xlnet_qa(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, - target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels): + target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, token_labels): model = XLNetForQuestionAnswering(config) + model.to(torch_device) model.eval() outputs = model(input_ids_1) @@ -261,9 +281,43 @@ class XLNetModelTest(CommonTestCases.CommonModelTester): list(list(mem.size()) for mem in result["mems"]), [[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers) + def create_and_check_xlnet_token_classif(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, + target_mapping, segment_ids, lm_labels, 
sequence_labels, is_impossible_labels, token_labels): + model = XLNetForTokenClassification(config) + model.to(torch_device) + model.eval() + + logits, mems_1 = model(input_ids_1) + loss, logits, mems_1 = model(input_ids_1, labels=token_labels) + + result = { + "loss": loss, + "mems_1": mems_1, + "logits": logits, + } + + self.parent.assertListEqual( + list(result["loss"].size()), + []) + self.parent.assertListEqual( + list(result["logits"].size()), + [self.batch_size, self.seq_length, self.type_sequence_label_size]) + self.parent.assertListEqual( + list(list(mem.size()) for mem in result["mems_1"]), + [[self.seq_length, self.batch_size, self.hidden_size]] * self.num_hidden_layers) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + (config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, + target_mapping, segment_ids, lm_labels, + sequence_labels, is_impossible_labels) = config_and_inputs + inputs_dict = {'input_ids': input_ids_1} + return config, inputs_dict + def create_and_check_xlnet_sequence_classif(self, config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, - target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels): + target_mapping, segment_ids, lm_labels, sequence_labels, is_impossible_labels, token_labels): model = XLNetForSequenceClassification(config) + model.to(torch_device) model.eval() logits, mems_1 = model(input_ids_1) @@ -289,7 +343,7 @@ class XLNetModelTest(CommonTestCases.CommonModelTester): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids_1, input_ids_2, input_ids_q, perm_mask, input_mask, target_mapping, segment_ids, lm_labels, - sequence_labels, is_impossible_labels) = config_and_inputs + sequence_labels, is_impossible_labels, token_labels) = config_and_inputs inputs_dict = {'input_ids': input_ids_1} return config, inputs_dict @@ -306,22 +360,33 @@ class XLNetModelTest(CommonTestCases.CommonModelTester): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlnet_base_model(*config_and_inputs) + def test_xlnet_base_model_with_att_output(self): + self.model_tester.set_seed() + config_and_inputs = self.model_tester.prepare_config_and_inputs() + config_and_inputs[0].output_attentions = True + self.model_tester.create_and_check_xlnet_base_model_with_att_output(*config_and_inputs) + def test_xlnet_lm_head(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() - self.model_tester.create_and_check_xlnet_lm_head(*config_and_inputs) + self.model_tester.create_and_check_xlnet_lm_head(*config_and_inputs) def test_xlnet_sequence_classif(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlnet_sequence_classif(*config_and_inputs) + def test_xlnet_token_classif(self): + self.model_tester.set_seed() + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_xlnet_token_classif(*config_and_inputs) + def test_xlnet_qa(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlnet_qa(*config_and_inputs) - @pytest.mark.slow + @slow def test_model_from_pretrained(self): cache_dir = "/tmp/transformers_test/" for model_name in list(XLNET_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: diff --git a/transformers/tests/optimization_test.py 
b/transformers/tests/optimization_test.py index 84dbaca52a9..cc10ad5908b 100644 --- a/transformers/tests/optimization_test.py +++ b/transformers/tests/optimization_test.py @@ -18,19 +18,21 @@ from __future__ import print_function import unittest import os -import pytest from transformers import is_torch_available if is_torch_available(): import torch - from transformers import (AdamW, ConstantLRSchedule, WarmupConstantSchedule, - WarmupCosineSchedule, WarmupCosineWithHardRestartsSchedule, WarmupLinearSchedule) -else: - pytestmark = pytest.mark.skip("Require Torch") + from transformers import (AdamW, + get_constant_schedule, + get_constant_schedule_with_warmup, + get_cosine_schedule_with_warmup, + get_cosine_with_hard_restarts_schedule_with_warmup, + get_linear_schedule_with_warmup) from .tokenization_tests_commons import TemporaryDirectory +from .utils import require_torch def unwrap_schedule(scheduler, num_steps=10): @@ -54,6 +56,7 @@ def unwrap_and_save_reload_schedule(scheduler, num_steps=10): scheduler.load_state_dict(state_dict) return lrs +@require_torch class OptimizationTest(unittest.TestCase): def assertListAlmostEqual(self, list1, list2, tol): @@ -76,6 +79,7 @@ class OptimizationTest(unittest.TestCase): self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2) +@require_torch class ScheduleInitTest(unittest.TestCase): m = torch.nn.Linear(50, 50) if is_torch_available() else None optimizer = AdamW(m.parameters(), lr=10.) if is_torch_available() else None @@ -87,59 +91,60 @@ class ScheduleInitTest(unittest.TestCase): self.assertAlmostEqual(a, b, delta=tol) def test_constant_scheduler(self): - scheduler = ConstantLRSchedule(self.optimizer) + scheduler = get_constant_schedule(self.optimizer) lrs = unwrap_schedule(scheduler, self.num_steps) expected_learning_rates = [10.] 
* self.num_steps self.assertEqual(len(lrs[0]), 1) self.assertListEqual([l[0] for l in lrs], expected_learning_rates) - scheduler = ConstantLRSchedule(self.optimizer) + scheduler = get_constant_schedule(self.optimizer) lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps) self.assertListEqual([l[0] for l in lrs], [l[0] for l in lrs_2]) def test_warmup_constant_scheduler(self): - scheduler = WarmupConstantSchedule(self.optimizer, warmup_steps=4) + scheduler = get_constant_schedule_with_warmup(self.optimizer, num_warmup_steps=4) lrs = unwrap_schedule(scheduler, self.num_steps) expected_learning_rates = [2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0] self.assertEqual(len(lrs[0]), 1) self.assertListEqual([l[0] for l in lrs], expected_learning_rates) - scheduler = WarmupConstantSchedule(self.optimizer, warmup_steps=4) + scheduler = get_constant_schedule_with_warmup(self.optimizer, num_warmup_steps=4) lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps) self.assertListEqual([l[0] for l in lrs], [l[0] for l in lrs_2]) def test_warmup_linear_scheduler(self): - scheduler = WarmupLinearSchedule(self.optimizer, warmup_steps=2, t_total=10) + scheduler = get_linear_schedule_with_warmup(self.optimizer, num_warmup_steps=2, num_training_steps=10) lrs = unwrap_schedule(scheduler, self.num_steps) expected_learning_rates = [5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25, 0.0] self.assertEqual(len(lrs[0]), 1) self.assertListEqual([l[0] for l in lrs], expected_learning_rates) - scheduler = WarmupLinearSchedule(self.optimizer, warmup_steps=2, t_total=10) + scheduler = get_linear_schedule_with_warmup(self.optimizer, num_warmup_steps=2, num_training_steps=10) lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps) self.assertListEqual([l[0] for l in lrs], [l[0] for l in lrs_2]) def test_warmup_cosine_scheduler(self): - scheduler = WarmupCosineSchedule(self.optimizer, warmup_steps=2, t_total=10) + scheduler = get_cosine_schedule_with_warmup(self.optimizer, num_warmup_steps=2, num_training_steps=10) lrs = unwrap_schedule(scheduler, self.num_steps) expected_learning_rates = [5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38, 0.0] self.assertEqual(len(lrs[0]), 1) self.assertListAlmostEqual([l[0] for l in lrs], expected_learning_rates, tol=1e-2) - scheduler = WarmupCosineSchedule(self.optimizer, warmup_steps=2, t_total=10) + scheduler = get_cosine_schedule_with_warmup(self.optimizer, num_warmup_steps=2, num_training_steps=10) lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps) self.assertListEqual([l[0] for l in lrs], [l[0] for l in lrs_2]) def test_warmup_cosine_hard_restart_scheduler(self): - scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, warmup_steps=2, cycles=2, t_total=10) + scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(self.optimizer, num_warmup_steps=2, num_cycles=2, num_training_steps=10) lrs = unwrap_schedule(scheduler, self.num_steps) expected_learning_rates = [5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46, 0.0] self.assertEqual(len(lrs[0]), 1) self.assertListAlmostEqual([l[0] for l in lrs], expected_learning_rates, tol=1e-2) - scheduler = WarmupCosineWithHardRestartsSchedule(self.optimizer, warmup_steps=2, cycles=2, t_total=10) + scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(self.optimizer, num_warmup_steps=2, num_cycles=2, num_training_steps=10) lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps) self.assertListEqual([l[0] for l in lrs], [l[0] for l in lrs_2]) + if 
__name__ == "__main__": unittest.main() diff --git a/transformers/tests/optimization_tf_test.py b/transformers/tests/optimization_tf_test.py new file mode 100644 index 00000000000..515d12a158b --- /dev/null +++ b/transformers/tests/optimization_tf_test.py @@ -0,0 +1,90 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest + +from transformers import is_tf_available + +from .utils import require_tf + +if is_tf_available(): + import tensorflow as tf + from tensorflow.python.eager import context + from tensorflow.python.framework import ops + from transformers import (create_optimizer, GradientAccumulator) + + +@require_tf +class OptimizationFTest(unittest.TestCase): + def assertListAlmostEqual(self, list1, list2, tol): + self.assertEqual(len(list1), len(list2)) + for a, b in zip(list1, list2): + self.assertAlmostEqual(a, b, delta=tol) + + def testGradientAccumulator(self): + accumulator = GradientAccumulator() + accumulator([tf.constant([1.0, 2.0])]) + accumulator([tf.constant([-2.0, 1.0])]) + accumulator([tf.constant([-1.0, 2.0])]) + with self.assertRaises(ValueError): + accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])]) + self.assertEqual(accumulator.step, 3) + self.assertEqual(len(accumulator.gradients), 1) + self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2) + accumulator.reset() + self.assertEqual(accumulator.step, 0) + self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2) + + def testGradientAccumulatorDistributionStrategy(self): + context._context = None + ops.enable_eager_execution_internal() + physical_devices = tf.config.experimental.list_physical_devices("CPU") + tf.config.experimental.set_virtual_device_configuration( + physical_devices[0], + [tf.config.experimental.VirtualDeviceConfiguration(), + tf.config.experimental.VirtualDeviceConfiguration()]) + + devices = tf.config.experimental.list_logical_devices(device_type="CPU") + strategy = tf.distribute.MirroredStrategy(devices=[device.name for device in devices]) + + with strategy.scope(): + accumulator = GradientAccumulator() + variable = tf.Variable([4.0, 3.0]) + optimizer = create_optimizer(5e-5, 10, 5) + gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False) + + def accumulate_on_replica(gradient): + accumulator([gradient]) + + def apply_on_replica(): + optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])), 1.0) + + @tf.function + def accumulate(grad1, grad2): + with strategy.scope(): + gradient_placeholder.values[0].assign(grad1) + gradient_placeholder.values[1].assign(grad2) + strategy.experimental_run_v2(accumulate_on_replica, args=(gradient_placeholder,)) + + @tf.function + def apply_grad(): + with strategy.scope(): + strategy.experimental_run_v2(apply_on_replica) + + accumulate([1.0, 2.0], [-1.0, 1.0]) + accumulate([3.0, -1.0], [-1.0, -1.0]) + accumulate([-2.0, 2.0], [3.0, -2.0]) + self.assertEqual(accumulator.step, 3) + self.assertListAlmostEqual(accumulator._gradients[0].values[0].value().numpy().tolist(), [2.0, 3.0], tol=1e-2) + self.assertListAlmostEqual(accumulator._gradients[0].values[1].value().numpy().tolist(), [1.0, -2.0], tol=1e-2) + apply_grad() + self.assertListAlmostEqual(variable.value().numpy().tolist(), [4.0, 3.0], tol=1e-2) + accumulator.reset() + self.assertEqual(accumulator.step, 0) + self.assertListAlmostEqual(accumulator._gradients[0].values[0].value().numpy().tolist(), [0.0, 0.0], tol=1e-2) + 
self.assertListAlmostEqual(accumulator._gradients[0].values[1].value().numpy().tolist(), [0.0, 0.0], tol=1e-2) + + +if __name__ == "__main__": + unittest.main() \ No newline at end of file diff --git a/transformers/tests/tokenization_albert_test.py b/transformers/tests/tokenization_albert_test.py new file mode 100644 index 00000000000..59eb3bceb0d --- /dev/null +++ b/transformers/tests/tokenization_albert_test.py @@ -0,0 +1,78 @@ +# coding=utf-8 +# Copyright 2019 Hugging Face inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import absolute_import, division, print_function, unicode_literals + +import os +import unittest + +from transformers.tokenization_albert import (AlbertTokenizer, SPIECE_UNDERLINE) + +from .tokenization_tests_commons import CommonTestCases + +SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), + 'fixtures/spiece.model') + +class AlbertTokenizationTest(CommonTestCases.CommonTokenizerTester): + + tokenizer_class = AlbertTokenizer + + def setUp(self): + super(AlbertTokenizationTest, self).setUp() + + # We have a SentencePiece fixture for testing + tokenizer = AlbertTokenizer(SAMPLE_VOCAB) + tokenizer.save_pretrained(self.tmpdirname) + + def get_tokenizer(self, **kwargs): + return AlbertTokenizer.from_pretrained(self.tmpdirname, **kwargs) + + def get_input_output_texts(self): + input_text = u"this is a test" + output_text = u"this is a test" + return input_text, output_text + + + def test_full_tokenizer(self): + tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True) + + tokens = tokenizer.tokenize(u'This is a test') + self.assertListEqual(tokens, [u'▁this', u'▁is', u'▁a', u'▁test']) + + self.assertListEqual( + tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289]) + + tokens = tokenizer.tokenize(u"I was born in 92000, and this is falsé.") + self.assertListEqual(tokens, [u'▁i', u'▁was', u'▁born', u'▁in', u'▁9', u'2000', u',', u'▁and', u'▁this', u'▁is', u'▁fal', u's', u'é', u'.']) + ids = tokenizer.convert_tokens_to_ids(tokens) + self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]) + + back_tokens = tokenizer.convert_ids_to_tokens(ids) + self.assertListEqual(back_tokens, ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '', '.']) + + def test_sequence_builders(self): + tokenizer = AlbertTokenizer(SAMPLE_VOCAB) + + text = tokenizer.encode("sequence builders") + text_2 = tokenizer.encode("multi-sequence build") + + encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) + encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) + + assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id] + + +if __name__ == '__main__': + unittest.main() diff --git a/transformers/tests/tokenization_auto_test.py b/transformers/tests/tokenization_auto_test.py index 0f49ec75fb2..0a894cac043 100644 --- 
a/transformers/tests/tokenization_auto_test.py +++ b/transformers/tests/tokenization_auto_test.py @@ -18,14 +18,16 @@ from __future__ import print_function import unittest import shutil -import pytest import logging from transformers import AutoTokenizer, BertTokenizer, AutoTokenizer, GPT2Tokenizer from transformers import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP +from .utils import slow, SMALL_MODEL_IDENTIFIER + class AutoTokenizerTest(unittest.TestCase): + @slow def test_tokenizer_from_pretrained(self): logging.basicConfig(level=logging.INFO) for model_name in list(BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys())[:1]: @@ -40,6 +42,11 @@ class AutoTokenizerTest(unittest.TestCase): self.assertIsInstance(tokenizer, GPT2Tokenizer) self.assertGreater(len(tokenizer), 0) + def test_tokenizer_from_pretrained_identifier(self): + logging.basicConfig(level=logging.INFO) + tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER) + self.assertIsInstance(tokenizer, BertTokenizer) + self.assertEqual(len(tokenizer), 12) if __name__ == "__main__": unittest.main() diff --git a/transformers/tests/tokenization_bert_japanese_test.py b/transformers/tests/tokenization_bert_japanese_test.py new file mode 100644 index 00000000000..545193c7cce --- /dev/null +++ b/transformers/tests/tokenization_bert_japanese_test.py @@ -0,0 +1,191 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import absolute_import, division, print_function, unicode_literals + +import os +import unittest +from io import open + +from transformers.tokenization_bert import WordpieceTokenizer +from transformers.tokenization_bert_japanese import (BertJapaneseTokenizer, + MecabTokenizer, CharacterTokenizer, + VOCAB_FILES_NAMES) + +from .tokenization_tests_commons import CommonTestCases +from .utils import slow, custom_tokenizers + + +@custom_tokenizers +class BertJapaneseTokenizationTest(CommonTestCases.CommonTokenizerTester): + + tokenizer_class = BertJapaneseTokenizer + + def setUp(self): + super(BertJapaneseTokenizationTest, self).setUp() + + vocab_tokens = [u"[UNK]", u"[CLS]", u"[SEP]", + u"こんにちは", u"こん", u"にちは", u"ばんは", u"##こん", u"##にちは", u"##ばんは", + u"世界", u"##世界", u"、", u"##、", u"。", u"##。"] + + self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) + with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: + vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) + + def get_tokenizer(self, **kwargs): + return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs) + + def get_input_output_texts(self): + input_text = u"こんにちは、世界。 \nこんばんは、世界。" + output_text = u"こんにちは 、 世界 。 こんばんは 、 世界 。" + return input_text, output_text + + def test_full_tokenizer(self): + tokenizer = self.tokenizer_class(self.vocab_file) + + tokens = tokenizer.tokenize(u"こんにちは、世界。\nこんばんは、世界。") + self.assertListEqual(tokens, + [u"こんにちは", u"、", u"世界", u"。", + u"こん", u"##ばんは", u"、", u"世界", "。"]) + self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), + [3, 12, 10, 14, 4, 9, 12, 10, 14]) + + def test_mecab_tokenizer(self): + tokenizer = MecabTokenizer() + + self.assertListEqual( + tokenizer.tokenize(u" \tアップルストアでiPhone8 が \n 発売された 。 "), + [u"アップルストア", u"で", u"iPhone", u"8", u"が", + u"発売", u"さ", u"れ", u"た", u"。"]) + + def test_mecab_tokenizer_lower(self): + tokenizer = MecabTokenizer(do_lower_case=True) + + self.assertListEqual( + tokenizer.tokenize(u" \tアップルストアでiPhone8 が \n 発売された 。 "), + [u"アップルストア", u"で", u"iphone", u"8", u"が", + u"発売", u"さ", u"れ", u"た", u"。"]) + + def test_mecab_tokenizer_no_normalize(self): + tokenizer = MecabTokenizer(normalize_text=False) + + self.assertListEqual( + tokenizer.tokenize(u" \tアップルストアでiPhone8 が \n 発売された 。 "), + [u"アップルストア", u"で", u"iPhone", u"8", u"が", + u"発売", u"さ", u"れ", u"た", u" ", u"。"]) + + def test_wordpiece_tokenizer(self): + vocab_tokens = [u"[UNK]", u"[CLS]", u"[SEP]", + u"こんにちは", u"こん", u"にちは" u"ばんは", u"##こん", u"##にちは", u"##ばんは"] + + vocab = {} + for (i, token) in enumerate(vocab_tokens): + vocab[token] = i + tokenizer = WordpieceTokenizer(vocab=vocab, unk_token=u"[UNK]") + + self.assertListEqual(tokenizer.tokenize(u""), []) + + self.assertListEqual(tokenizer.tokenize(u"こんにちは"), + [u"こんにちは"]) + + self.assertListEqual(tokenizer.tokenize(u"こんばんは"), + [u"こん", u"##ばんは"]) + + self.assertListEqual(tokenizer.tokenize(u"こんばんは こんばんにちは こんにちは"), + [u"こん", u"##ばんは", u"[UNK]", u"こんにちは"]) + + @slow + def test_sequence_builders(self): + tokenizer = self.tokenizer_class.from_pretrained("bert-base-japanese") + + text = tokenizer.encode(u"ありがとう。", add_special_tokens=False) + text_2 = tokenizer.encode(u"どういたしまして。", add_special_tokens=False) + + encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) + encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) + + # 2 is for "[CLS]", 3 is for "[SEP]" + assert encoded_sentence == [2] + text + [3] + assert encoded_pair == [2] + text + [3] + text_2 + [3] + + 
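The behaviour exercised by the tests above looks roughly like this from user code. This is a sketch only: it assumes the MeCab bindings required by `MecabTokenizer` are installed and that the `bert-base-japanese` vocabulary referenced in `test_sequence_builders` is reachable.

from transformers.tokenization_bert_japanese import BertJapaneseTokenizer

# MeCab word segmentation followed by WordPiece sub-word tokenization.
tokenizer = BertJapaneseTokenizer.from_pretrained("bert-base-japanese")

tokens = tokenizer.tokenize(u"こんにちは、世界。")
ids = tokenizer.encode(u"ありがとう。", add_special_tokens=False)

# build_inputs_with_special_tokens wraps the ids in [CLS] ... [SEP],
# which is what the sequence-builder assertions above check.
with_special_tokens = tokenizer.build_inputs_with_special_tokens(ids)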
+class BertJapaneseCharacterTokenizationTest(CommonTestCases.CommonTokenizerTester): + + tokenizer_class = BertJapaneseTokenizer + + def setUp(self): + super(BertJapaneseCharacterTokenizationTest, self).setUp() + + vocab_tokens = [u"[UNK]", u"[CLS]", u"[SEP]", + u"こ", u"ん", u"に", u"ち", u"は", u"ば", u"世", u"界", u"、", u"。"] + + self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) + with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: + vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) + + def get_tokenizer(self, **kwargs): + return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, + subword_tokenizer_type="character", + **kwargs) + + def get_input_output_texts(self): + input_text = u"こんにちは、世界。 \nこんばんは、世界。" + output_text = u"こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。" + return input_text, output_text + + def test_full_tokenizer(self): + tokenizer = self.tokenizer_class(self.vocab_file, + subword_tokenizer_type="character") + + tokens = tokenizer.tokenize(u"こんにちは、世界。 \nこんばんは、世界。") + self.assertListEqual(tokens, + [u"こ", u"ん", u"に", u"ち", u"は", u"、", u"世", u"界", u"。", + u"こ", u"ん", u"ば", u"ん", u"は", u"、", u"世", u"界", u"。"]) + self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), + [3, 4, 5, 6, 7, 11, 9, 10, 12, + 3, 4, 8, 4, 7, 11, 9, 10, 12]) + + def test_character_tokenizer(self): + vocab_tokens = [u"[UNK]", u"[CLS]", u"[SEP]", + u"こ", u"ん", u"に", u"ち", u"は", u"ば", u"世", u"界"u"、", u"。"] + + vocab = {} + for (i, token) in enumerate(vocab_tokens): + vocab[token] = i + tokenizer = CharacterTokenizer(vocab=vocab, unk_token=u"[UNK]") + + self.assertListEqual(tokenizer.tokenize(u""), []) + + self.assertListEqual(tokenizer.tokenize(u"こんにちは"), + [u"こ", u"ん", u"に", u"ち", u"は"]) + + self.assertListEqual(tokenizer.tokenize(u"こんにちほ"), + [u"こ", u"ん", u"に", u"ち", u"[UNK]"]) + + @slow + def test_sequence_builders(self): + tokenizer = self.tokenizer_class.from_pretrained("bert-base-japanese-char") + + text = tokenizer.encode(u"ありがとう。", add_special_tokens=False) + text_2 = tokenizer.encode(u"どういたしまして。", add_special_tokens=False) + + encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) + encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) + + # 2 is for "[CLS]", 3 is for "[SEP]" + assert encoded_sentence == [2] + text + [3] + assert encoded_pair == [2] + text + [3] + text_2 + [3] + + + diff --git a/transformers/tests/tokenization_bert_test.py b/transformers/tests/tokenization_bert_test.py index 5e49e2915b3..f3902489565 100644 --- a/transformers/tests/tokenization_bert_test.py +++ b/transformers/tests/tokenization_bert_test.py @@ -25,6 +25,7 @@ from transformers.tokenization_bert import (BasicTokenizer, _is_whitespace, VOCAB_FILES_NAMES) from .tokenization_tests_commons import CommonTestCases +from .utils import slow class BertTokenizationTest(CommonTestCases.CommonTokenizerTester): @@ -125,11 +126,12 @@ class BertTokenizationTest(CommonTestCases.CommonTokenizerTester): self.assertFalse(_is_punctuation(u"A")) self.assertFalse(_is_punctuation(u" ")) + @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased") - text = tokenizer.encode("sequence builders") - text_2 = tokenizer.encode("multi-sequence build") + text = tokenizer.encode("sequence builders", add_special_tokens=False) + text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = 
tokenizer.build_inputs_with_special_tokens(text, text_2) diff --git a/transformers/tests/tokenization_distilbert_test.py b/transformers/tests/tokenization_distilbert_test.py index a18d644fe8b..e815eca672c 100644 --- a/transformers/tests/tokenization_distilbert_test.py +++ b/transformers/tests/tokenization_distilbert_test.py @@ -22,6 +22,7 @@ from transformers.tokenization_distilbert import (DistilBertTokenizer) from .tokenization_tests_commons import CommonTestCases from .tokenization_bert_test import BertTokenizationTest +from .utils import slow class DistilBertTokenizationTest(BertTokenizationTest): @@ -30,11 +31,12 @@ class DistilBertTokenizationTest(BertTokenizationTest): def get_tokenizer(self, **kwargs): return DistilBertTokenizer.from_pretrained(self.tmpdirname, **kwargs) + @slow def test_sequence_builders(self): tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased") - text = tokenizer.encode("sequence builders") - text_2 = tokenizer.encode("multi-sequence build") + text = tokenizer.encode("sequence builders", add_special_tokens=False) + text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) diff --git a/transformers/tests/tokenization_roberta_test.py b/transformers/tests/tokenization_roberta_test.py index a731ac26c92..8ad0b59511c 100644 --- a/transformers/tests/tokenization_roberta_test.py +++ b/transformers/tests/tokenization_roberta_test.py @@ -21,6 +21,7 @@ from io import open from transformers.tokenization_roberta import RobertaTokenizer, VOCAB_FILES_NAMES from .tokenization_tests_commons import CommonTestCases +from .utils import slow class RobertaTokenizationTest(CommonTestCases.CommonTokenizerTester): @@ -70,19 +71,20 @@ class RobertaTokenizationTest(CommonTestCases.CommonTokenizerTester): tokenizer = self.get_tokenizer() self.assertListEqual( - tokenizer.encode('Hello world!'), + tokenizer.encode('Hello world!', add_special_tokens=False), [0, 31414, 232, 328, 2] ) self.assertListEqual( - tokenizer.encode('Hello world! cécé herlolip 418'), + tokenizer.encode('Hello world! 
cécé herlolip 418', add_special_tokens=False), [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] ) + @slow def test_sequence_builders(self): tokenizer = RobertaTokenizer.from_pretrained("roberta-base") - text = tokenizer.encode("sequence builders") - text_2 = tokenizer.encode("multi-sequence build") + text = tokenizer.encode("sequence builders", add_special_tokens=False) + text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_text_from_decode = tokenizer.encode("sequence builders", add_special_tokens=True) encoded_pair_from_decode = tokenizer.encode("sequence builders", "multi-sequence build", add_special_tokens=True) diff --git a/transformers/tests/tokenization_tests_commons.py b/transformers/tests/tokenization_tests_commons.py index b2801d5f411..c0099581357 100644 --- a/transformers/tests/tokenization_tests_commons.py +++ b/transformers/tests/tokenization_tests_commons.py @@ -79,13 +79,13 @@ class CommonTestCases: # Now let's start the test tokenizer = self.get_tokenizer(max_len=42) - before_tokens = tokenizer.encode(u"He is very happy, UNwant\u00E9d,running") + before_tokens = tokenizer.encode(u"He is very happy, UNwant\u00E9d,running", add_special_tokens=False) with TemporaryDirectory() as tmpdirname: tokenizer.save_pretrained(tmpdirname) tokenizer = self.tokenizer_class.from_pretrained(tmpdirname) - after_tokens = tokenizer.encode(u"He is very happy, UNwant\u00E9d,running") + after_tokens = tokenizer.encode(u"He is very happy, UNwant\u00E9d,running", add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) self.assertEqual(tokenizer.max_len, 42) @@ -102,14 +102,48 @@ class CommonTestCases: with TemporaryDirectory() as tmpdirname: filename = os.path.join(tmpdirname, u"tokenizer.bin") - pickle.dump(tokenizer, open(filename, "wb")) + with open(filename, "wb") as handle: + pickle.dump(tokenizer, handle) - tokenizer_new = pickle.load(open(filename, "rb")) + with open(filename, "rb") as handle: + tokenizer_new = pickle.load(handle) subwords_loaded = tokenizer_new.tokenize(text) self.assertListEqual(subwords, subwords_loaded) + def test_added_tokens_do_lower_case(self): + tokenizer = self.get_tokenizer(do_lower_case=True) + + special_token = tokenizer.all_special_tokens[0] + + text = special_token + " aaaaa bbbbbb low cccccccccdddddddd l " + special_token + text2 = special_token + " AAAAA BBBBBB low CCCCCCCCCDDDDDDDD l " + special_token + + toks0 = tokenizer.tokenize(text) # toks before adding new_toks + + new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd", 'AAAAA BBBBBB', 'CCCCCCCCCDDDDDDDD'] + added = tokenizer.add_tokens(new_toks) + self.assertEqual(added, 2) + + toks = tokenizer.tokenize(text) + toks2 = tokenizer.tokenize(text2) + + self.assertEqual(len(toks), len(toks2)) + self.assertNotEqual(len(toks), len(toks0)) # toks0 should be longer + self.assertListEqual(toks, toks2) + + tokenizer = self.get_tokenizer(do_lower_case=False) + + added = tokenizer.add_tokens(new_toks) + self.assertEqual(added, 4) + + toks = tokenizer.tokenize(text) + toks2 = tokenizer.tokenize(text2) + + self.assertEqual(len(toks), len(toks2)) # Length should still be the same + self.assertNotEqual(len(toks), len(toks0)) + self.assertNotEqual(toks[1], toks2[1]) # But at least the first non-special tokens should differ def test_add_tokens_tokenizer(self): tokenizer = self.get_tokenizer() @@ -130,7 +164,7 @@ class CommonTestCases: self.assertEqual(added_toks, len(new_toks)) self.assertEqual(all_size_2, all_size + len(new_toks)) - tokens = tokenizer.encode("aaaaa 
bbbbbb low cccccccccdddddddd l") + tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False) out_string = tokenizer.decode(tokens) self.assertGreaterEqual(len(tokens), 4) @@ -148,7 +182,8 @@ class CommonTestCases: self.assertEqual(added_toks_2, len(new_toks_2)) self.assertEqual(all_size_3, all_size_2 + len(new_toks_2)) - tokens = tokenizer.encode(">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l") + tokens = tokenizer.encode(">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", + add_special_tokens=False) out_string = tokenizer.decode(tokens) self.assertGreaterEqual(len(tokens), 6) @@ -159,6 +194,26 @@ class CommonTestCases: self.assertEqual(tokens[0], tokenizer.eos_token_id) self.assertEqual(tokens[-2], tokenizer.pad_token_id) + def test_add_special_tokens(self): + tokenizer = self.get_tokenizer() + input_text, output_text = self.get_input_output_texts() + + special_token = "[SPECIAL TOKEN]" + + tokenizer.add_special_tokens({"cls_token": special_token}) + encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False) + assert len(encoded_special_token) == 1 + + text = " ".join([input_text, special_token, output_text]) + encoded = tokenizer.encode(text, add_special_tokens=False) + + input_encoded = tokenizer.encode(input_text, add_special_tokens=False) + output_encoded = tokenizer.encode(output_text, add_special_tokens=False) + special_token_id = tokenizer.encode(special_token, add_special_tokens=False) + assert encoded == input_encoded + special_token_id + output_encoded + + decoded = tokenizer.decode(encoded, skip_special_tokens=True) + assert special_token not in decoded def test_required_methods_tokenizer(self): tokenizer = self.get_tokenizer() @@ -166,7 +221,7 @@ class CommonTestCases: tokens = tokenizer.tokenize(input_text) ids = tokenizer.convert_tokens_to_ids(tokens) - ids_2 = tokenizer.encode(input_text) + ids_2 = tokenizer.encode(input_text, add_special_tokens=False) self.assertListEqual(ids, ids_2) tokens_2 = tokenizer.convert_ids_to_tokens(ids) @@ -206,7 +261,7 @@ class CommonTestCases: seq_0 = "Test this method." seq_1 = "With these inputs." - sequences = tokenizer.encode(seq_0, seq_1) + sequences = tokenizer.encode(seq_0, seq_1, add_special_tokens=False) attached_sequences = tokenizer.encode(seq_0, seq_1, add_special_tokens=True) # Method is implemented (e.g. not GPT-2) @@ -219,10 +274,14 @@ class CommonTestCases: seq_0 = "This is a sentence to be encoded." stride = 2 - sequence = tokenizer.encode(seq_0) + sequence = tokenizer.encode(seq_0, add_special_tokens=False) num_added_tokens = tokenizer.num_added_tokens() total_length = len(sequence) + num_added_tokens - information = tokenizer.encode_plus(seq_0, max_length=total_length - 2, add_special_tokens=True, stride=stride) + information = tokenizer.encode_plus(seq_0, + max_length=total_length - 2, + add_special_tokens=True, + stride=stride, + return_overflowing_tokens=True) truncated_sequence = information["input_ids"] overflowing_tokens = information["overflowing_tokens"] @@ -239,20 +298,22 @@ class CommonTestCases: seq_1 = "This is another sentence to be encoded." 
stride = 2 - sequence_0_no_special_tokens = tokenizer.encode(seq_0) - sequence_1_no_special_tokens = tokenizer.encode(seq_1) + sequence_0_no_special_tokens = tokenizer.encode(seq_0, add_special_tokens=False) + sequence_1_no_special_tokens = tokenizer.encode(seq_1, add_special_tokens=False) sequence = tokenizer.encode(seq_0, seq_1, add_special_tokens=True) truncated_second_sequence = tokenizer.build_inputs_with_special_tokens( - tokenizer.encode(seq_0), - tokenizer.encode(seq_1)[:-2] + tokenizer.encode(seq_0, add_special_tokens=False), + tokenizer.encode(seq_1, add_special_tokens=False)[:-2] ) information = tokenizer.encode_plus(seq_0, seq_1, max_length=len(sequence) - 2, add_special_tokens=True, - stride=stride, truncation_strategy='only_second') + stride=stride, truncation_strategy='only_second', + return_overflowing_tokens=True) information_first_truncated = tokenizer.encode_plus(seq_0, seq_1, max_length=len(sequence) - 2, add_special_tokens=True, stride=stride, - truncation_strategy='only_first') + truncation_strategy='only_first', + return_overflowing_tokens=True) truncated_sequence = information["input_ids"] overflowing_tokens = information["overflowing_tokens"] @@ -283,8 +344,8 @@ class CommonTestCases: sequence_1 = "This one too please." # Testing single inputs - encoded_sequence = tokenizer.encode(sequence_0) - encoded_sequence_dict = tokenizer.encode_plus(sequence_0, add_special_tokens=True) + encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False) + encoded_sequence_dict = tokenizer.encode_plus(sequence_0, add_special_tokens=True, return_special_tokens_mask=True) encoded_sequence_w_special = encoded_sequence_dict["input_ids"] special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) @@ -294,8 +355,10 @@ class CommonTestCases: self.assertEqual(encoded_sequence, filtered_sequence) # Testing inputs pairs - encoded_sequence = tokenizer.encode(sequence_0) + tokenizer.encode(sequence_1) - encoded_sequence_dict = tokenizer.encode_plus(sequence_0, sequence_1, add_special_tokens=True) + encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False) + tokenizer.encode(sequence_1, + add_special_tokens=False) + encoded_sequence_dict = tokenizer.encode_plus(sequence_0, sequence_1, add_special_tokens=True, + return_special_tokens_mask=True) encoded_sequence_w_special = encoded_sequence_dict["input_ids"] special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) @@ -307,9 +370,98 @@ class CommonTestCases: # Testing with already existing special tokens if tokenizer.cls_token_id == tokenizer.unk_token_id and tokenizer.cls_token_id == tokenizer.unk_token_id: tokenizer.add_special_tokens({'cls_token': '', 'sep_token': ''}) - encoded_sequence_dict = tokenizer.encode_plus(sequence_0, add_special_tokens=True) + encoded_sequence_dict = tokenizer.encode_plus(sequence_0, + add_special_tokens=True, + return_special_tokens_mask=True) encoded_sequence_w_special = encoded_sequence_dict["input_ids"] special_tokens_mask_orig = encoded_sequence_dict["special_tokens_mask"] special_tokens_mask = tokenizer.get_special_tokens_mask(encoded_sequence_w_special, already_has_special_tokens=True) self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) self.assertEqual(special_tokens_mask_orig, special_tokens_mask) + + def test_padding_to_max_length(self): + tokenizer = self.get_tokenizer() + + sequence = 
"Sequence" + padding_size = 10 + padding_idx = tokenizer.pad_token_id + + # RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True + tokenizer.padding_side = "right" + encoded_sequence = tokenizer.encode(sequence) + sequence_length = len(encoded_sequence) + padded_sequence = tokenizer.encode(sequence, max_length=sequence_length + padding_size, pad_to_max_length=True) + padded_sequence_length = len(padded_sequence) + assert sequence_length + padding_size == padded_sequence_length + assert encoded_sequence + [padding_idx] * padding_size == padded_sequence + + # LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True + tokenizer.padding_side = "left" + encoded_sequence = tokenizer.encode(sequence) + sequence_length = len(encoded_sequence) + padded_sequence = tokenizer.encode(sequence, max_length=sequence_length + padding_size, pad_to_max_length=True) + padded_sequence_length = len(padded_sequence) + assert sequence_length + padding_size == padded_sequence_length + assert [padding_idx] * padding_size + encoded_sequence == padded_sequence + + # RIGHT & LEFT PADDING - Check that nothing is done when a maximum length is not specified + encoded_sequence = tokenizer.encode(sequence) + sequence_length = len(encoded_sequence) + + tokenizer.padding_side = "right" + padded_sequence_right = tokenizer.encode(sequence, pad_to_max_length=True) + padded_sequence_right_length = len(padded_sequence_right) + + tokenizer.padding_side = "left" + padded_sequence_left = tokenizer.encode(sequence, pad_to_max_length=True) + padded_sequence_left_length = len(padded_sequence_left) + + assert sequence_length == padded_sequence_right_length + assert encoded_sequence == padded_sequence_right + assert sequence_length == padded_sequence_left_length + assert encoded_sequence == padded_sequence_left + + def test_encode_plus_with_padding(self): + tokenizer = self.get_tokenizer() + + sequence = "Sequence" + padding_size = 10 + padding_idx = tokenizer.pad_token_id + token_type_padding_idx = tokenizer.pad_token_type_id + + encoded_sequence = tokenizer.encode_plus(sequence, return_special_tokens_mask=True) + input_ids = encoded_sequence['input_ids'] + token_type_ids = encoded_sequence['token_type_ids'] + attention_mask = encoded_sequence['attention_mask'] + special_tokens_mask = encoded_sequence['special_tokens_mask'] + sequence_length = len(input_ids) + + # Test right padding + tokenizer.padding_side = "right" + padded_sequence = tokenizer.encode_plus(sequence, max_length=sequence_length + padding_size, pad_to_max_length=True, return_special_tokens_mask=True) + padded_input_ids = padded_sequence['input_ids'] + padded_token_type_ids = padded_sequence['token_type_ids'] + padded_attention_mask = padded_sequence['attention_mask'] + padded_special_tokens_mask = padded_sequence['special_tokens_mask'] + padded_sequence_length = len(padded_input_ids) + + assert sequence_length + padding_size == padded_sequence_length + assert input_ids + [padding_idx] * padding_size == padded_input_ids + assert token_type_ids + [token_type_padding_idx] * padding_size == padded_token_type_ids + assert attention_mask + [0] * padding_size == padded_attention_mask + assert special_tokens_mask + [1] * padding_size == padded_special_tokens_mask + + # Test left padding + tokenizer.padding_side = "left" + padded_sequence = tokenizer.encode_plus(sequence, max_length=sequence_length + padding_size, pad_to_max_length=True, 
return_special_tokens_mask=True) + padded_input_ids = padded_sequence['input_ids'] + padded_token_type_ids = padded_sequence['token_type_ids'] + padded_attention_mask = padded_sequence['attention_mask'] + padded_special_tokens_mask = padded_sequence['special_tokens_mask'] + padded_sequence_length = len(padded_input_ids) + + assert sequence_length + padding_size == padded_sequence_length + assert [padding_idx] * padding_size + input_ids == padded_input_ids + assert [token_type_padding_idx] * padding_size + token_type_ids == padded_token_type_ids + assert [0] * padding_size + attention_mask == padded_attention_mask + assert [1] * padding_size + special_tokens_mask == padded_special_tokens_mask \ No newline at end of file diff --git a/transformers/tests/tokenization_transfo_xl_test.py b/transformers/tests/tokenization_transfo_xl_test.py index 4e99484b0cc..5495ebd3a68 100644 --- a/transformers/tests/tokenization_transfo_xl_test.py +++ b/transformers/tests/tokenization_transfo_xl_test.py @@ -16,7 +16,6 @@ from __future__ import absolute_import, division, print_function, unicode_litera import os import unittest -import pytest from io import open from transformers import is_torch_available @@ -24,11 +23,12 @@ from transformers import is_torch_available if is_torch_available(): import torch from transformers.tokenization_transfo_xl import TransfoXLTokenizer, VOCAB_FILES_NAMES -else: - pytestmark = pytest.mark.skip("Require Torch") # TODO: untangle Transfo-XL tokenizer from torch.load and torch.save from .tokenization_tests_commons import CommonTestCases +from .utils import require_torch + +@require_torch class TransfoXLTokenizationTest(CommonTestCases.CommonTokenizerTester): tokenizer_class = TransfoXLTokenizer if is_torch_available() else None diff --git a/transformers/tests/tokenization_utils_test.py b/transformers/tests/tokenization_utils_test.py index cf55982c8f2..ff3f80ff7d0 100644 --- a/transformers/tests/tokenization_utils_test.py +++ b/transformers/tests/tokenization_utils_test.py @@ -22,7 +22,10 @@ import six from transformers import PreTrainedTokenizer from transformers.tokenization_gpt2 import GPT2Tokenizer +from .utils import slow + class TokenizerUtilsTest(unittest.TestCase): + def check_tokenizer_from_pretrained(self, tokenizer_class): s3_models = list(tokenizer_class.max_model_input_sizes.keys()) for model_name in s3_models[:1]: @@ -39,6 +42,7 @@ class TokenizerUtilsTest(unittest.TestCase): special_tok_id = tokenizer.convert_tokens_to_ids(special_tok) self.assertIsInstance(special_tok_id, int) + @slow def test_pretrained_tokenizers(self): self.check_tokenizer_from_pretrained(GPT2Tokenizer) diff --git a/transformers/tests/tokenization_xlm_test.py b/transformers/tests/tokenization_xlm_test.py index 0949b0cce43..7582a466628 100644 --- a/transformers/tests/tokenization_xlm_test.py +++ b/transformers/tests/tokenization_xlm_test.py @@ -21,6 +21,7 @@ import json from transformers.tokenization_xlm import XLMTokenizer, VOCAB_FILES_NAMES from .tokenization_tests_commons import CommonTestCases +from .utils import slow class XLMTokenizationTest(CommonTestCases.CommonTokenizerTester): @@ -66,11 +67,12 @@ class XLMTokenizationTest(CommonTestCases.CommonTokenizerTester): self.assertListEqual( tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) + @slow def test_sequence_builders(self): tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048") - text = tokenizer.encode("sequence builders") - text_2 = tokenizer.encode("multi-sequence build") + text = tokenizer.encode("sequence 
builders", add_special_tokens=False) + text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) diff --git a/transformers/tests/tokenization_xlnet_test.py b/transformers/tests/tokenization_xlnet_test.py index 1a5dbcf6df6..b68495a796c 100644 --- a/transformers/tests/tokenization_xlnet_test.py +++ b/transformers/tests/tokenization_xlnet_test.py @@ -20,6 +20,7 @@ import unittest from transformers.tokenization_xlnet import (XLNetTokenizer, SPIECE_UNDERLINE) from .tokenization_tests_commons import CommonTestCases +from .utils import slow SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fixtures/test_sentencepiece.model') @@ -89,11 +90,12 @@ class XLNetTokenizationTest(CommonTestCases.CommonTokenizerTester): u'9', u'2', u'0', u'0', u'0', u',', SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this', SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u'se', u'.']) + @slow def test_sequence_builders(self): tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased") - text = tokenizer.encode("sequence builders") - text_2 = tokenizer.encode("multi-sequence build") + text = tokenizer.encode("sequence builders", add_special_tokens=False) + text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) diff --git a/transformers/tests/utils.py b/transformers/tests/utils.py new file mode 100644 index 00000000000..c950ad8f17e --- /dev/null +++ b/transformers/tests/utils.py @@ -0,0 +1,85 @@ +import os +import unittest + +from distutils.util import strtobool + +from transformers.file_utils import _tf_available, _torch_available + + +SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy" + + +def parse_flag_from_env(key, default=False): + try: + value = os.environ[key] + except KeyError: + # KEY isn't set, default to `default`. + _value = default + else: + # KEY is set, convert it to True or False. + try: + _value = strtobool(value) + except ValueError: + # More values are supported, but let's keep the message simple. + raise ValueError("If set, {} must be yes or no.".format(key)) + return _value + +_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) +_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False) + + +def slow(test_case): + """ + Decorator marking a test as slow. + + Slow tests are skipped by default. Set the RUN_SLOW environment variable + to a truthy value to run them. + + """ + if not _run_slow_tests: + test_case = unittest.skip("test is slow")(test_case) + return test_case + + +def custom_tokenizers(test_case): + """ + Decorator marking a test for a custom tokenizer. + + Custom tokenizers require additional dependencies, and are skipped + by default. Set the RUN_CUSTOM_TOKENIZERS environment variable + to a truthy value to run them. + """ + if not _run_custom_tokenizers: + test_case = unittest.skip("test of custom tokenizers")(test_case) + return test_case + + +def require_torch(test_case): + """ + Decorator marking a test that requires PyTorch. + + These tests are skipped when PyTorch isn't installed. + + """ + if not _torch_available: + test_case = unittest.skip("test requires PyTorch")(test_case) + return test_case + + +def require_tf(test_case): + """ + Decorator marking a test that requires TensorFlow. 
+ + These tests are skipped when TensorFlow isn't installed. + + """ + if not _tf_available: + test_case = unittest.skip("test requires TensorFlow")(test_case) + return test_case + + +if _torch_available: + # Set the USE_CUDA environment variable to select a GPU. + torch_device = "cuda" if parse_flag_from_env("USE_CUDA") else "cpu" +else: + torch_device = None diff --git a/transformers/tokenization_albert.py b/transformers/tokenization_albert.py new file mode 100644 index 00000000000..6b92d07218e --- /dev/null +++ b/transformers/tokenization_albert.py @@ -0,0 +1,252 @@ +# coding=utf-8 +# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Tokenization classes for ALBERT model.""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +from .tokenization_utils import PreTrainedTokenizer +import logging +import unicodedata +import six +import os +from shutil import copyfile + +logger = logging.getLogger(__name__) +VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'} + +PRETRAINED_VOCAB_FILES_MAP = { + 'vocab_file': + { + 'albert-base-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-spiece.model", + 'albert-large-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-spiece.model", + 'albert-xlarge-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-spiece.model", + 'albert-xxlarge-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-spiece.model", + 'albert-base-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-spiece.model", + 'albert-large-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-spiece.model", + 'albert-xlarge-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-spiece.model", + 'albert-xxlarge-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-spiece.model", + } +} + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + 'albert-base-v1': 512, + 'albert-large-v1': 512, + 'albert-xlarge-v1': 512, + 'albert-xxlarge-v1': 512, + 'albert-base-v2': 512, + 'albert-large-v2': 512, + 'albert-xlarge-v2': 512, + 'albert-xxlarge-v2': 512, +} + +SPIECE_UNDERLINE = u'▁' + +class AlbertTokenizer(PreTrainedTokenizer): + """ + SentencePiece based tokenizer. 
Peculiarities: + + - requires `SentencePiece <https://github.com/google/sentencepiece>`_ + """ + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + + def __init__(self, vocab_file, + do_lower_case=True, remove_space=True, keep_accents=False, + bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", + pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs): + super(AlbertTokenizer, self).__init__(bos_token=bos_token, eos_token=eos_token, + unk_token=unk_token, sep_token=sep_token, + pad_token=pad_token, cls_token=cls_token, + mask_token=mask_token, **kwargs) + + self.max_len_single_sentence = self.max_len - 2 # take into account special tokens + self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens + + try: + import sentencepiece as spm + except ImportError: + logger.warning("You need to install SentencePiece to use AlbertTokenizer: https://github.com/google/sentencepiece" + "pip install sentencepiece") + + self.do_lower_case = do_lower_case + self.remove_space = remove_space + self.keep_accents = keep_accents + self.vocab_file = vocab_file + + self.sp_model = spm.SentencePieceProcessor() + self.sp_model.Load(vocab_file) + + @property + def vocab_size(self): + return len(self.sp_model) + + def __getstate__(self): + state = self.__dict__.copy() + state["sp_model"] = None + return state + + def __setstate__(self, d): + self.__dict__ = d + try: + import sentencepiece as spm + except ImportError: + logger.warning("You need to install SentencePiece to use AlbertTokenizer: https://github.com/google/sentencepiece" + "pip install sentencepiece") + self.sp_model = spm.SentencePieceProcessor() + self.sp_model.Load(self.vocab_file) + + def preprocess_text(self, inputs): + if self.remove_space: + outputs = ' '.join(inputs.strip().split()) + else: + outputs = inputs + outputs = outputs.replace("``", '"').replace("''", '"') + + if six.PY2 and isinstance(outputs, str): + outputs = outputs.decode('utf-8') + + if not self.keep_accents: + outputs = unicodedata.normalize('NFKD', outputs) + outputs = ''.join([c for c in outputs if not unicodedata.combining(c)]) + if self.do_lower_case: + outputs = outputs.lower() + + return outputs + + def _tokenize(self, text, return_unicode=True, sample=False): + """ Tokenize a string.
+ return_unicode is used only for py2 + """ + text = self.preprocess_text(text) + # note(zhiliny): in some systems, sentencepiece only accepts str for py2 + if six.PY2 and isinstance(text, unicode): + text = text.encode('utf-8') + + if not sample: + pieces = self.sp_model.EncodeAsPieces(text) + else: + pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1) + new_pieces = [] + for piece in pieces: + if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit(): + cur_pieces = self.sp_model.EncodeAsPieces( + piece[:-1].replace(SPIECE_UNDERLINE, '')) + if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: + if len(cur_pieces[0]) == 1: + cur_pieces = cur_pieces[1:] + else: + cur_pieces[0] = cur_pieces[0][1:] + cur_pieces.append(piece[-1]) + new_pieces.extend(cur_pieces) + else: + new_pieces.append(piece) + + # note(zhiliny): convert back to unicode for py2 + if six.PY2 and return_unicode: + ret_pieces = [] + for piece in new_pieces: + if isinstance(piece, str): + piece = piece.decode('utf-8') + ret_pieces.append(piece) + new_pieces = ret_pieces + + return new_pieces + + def _convert_token_to_id(self, token): + """ Converts a token (str/unicode) in an id using the vocab. """ + return self.sp_model.PieceToId(token) + + def _convert_id_to_token(self, index, return_unicode=True): + """Converts an index (integer) in a token (string/unicode) using the vocab.""" + token = self.sp_model.IdToPiece(index) + if six.PY2 and return_unicode and isinstance(token, str): + token = token.decode('utf-8') + return token + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (strings for sub-words) in a single string.""" + out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip() + return out_string + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks + by concatenating and adding special tokens. + An ALBERT sequence has the following format: + single sequence: [CLS] X [SEP] + pair of sequences: [CLS] A [SEP] B [SEP] + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return cls + token_ids_0 + sep + return cls + token_ids_0 + sep + token_ids_1 + sep + + def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): + """ + Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods. + + Args: + token_ids_0: list of ids (must not contain special tokens) + token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids + for sequence pairs + already_has_special_tokens: (default False) Set to True if the token list is already formated with + special tokens for the model + + Returns: + A list of integers in the range [0, 1]: 0 for a special token, 1 for a sequence token. 
+ """ + + if already_has_special_tokens: + if token_ids_1 is not None: + raise ValueError("You should not supply a second sequence if the provided sequence of " + "ids is already formated with special tokens for the model.") + return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) + + if token_ids_1 is not None: + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1] + + def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): + """ + Creates a mask from the two sequences passed to be used in a sequence-pair classification task. + An ALBERT sequence pair mask has the following format: + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence + + if token_ids_1 is None, only returns the first portion of the mask (0's). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + def save_vocabulary(self, save_directory): + """ Save the sentencepiece vocabulary (copy original file) and special tokens file + to a directory. + """ + if not os.path.isdir(save_directory): + logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) + return + out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file']) + + if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): + copyfile(self.vocab_file, out_vocab_file) + + return (out_vocab_file,) diff --git a/transformers/tokenization_auto.py b/transformers/tokenization_auto.py index ec056de17fa..1f0599ef7f9 100644 --- a/transformers/tokenization_auto.py +++ b/transformers/tokenization_auto.py @@ -19,6 +19,7 @@ from __future__ import absolute_import, division, print_function, unicode_litera import logging from .tokenization_bert import BertTokenizer +from .tokenization_bert_japanese import BertJapaneseTokenizer from .tokenization_openai import OpenAIGPTTokenizer from .tokenization_gpt2 import GPT2Tokenizer from .tokenization_ctrl import CTRLTokenizer @@ -27,6 +28,8 @@ from .tokenization_xlnet import XLNetTokenizer from .tokenization_xlm import XLMTokenizer from .tokenization_roberta import RobertaTokenizer from .tokenization_distilbert import DistilBertTokenizer +from .tokenization_camembert import CamembertTokenizer +from .tokenization_albert import AlbertTokenizer logger = logging.getLogger(__name__) @@ -42,14 +45,16 @@ class AutoTokenizer(object): The tokenizer class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: DistilBertTokenizer (DistilBert model) + - contains `albert`: AlbertTokenizer (ALBERT model) + - contains `camembert`: CamembertTokenizer (CamemBERT model) - contains `roberta`: RobertaTokenizer (RoBERTa model) - contains `bert`: BertTokenizer (Bert model) - contains `openai-gpt`: OpenAIGPTTokenizer (OpenAI GPT model) - contains `gpt2`: GPT2Tokenizer (OpenAI GPT-2 model) - - contains `ctrl`: CTRLTokenizer (Salesforce CTRL model) - contains `transfo-xl`: TransfoXLTokenizer (Transformer-XL model) - contains `xlnet`: XLNetTokenizer (XLNet model) - contains `xlm`: XLMTokenizer (XLM model) + - contains `ctrl`: CTRLTokenizer (Salesforce CTRL model) This class cannot be instantiated using `__init__()` (throw an error). 
""" @@ -65,19 +70,23 @@ class AutoTokenizer(object): The tokenizer class to instantiate is selected as the first pattern matching in the `pretrained_model_name_or_path` string (in the following order): - contains `distilbert`: DistilBertTokenizer (DistilBert model) - - contains `roberta`: RobertaTokenizer (XLM model) + - contains `albert`: AlbertTokenizer (ALBERT model) + - contains `camembert`: CamembertTokenizer (CamemBERT model) + - contains `roberta`: RobertaTokenizer (RoBERTa model) + - contains `bert-base-japanese`: BertJapaneseTokenizer (Bert model) - contains `bert`: BertTokenizer (Bert model) - contains `openai-gpt`: OpenAIGPTTokenizer (OpenAI GPT model) - contains `gpt2`: GPT2Tokenizer (OpenAI GPT-2 model) - - contains `ctrl`: CTRLTokenizer (Salesforce CTRL model) - contains `transfo-xl`: TransfoXLTokenizer (Transformer-XL model) - contains `xlnet`: XLNetTokenizer (XLNet model) - contains `xlm`: XLMTokenizer (XLM model) + - contains `ctrl`: CTRLTokenizer (Salesforce CTRL model) Params: pretrained_model_name_or_path: either: - a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``. + - a string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``. - (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``. @@ -87,6 +96,9 @@ class AutoTokenizer(object): force_download: (`optional`) boolean, default False: Force to (re-)download the vocabulary files and override the cached versions if they exists. + resume_download: (`optional`) boolean, default False: + Do not delete incompletely recieved file. Attempt to resume the download if such a file exists. + proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. @@ -97,14 +109,26 @@ class AutoTokenizer(object): Examples:: - tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') # Download vocabulary from S3 and cache. - tokenizer = AutoTokenizer.from_pretrained('./test/bert_saved_model/') # E.g. tokenizer was saved using `save_pretrained('./test/saved_model/')` + # Download vocabulary from S3 and cache. + tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased') + + # Download vocabulary from S3 (user-uploaded) and cache. + tokenizer = AutoTokenizer.from_pretrained('dbmdz/bert-base-german-cased') + + # If vocabulary files are in a directory (e.g. 
tokenizer was saved using `save_pretrained('./test/saved_model/')`) + tokenizer = AutoTokenizer.from_pretrained('./test/bert_saved_model/') """ if 'distilbert' in pretrained_model_name_or_path: return DistilBertTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) + elif 'albert' in pretrained_model_name_or_path: + return AlbertTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) + elif 'camembert' in pretrained_model_name_or_path: + return CamembertTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) elif 'roberta' in pretrained_model_name_or_path: return RobertaTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) + elif 'bert-base-japanese' in pretrained_model_name_or_path: + return BertJapaneseTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) elif 'bert' in pretrained_model_name_or_path: return BertTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) elif 'openai-gpt' in pretrained_model_name_or_path: @@ -121,4 +145,4 @@ class AutoTokenizer(object): return CTRLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) raise ValueError("Unrecognized model identifier in {}. Should contains one of " "'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', " - "'xlm', 'roberta', 'ctrl'".format(pretrained_model_name_or_path)) + "'xlm', 'roberta', 'distilbert,' 'camembert', 'ctrl', 'albert'".format(pretrained_model_name_or_path)) diff --git a/transformers/tokenization_bert.py b/transformers/tokenization_bert.py index 8affdd90366..ded5072e588 100644 --- a/transformers/tokenization_bert.py +++ b/transformers/tokenization_bert.py @@ -220,7 +220,7 @@ class BertTokenizer(PreTrainedTokenizer): special tokens for the model Returns: - A list of integers in the range [0, 1]: 0 for a special token, 1 for a sequence token. + A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: diff --git a/transformers/tokenization_bert_japanese.py b/transformers/tokenization_bert_japanese.py new file mode 100644 index 00000000000..0ff45cbfe71 --- /dev/null +++ b/transformers/tokenization_bert_japanese.py @@ -0,0 +1,253 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
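With the branches above, the shortcut names added in this PR resolve to their dedicated tokenizer classes. A minimal sketch (each call fetches the vocabulary from S3 on first use; the Japanese model additionally needs mecab-python3):

from transformers import AutoTokenizer

albert_tokenizer = AutoTokenizer.from_pretrained('albert-base-v2')        # AlbertTokenizer
camembert_tokenizer = AutoTokenizer.from_pretrained('camembert-base')     # CamembertTokenizer
japanese_tokenizer = AutoTokenizer.from_pretrained('bert-base-japanese')  # BertJapaneseTokenizer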
+"""Tokenization classes.""" + +from __future__ import absolute_import, division, print_function, unicode_literals + +import collections +import logging +import os +import six +import unicodedata +from io import open + +from .tokenization_bert import BertTokenizer, BasicTokenizer, WordpieceTokenizer, load_vocab +from .tokenization_utils import PreTrainedTokenizer + +logger = logging.getLogger(__name__) + +VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'} + +PRETRAINED_VOCAB_FILES_MAP = { + 'vocab_file': + { + 'bert-base-japanese': "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-vocab.txt", + 'bert-base-japanese-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking-vocab.txt", + 'bert-base-japanese-char': "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-vocab.txt", + 'bert-base-japanese-char-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking-vocab.txt" + } +} + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + 'bert-base-japanese': 512, + 'bert-base-japanese-whole-word-masking': 512, + 'bert-base-japanese-char': 512, + 'bert-base-japanese-char-whole-word-masking': 512 +} + +PRETRAINED_INIT_CONFIGURATION = { + 'bert-base-japanese': { + 'do_lower_case': False, + 'word_tokenizer_type': 'mecab', + 'subword_tokenizer_type': 'wordpiece' + }, + 'bert-base-japanese-whole-word-masking':{ + 'do_lower_case': False, + 'word_tokenizer_type': 'mecab', + 'subword_tokenizer_type': 'wordpiece' + }, + 'bert-base-japanese-char': { + 'do_lower_case': False, + 'word_tokenizer_type': 'mecab', + 'subword_tokenizer_type': 'character' + }, + 'bert-base-japanese-char-whole-word-masking': { + 'do_lower_case': False, + 'word_tokenizer_type': 'mecab', + 'subword_tokenizer_type': 'character' + } +} + + +class BertJapaneseTokenizer(BertTokenizer): + """BERT tokenizer for Japanese text""" + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + + def __init__(self, vocab_file, do_lower_case=False, + do_word_tokenize=True, do_subword_tokenize=True, + word_tokenizer_type='basic', subword_tokenizer_type='wordpiece', + never_split=None, unk_token='[UNK]', sep_token='[SEP]', + pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', **kwargs): + """Constructs a MecabBertTokenizer. + + Args: + **vocab_file**: Path to a one-wordpiece-per-line vocabulary file. + **do_lower_case**: (`optional`) boolean (default True) + Whether to lower case the input. + Only has an effect when do_basic_tokenize=True. + **do_word_tokenize**: (`optional`) boolean (default True) + Whether to do word tokenization. + **do_subword_tokenize**: (`optional`) boolean (default True) + Whether to do subword tokenization. + **word_tokenizer_type**: (`optional`) string (default "basic") + Type of word tokenizer. + **subword_tokenizer_type**: (`optional`) string (default "wordpiece") + Type of subword tokenizer. 
+ """ + super(BertTokenizer, self).__init__(unk_token=unk_token, sep_token=sep_token, + pad_token=pad_token, cls_token=cls_token, + mask_token=mask_token, **kwargs) + self.max_len_single_sentence = self.max_len - 2 # take into account special tokens + self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens + + if not os.path.isfile(vocab_file): + raise ValueError( + "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained " + "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) + self.vocab = load_vocab(vocab_file) + self.ids_to_tokens = collections.OrderedDict( + [(ids, tok) for tok, ids in self.vocab.items()]) + + self.do_word_tokenize = do_word_tokenize + if do_word_tokenize: + if word_tokenizer_type == 'basic': + self.word_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, + never_split=never_split, + tokenize_chinese_chars=False) + elif word_tokenizer_type == 'mecab': + self.word_tokenizer = MecabTokenizer(do_lower_case=do_lower_case, + never_split=never_split) + else: + raise ValueError( + "Invalid word_tokenizer_type '{}' is specified.".format(word_tokenizer_type)) + + self.do_subword_tokenize = do_subword_tokenize + if do_subword_tokenize: + if subword_tokenizer_type == 'wordpiece': + self.subword_tokenizer = WordpieceTokenizer(vocab=self.vocab, + unk_token=self.unk_token) + elif subword_tokenizer_type == 'character': + self.subword_tokenizer = CharacterTokenizer(vocab=self.vocab, + unk_token=self.unk_token) + else: + raise ValueError( + "Invalid subword_tokenizer_type '{}' is specified.".format(subword_tokenizer_type)) + + + def _tokenize(self, text): + if self.do_word_tokenize: + tokens = self.word_tokenizer.tokenize(text, + never_split=self.all_special_tokens) + else: + tokens = [text] + + if self.do_subword_tokenize: + split_tokens = [sub_token for token in tokens + for sub_token in self.subword_tokenizer.tokenize(token)] + else: + split_tokens = tokens + + return split_tokens + + +class MecabTokenizer(object): + """Runs basic tokenization with MeCab morphological parser.""" + + def __init__(self, do_lower_case=False, never_split=None, normalize_text=True): + """Constructs a MecabTokenizer. + + Args: + **do_lower_case**: (`optional`) boolean (default True) + Whether to lower case the input. + **never_split**: (`optional`) list of str + Kept for backward compatibility purposes. + Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`) + List of token not to split. + **normalize_text**: (`optional`) boolean (default True) + Whether to apply unicode normalization to text before tokenization. 
+ """ + self.do_lower_case = do_lower_case + self.never_split = never_split if never_split is not None else [] + self.normalize_text = normalize_text + + import MeCab + self.mecab = MeCab.Tagger() + + def tokenize(self, text, never_split=None, **kwargs): + """Tokenizes a piece of text.""" + if self.normalize_text: + text = unicodedata.normalize('NFKC', text) + + never_split = self.never_split + (never_split if never_split is not None else []) + tokens = [] + + if six.PY2: + mecab_output = self.mecab.parse(text.encode('utf-8')).decode('utf-8') + else: + mecab_output = self.mecab.parse(text) + + cursor = 0 + for line in mecab_output.split('\n'): + if line == 'EOS': + break + + token, _ = line.split('\t') + token_start = text.index(token, cursor) + token_end = token_start + len(token) + if self.do_lower_case and token not in never_split: + token = token.lower() + + tokens.append(token) + cursor = token_end + + return tokens + + +class CharacterTokenizer(object): + """Runs Character tokenziation.""" + + def __init__(self, vocab, unk_token, normalize_text=True): + """Constructs a CharacterTokenizer. + + Args: + **vocab**: + Vocabulary object. + **unk_token**: str + A special symbol for out-of-vocabulary token. + **normalize_text**: (`optional`) boolean (default True) + Whether to apply unicode normalization to text before tokenization. + """ + self.vocab = vocab + self.unk_token = unk_token + self.normalize_text = normalize_text + + def tokenize(self, text): + """Tokenizes a piece of text into characters. + + For example: + input = "apple" + output = ["a", "p", "p", "l", "e"] + Args: + text: A single token or whitespace separated tokens. + This should have already been passed through `BasicTokenizer`. + Returns: + A list of characters. + """ + if self.normalize_text: + text = unicodedata.normalize('NFKC', text) + + output_tokens = [] + for i, char in enumerate(text): + if char not in self.vocab: + output_tokens.append(self.unk_token) + continue + + output_tokens.append(char) + + return output_tokens diff --git a/transformers/tokenization_camembert.py b/transformers/tokenization_camembert.py new file mode 100644 index 00000000000..b4091558e11 --- /dev/null +++ b/transformers/tokenization_camembert.py @@ -0,0 +1,160 @@ +# coding=utf-8 +# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License +""" Tokenization classes for Camembert model.""" +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import logging +import os +from shutil import copyfile + +import sentencepiece as spm +from transformers.tokenization_utils import PreTrainedTokenizer + +logger = logging.getLogger(__name__) + +VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'} + +PRETRAINED_VOCAB_FILES_MAP = { + 'vocab_file': + { + 'camembert-base': "https://s3.amazonaws.com/models.huggingface.co/bert/camembert-base-sentencepiece.bpe.model", + } +} + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + 'camembert-base': None, +} + +class CamembertTokenizer(PreTrainedTokenizer): + """ + Adapted from RobertaTokenizer and XLNetTokenizer + SentencePiece based tokenizer. Peculiarities: + + - requires `SentencePiece <https://github.com/google/sentencepiece>`_ + """ + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + + def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", + cls_token="<s>", unk_token="<unk>", pad_token='<pad>', mask_token='<mask>', + additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED'], **kwargs): + super(CamembertTokenizer, self).__init__(max_len=512, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, + sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, + mask_token=mask_token, additional_special_tokens=additional_special_tokens, + **kwargs) + self.max_len_single_sentence = self.max_len - 2 # take into account special tokens + self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens + self.sp_model = spm.SentencePieceProcessor() + self.sp_model.Load(str(vocab_file)) + self.vocab_file = vocab_file + # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual + # sentencepiece vocabulary (this is the case for <s> and </s> + self.fairseq_tokens_to_ids = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3} + self.fairseq_offset = len(self.fairseq_tokens_to_ids) + self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.fairseq_tokens_to_ids) + self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks + by concatenating and adding special tokens. + A RoBERTa sequence has the following format: + single sequence: <s> X </s> + pair of sequences: <s> A </s></s> B </s> + """ + if token_ids_1 is None: + return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + cls = [self.cls_token_id] + sep = [self.sep_token_id] + return cls + token_ids_0 + sep + sep + token_ids_1 + sep + + def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): + """ + Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
+ + Args: + token_ids_0: list of ids (must not contain special tokens) + token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids + for sequence pairs + already_has_special_tokens: (default False) Set to True if the token list is already formated with + special tokens for the model + + Returns: + A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + if token_ids_1 is not None: + raise ValueError("You should not supply a second sequence if the provided sequence of " + "ids is already formated with special tokens for the model.") + return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] + + def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): + """ + Creates a mask from the two sequences passed to be used in a sequence-pair classification task. + A RoBERTa sequence pair mask has the following format: + 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence + + if token_ids_1 is None, only returns the first portion of the mask (0's). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep + sep) * [0] + len(token_ids_1 + sep) * [1] + + @property + def vocab_size(self): + return len(self.fairseq_tokens_to_ids) + len(self.sp_model) + + def _tokenize(self, text): + return self.sp_model.EncodeAsPieces(text) + + def _convert_token_to_id(self, token): + """ Converts a token (str/unicode) in an id using the vocab. """ + if token in self.fairseq_tokens_to_ids: + return self.fairseq_tokens_to_ids[token] + elif self.sp_model.PieceToId(token) == 0: + # Convert sentence piece unk token to fairseq unk token index + return self.unk_token_id + return self.fairseq_offset + self.sp_model.PieceToId(token) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (string/unicode) using the vocab.""" + if index in self.fairseq_ids_to_tokens: + return self.fairseq_ids_to_tokens[index] + return self.sp_model.IdToPiece(index - self.fairseq_offset) + + def save_vocabulary(self, save_directory): + """ Save the sentencepiece vocabulary (copy original file) and special tokens file + to a directory. 
+ """ + if not os.path.isdir(save_directory): + logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) + return + out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file']) + + if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): + copyfile(self.vocab_file, out_vocab_file) + + return (out_vocab_file,) diff --git a/transformers/tokenization_ctrl.py b/transformers/tokenization_ctrl.py index 2406fa256b6..219f17c404b 100644 --- a/transformers/tokenization_ctrl.py +++ b/transformers/tokenization_ctrl.py @@ -46,6 +46,64 @@ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { 'ctrl': 256, } +CONTROL_CODES = { + "Pregnancy": 168629, + "Christianity": 7675, + "Explain": 106423, + "Fitness": 63440, + "Saving": 63163, + "Ask": 27171, + "Ass": 95985, + "Joke": 163509, + "Questions": 45622, + "Thoughts": 49605, + "Retail": 52342, + "Feminism": 164338, + "Writing": 11992, + "Atheism": 192263, + "Netflix": 48616, + "Computing": 39639, + "Opinion": 43213, + "Alone": 44967, + "Funny": 58917, + "Gaming": 40358, + "Human": 4088, + "India": 1331, + "Joker": 77138, + "Diet": 36206, + "Legal": 11859, + "Norman": 4939, + "Tip": 72689, + "Weight": 52343, + "Movies": 46273, + "Running": 23425, + "Science": 2090, + "Horror": 37793, + "Confession": 60572, + "Finance": 12250, + "Politics": 16360, + "Scary": 191985, + "Support": 12654, + "Technologies": 32516, + "Teenage": 66160, + "Event": 32769, + "Learned": 67460, + "Notion": 182770, + "Wikipedia": 37583, + "Books": 6665, + "Extract": 76050, + "Confessions": 102701, + "Conspiracy": 75932, + "Links": 63674, + "Narcissus": 150425, + "Relationship": 54766, + "Relationships": 134796, + "Reviews": 41671, + "News": 4256, + "Translation": 26820, + "multilingual": 128406, +} + def get_pairs(word): """Return set of symbol pairs in a word. @@ -63,24 +121,23 @@ def get_pairs(word): class CTRLTokenizer(PreTrainedTokenizer): """ CTRL BPE tokenizer. Peculiarities: - - Byte-level Byte-Pair-Encoding - - Requires a space to start the input string => the encoding methods should be called with the - ``add_prefix_space`` flag set to ``True``. 
- Otherwise, this tokenizer ``encode`` and ``decode`` method will not conserve - the absence of a space at the beginning of a string: `tokenizer.decode(tokenizer.encode("Hello")) = " Hello"` + - Byte-Pair-Encoding """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + control_codes = CONTROL_CODES def __init__(self, vocab_file, merges_file, unk_token="", **kwargs): super(CTRLTokenizer, self).__init__(unk_token=unk_token, **kwargs) self.max_len_single_sentence = self.max_len # no default special tokens - you can update this value if you add special tokens self.max_len_sentences_pair = self.max_len # no default special tokens - you can update this value if you add special tokens - self.encoder = json.load(open(vocab_file, encoding="utf-8")) + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) self.decoder = {v:k for k,v in self.encoder.items()} - merges = open(merges_file, encoding='utf-8').read().split('\n')[1:-1] + with open(merges_file, encoding='utf-8') as merges_handle: + merges = merges_handle.read().split('\n')[1:-1] merges = [tuple(merge.split()) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} @@ -137,9 +194,9 @@ class CTRLTokenizer(PreTrainedTokenizer): """ split_tokens = [] - text = text.split(' ') + words = re.findall(r'\S+\n?', text) - for token in text: + for token in words: split_tokens.extend([t for t in self.bpe(token).split(' ')]) return split_tokens diff --git a/transformers/tokenization_distilbert.py b/transformers/tokenization_distilbert.py index dfa02926d82..f40bf2bd77e 100644 --- a/transformers/tokenization_distilbert.py +++ b/transformers/tokenization_distilbert.py @@ -33,12 +33,16 @@ PRETRAINED_VOCAB_FILES_MAP = { { 'distilbert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", 'distilbert-base-uncased-distilled-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", + 'distilbert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-vocab.txt", + 'distilbert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { 'distilbert-base-uncased': 512, 'distilbert-base-uncased-distilled-squad': 512, + 'distilbert-base-german-cased': 512, + 'distilbert-base-multilingual-cased': 512, } diff --git a/transformers/tokenization_gpt2.py b/transformers/tokenization_gpt2.py index 6a7f75acb2f..68c6101860b 100644 --- a/transformers/tokenization_gpt2.py +++ b/transformers/tokenization_gpt2.py @@ -46,6 +46,7 @@ PRETRAINED_VOCAB_FILES_MAP = { 'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json", 'gpt2-medium': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-vocab.json", 'gpt2-large': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-vocab.json", + 'gpt2-xl': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-vocab.json", 'distilgpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-vocab.json", }, 'merges_file': @@ -53,6 +54,7 @@ PRETRAINED_VOCAB_FILES_MAP = { 'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt", 'gpt2-medium': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-merges.txt", 'gpt2-large': 
"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-merges.txt", + 'gpt2-xl': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-merges.txt", 'distilgpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-merges.txt", }, } @@ -61,6 +63,7 @@ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { 'gpt2': 1024, 'gpt2-medium': 1024, 'gpt2-large': 1024, + 'gpt2-xl': 1024, 'distilgpt2': 1024, } @@ -69,7 +72,7 @@ def bytes_to_unicode(): """ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control characters the bpe code barfs on. - + The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. @@ -104,10 +107,10 @@ class GPT2Tokenizer(PreTrainedTokenizer): """ GPT-2 BPE tokenizer. Peculiarities: - Byte-level Byte-Pair-Encoding - - Requires a space to start the input string => the encoding methods should be called with the + - Requires a space to start the input string => the encoding and tokenize methods should be called with the ``add_prefix_space`` flag set to ``True``. - Otherwise, this tokenizer ``encode`` and ``decode`` method will not conserve - the absence of a space at the beginning of a string: `tokenizer.decode(tokenizer.encode("Hello")) = " Hello"` + Otherwise, this tokenizer's ``encode``, ``decode``, and ``tokenize`` methods will not conserve + the spaces at the beginning of a string: `tokenizer.decode(tokenizer.encode(" Hello")) = "Hello"` """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP @@ -119,13 +122,15 @@ class GPT2Tokenizer(PreTrainedTokenizer): self.max_len_single_sentence = self.max_len # no default special tokens - you can update this value if you add special tokens self.max_len_sentences_pair = self.max_len # no default special tokens - you can update this value if you add special tokens - self.encoder = json.load(open(vocab_file, encoding="utf-8")) + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} - bpe_data = open(merges_file, encoding='utf-8').read().split('\n')[1:-1] - bpe_merges = [tuple(merge.split()) for merge in bpe_data] + with open(merges_file, encoding='utf-8') as merges_handle: + bpe_merges = merges_handle.read().split('\n')[1:-1] + bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} @@ -181,7 +186,7 @@ class GPT2Tokenizer(PreTrainedTokenizer): """ Tokenize a string. Args: - add_prefix_space (boolean, default False): - Begin the sentence with at least one space toto get invariance to word order in GPT-2 (and RoBERTa) tokenizers. + Begin the sentence with at least one space to get invariance to word order in GPT-2 (and RoBERTa) tokenizers. 
""" if add_prefix_space: text = ' ' + text @@ -231,4 +236,4 @@ class GPT2Tokenizer(PreTrainedTokenizer): writer.write(' '.join(bpe_tokens) + u'\n') index += 1 - return vocab_file, merge_file \ No newline at end of file + return vocab_file, merge_file diff --git a/transformers/tokenization_openai.py b/transformers/tokenization_openai.py index 0efbdb37c0c..a4c64b7020d 100644 --- a/transformers/tokenization_openai.py +++ b/transformers/tokenization_openai.py @@ -101,9 +101,11 @@ class OpenAIGPTTokenizer(PreTrainedTokenizer): self.nlp = BasicTokenizer(do_lower_case=True) self.fix_text = None - self.encoder = json.load(open(vocab_file, encoding="utf-8")) + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) self.decoder = {v:k for k,v in self.encoder.items()} - merges = open(merges_file, encoding='utf-8').read().split('\n')[1:-1] + with open(merges_file, encoding='utf-8') as merges_handle: + merges = merges_handle.read().split('\n')[1:-1] merges = [tuple(merge.split()) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} diff --git a/transformers/tokenization_roberta.py b/transformers/tokenization_roberta.py index 5e1300fa4d7..b44e0049978 100644 --- a/transformers/tokenization_roberta.py +++ b/transformers/tokenization_roberta.py @@ -47,6 +47,8 @@ PRETRAINED_VOCAB_FILES_MAP = { 'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json", 'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-vocab.json", 'distilroberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-vocab.json", + 'roberta-base-openai-detector': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json", + 'roberta-large-openai-detector': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json", }, 'merges_file': { @@ -54,6 +56,8 @@ PRETRAINED_VOCAB_FILES_MAP = { 'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt", 'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-merges.txt", 'distilroberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-merges.txt", + 'roberta-base-openai-detector': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt", + 'roberta-large-openai-detector': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt", }, } @@ -62,6 +66,8 @@ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { 'roberta-large': 512, 'roberta-large-mnli': 512, 'distilroberta-base': 512, + 'roberta-base-openai-detector': 512, + 'roberta-large-openai-detector': 512, } @@ -114,7 +120,7 @@ class RobertaTokenizer(GPT2Tokenizer): special tokens for the model Returns: - A list of integers in the range [0, 1]: 0 for a special token, 1 for a sequence token. + A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: if token_ids_1 is not None: diff --git a/transformers/tokenization_utils.py b/transformers/tokenization_utils.py index 5e5be872efe..317ecd167b7 100644 --- a/transformers/tokenization_utils.py +++ b/transformers/tokenization_utils.py @@ -21,9 +21,11 @@ import os import json import six import copy +import itertools +import re from io import open -from .file_utils import cached_path, is_tf_available, is_torch_available +from .file_utils import cached_path, is_remote_url, hf_bucket_url, is_tf_available, is_torch_available if is_tf_available(): import tensorflow as tf @@ -76,6 +78,8 @@ class PreTrainedTokenizer(object): "pad_token", "cls_token", "mask_token", "additional_special_tokens"] + padding_side = "right" + @property def bos_token(self): """ Beginning of sentence token (string). Log an error if used while not having been set. """ @@ -189,6 +193,11 @@ class PreTrainedTokenizer(object): """ Id of the padding token in the vocabulary. Log an error if used while not having been set. """ return self.convert_tokens_to_ids(self.pad_token) + @property + def pad_token_type_id(self): + """ Id of the padding token type in the vocabulary.""" + return self._pad_token_type_id + @property def cls_token_id(self): """ Id of the classification token in the vocabulary. E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """ @@ -212,10 +221,14 @@ class PreTrainedTokenizer(object): self._pad_token = None self._cls_token = None self._mask_token = None + self._pad_token_type_id = 0 self._additional_special_tokens = [] self.max_len = max_len if max_len is not None else int(1e12) + # Padding side is right by default and over-riden in subclasses. If specified in the kwargs, it is changed. + self.padding_side = kwargs.pop('padding_side', self.padding_side) + # Added tokens self.added_tokens_encoder = {} self.added_tokens_decoder = {} @@ -242,6 +255,7 @@ class PreTrainedTokenizer(object): pretrained_model_name_or_path: either: - a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``. + - a string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``. - a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``. - (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``. @@ -251,6 +265,9 @@ class PreTrainedTokenizer(object): force_download: (`optional`) boolean, default False: Force to (re-)download the vocabulary files and override the cached versions if they exists. + resume_download: (`optional`) boolean, default False: + Do not delete incompletely recieved file. Attempt to resume the download if such a file exists. + proxies: (`optional`) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. @@ -266,6 +283,9 @@ class PreTrainedTokenizer(object): # Download vocabulary from S3 and cache. tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + # Download vocabulary from S3 (user-uploaded) and cache. 
+ tokenizer = BertTokenizer.from_pretrained('dbmdz/bert-base-german-cased') + # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`) tokenizer = BertTokenizer.from_pretrained('./test/saved_model/') @@ -286,6 +306,7 @@ class PreTrainedTokenizer(object): def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs): cache_dir = kwargs.pop('cache_dir', None) force_download = kwargs.pop('force_download', False) + resume_download = kwargs.pop('resume_download', False) proxies = kwargs.pop('proxies', None) s3_models = list(cls.max_model_input_sizes.keys()) @@ -310,12 +331,15 @@ class PreTrainedTokenizer(object): if os.path.isdir(pretrained_model_name_or_path): # If a directory is provided we look for the standard filenames full_file_name = os.path.join(pretrained_model_name_or_path, file_name) - else: + if not os.path.exists(full_file_name): + logger.info("Didn't find file {}. We won't load it.".format(full_file_name)) + full_file_name = None + elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): # If a path to a file is provided we use it (will only work for non-BPE tokenizer using a single vocabulary file) full_file_name = pretrained_model_name_or_path - if not os.path.exists(full_file_name): - logger.info("Didn't find file {}. We won't load it.".format(full_file_name)) - full_file_name = None + else: + full_file_name = hf_bucket_url(pretrained_model_name_or_path, postfix=file_name) + vocab_files[file_id] = full_file_name # Look for the additional tokens files @@ -342,7 +366,7 @@ class PreTrainedTokenizer(object): "We assumed '{}' was a path or url to a directory containing vocabulary files " "named {} but couldn't find such vocabulary files at this path or url.".format( pretrained_model_name_or_path, ', '.join(s3_models), - pretrained_model_name_or_path, + pretrained_model_name_or_path, list(cls.vocab_files_names.values()))) # Get files from url, cache, or disk depending on the case @@ -352,7 +376,7 @@ class PreTrainedTokenizer(object): if file_path is None: resolved_vocab_files[file_id] = None else: - resolved_vocab_files[file_id] = cached_path(file_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies) + resolved_vocab_files[file_id] = cached_path(file_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download) except EnvironmentError: if pretrained_model_name_or_path in s3_models: msg = "Couldn't reach server at '{}' to download vocabulary files." @@ -377,7 +401,8 @@ class PreTrainedTokenizer(object): # Did we saved some inputs and kwargs to reload ? 
tokenizer_config_file = resolved_vocab_files.pop('tokenizer_config_file', None) if tokenizer_config_file is not None: - init_kwargs = json.load(open(tokenizer_config_file, encoding="utf-8")) + with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle: + init_kwargs = json.load(tokenizer_config_handle) saved_init_inputs = init_kwargs.pop('init_inputs', ()) if not init_inputs: init_inputs = saved_init_inputs @@ -402,7 +427,8 @@ class PreTrainedTokenizer(object): if args_name not in init_kwargs: init_kwargs[args_name] = file_path if special_tokens_map_file is not None: - special_tokens_map = json.load(open(special_tokens_map_file, encoding="utf-8")) + with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle: + special_tokens_map = json.load(special_tokens_map_handle) for key, value in special_tokens_map.items(): if key not in init_kwargs: init_kwargs[key] = value @@ -416,7 +442,8 @@ class PreTrainedTokenizer(object): # Add supplementary tokens. if added_tokens_file is not None: - added_tok_encoder = json.load(open(added_tokens_file, encoding="utf-8")) + with open(added_tokens_file, encoding="utf-8") as added_tokens_handle: + added_tok_encoder = json.load(added_tokens_handle) added_tok_decoder = {v:k for k, v in added_tok_encoder.items()} tokenizer.added_tokens_encoder.update(added_tok_encoder) tokenizer.added_tokens_decoder.update(added_tok_decoder) @@ -512,6 +539,8 @@ class PreTrainedTokenizer(object): to_add_tokens = [] for token in new_tokens: assert isinstance(token, str) or (six.PY2 and isinstance(token, unicode)) + if self.init_kwargs.get('do_lower_case', False) and token not in self.all_special_tokens: + token = token.lower() if token != self.unk_token and \ self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token) and \ token not in to_add_tokens: @@ -604,7 +633,23 @@ class PreTrainedTokenizer(object): vocabularies (BPE/SentencePieces/WordPieces). Take care of added tokens. + + text: The sequence to be encoded. 
+ **kwargs: passed to the child `self.tokenize()` method """ + def lowercase_text(t): + # convert non-special tokens to lowercase + escaped_special_toks = [re.escape(s_tok) for s_tok in self.all_special_tokens] + pattern = r'(^' + r'|'.join(escaped_special_toks) + r')|' + \ + r'(.+?)' + return re.sub( + pattern, + lambda m: m.groups()[0] or m.groups()[1].lower(), + t) + + if self.init_kwargs.get('do_lower_case', False): + text = lowercase_text(text) + def split_on_token(tok, text): result = [] split_text = text.split(tok) @@ -624,7 +669,7 @@ class PreTrainedTokenizer(object): return result def split_on_tokens(tok_list, text): - if not text: + if not text.strip(): return [] if not tok_list: return self._tokenize(text, **kwargs) @@ -641,9 +686,9 @@ class PreTrainedTokenizer(object): tokenized_text += [sub_text] text_list = tokenized_text - return sum((self._tokenize(token, **kwargs) if token not \ + return list(itertools.chain.from_iterable((self._tokenize(token, **kwargs) if token not \ in self.added_tokens_encoder and token not in self.all_special_tokens \ - else [token] for token in tokenized_text), []) + else [token] for token in tokenized_text))) added_tokens = list(self.added_tokens_encoder.keys()) + self.all_special_tokens tokenized_text = split_on_tokens(added_tokens, text) @@ -671,10 +716,6 @@ class PreTrainedTokenizer(object): ids = [] for token in tokens: ids.append(self._convert_token_to_id_with_added_voc(token)) - if len(ids) > self.max_len: - logger.warning("Token indices sequence length is longer than the specified maximum sequence length " - "for this model ({} > {}). Running this sequence through the model will result in " - "indexing errors".format(len(ids), self.max_len)) return ids def _convert_token_to_id_with_added_voc(self, token): @@ -689,14 +730,15 @@ class PreTrainedTokenizer(object): raise NotImplementedError def encode(self, - text, - text_pair=None, - add_special_tokens=False, - max_length=None, - stride=0, - truncation_strategy='longest_first', - return_tensors=None, - **kwargs): + text, + text_pair=None, + add_special_tokens=True, + max_length=None, + stride=0, + truncation_strategy='longest_first', + pad_to_max_length=False, + return_tensors=None, + **kwargs): """ Converts a string in a sequence of ids (integer), using the tokenizer and vocabulary. @@ -721,6 +763,12 @@ class PreTrainedTokenizer(object): - 'only_first': Only truncate the first sequence - 'only_second': Only truncate the second sequence - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length) + pad_to_max_length: if set to True, the returned sequences will be padded according to the model's padding side and + padding index, up to their max length. If no max length is specified, the padding is done up to the model's max length. + The tokenizer padding sides are handled by the following strings: + - 'left': pads on the left of the sequences + - 'right': pads on the right of the sequences + Defaults to False: no padding. return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant or PyTorch torch.Tensor instead of a list of python integers. 
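A short sketch of the updated encode defaults and the new padding flag, using 'bert-base-uncased' purely as an illustrative checkpoint:

from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

# add_special_tokens now defaults to True, so [CLS] and [SEP] are included;
# pad_to_max_length then right-pads with pad_token_id up to max_length.
ids = tokenizer.encode("Hello world", max_length=8, pad_to_max_length=True)
assert len(ids) == 8
assert ids[-4:] == [tokenizer.pad_token_id] * 4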
**kwargs: passed to the `self.tokenize()` method @@ -731,6 +779,7 @@ class PreTrainedTokenizer(object): add_special_tokens=add_special_tokens, stride=stride, truncation_strategy=truncation_strategy, + pad_to_max_length=pad_to_max_length, return_tensors=return_tensors, **kwargs) @@ -739,11 +788,16 @@ class PreTrainedTokenizer(object): def encode_plus(self, text, text_pair=None, - add_special_tokens=False, + add_special_tokens=True, max_length=None, stride=0, truncation_strategy='longest_first', + pad_to_max_length=False, return_tensors=None, + return_token_type_ids=True, + return_attention_mask=True, + return_overflowing_tokens=False, + return_special_tokens_mask=False, **kwargs): """ Returns a dictionary containing the encoded sequence or sequence pair and additional information: @@ -768,9 +822,40 @@ class PreTrainedTokenizer(object): - 'only_first': Only truncate the first sequence - 'only_second': Only truncate the second sequence - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length) + pad_to_max_length: if set to True, the returned sequences will be padded according to the model's padding side and + padding index, up to their max length. If no max length is specified, the padding is done up to the model's max length. + The tokenizer padding sides are handled by the following strings: + - 'left': pads on the left of the sequences + - 'right': pads on the right of the sequences + Defaults to False: no padding. return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant or PyTorch torch.Tensor instead of a list of python integers. + return_token_type_ids: (optional) Set to False to avoid returning token_type_ids (default True). + return_attention_mask: (optional) Set to False to avoid returning attention mask (default True). + return_overflowing_tokens: (optional) Set to True to return overflowing token information (default False). + return_special_tokens_mask: (optional) Set to True to return special tokens mask information (default False). **kwargs: passed to the `self.tokenize()` method + + Return: + A Dictionary of shape:: + + { + input_ids: list[int], + token_type_ids: list[int] if return_token_type_ids is True (default) + attention_mask: list[int] if return_attention_mask is True (default) + overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True + num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True + special_tokens_mask: list[int] if ``add_special_tokens`` is set to ``True`` and return_special_tokens_mask is True + } + + With the fields: + ``input_ids``: list of token ids to be fed to a model + ``token_type_ids``: list of token type ids to be fed to a model + ``attention_mask``: list of indices specifying which tokens should be attended to by the model + ``overflowing_tokens``: list of overflowing tokens if a max length is specified. + ``num_truncated_tokens``: number of overflowing tokens if a ``max_length`` is specified + ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 0 specifying special added tokens and 1 specifying sequence tokens.
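(Editor's illustrative aside, not part of the diff: a minimal usage sketch of the new `encode_plus` arguments and return fields documented above. It assumes `tokenizer` is any concrete `PreTrainedTokenizer` subclass instance, e.g. one loaded via `from_pretrained`; the input strings and `max_length=16` are arbitrary examples.)

# Hedged sketch only; the texts and max_length below are illustrative.
encoded = tokenizer.encode_plus(
    "Hello world",
    text_pair="How are you?",
    add_special_tokens=True,        # now the default
    max_length=16,
    pad_to_max_length=True,         # pad up to max_length on the tokenizer's padding_side
    return_token_type_ids=True,
    return_attention_mask=True,
)
input_ids = encoded["input_ids"]            # list[int], truncated/padded to length 16
attention_mask = encoded["attention_mask"]  # 1 for real tokens, 0 for padding
token_type_ids = encoded["token_type_ids"]  # segment ids, padded with pad_token_type_id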
""" def get_input_ids(text): @@ -789,13 +874,24 @@ class PreTrainedTokenizer(object): return self.prepare_for_model(first_ids, pair_ids=second_ids, max_length=max_length, + pad_to_max_length=pad_to_max_length, add_special_tokens=add_special_tokens, stride=stride, truncation_strategy=truncation_strategy, - return_tensors=return_tensors) + return_tensors=return_tensors, + return_attention_mask=return_attention_mask, + return_token_type_ids=return_token_type_ids, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask) - def prepare_for_model(self, ids, pair_ids=None, max_length=None, add_special_tokens=False, stride=0, - truncation_strategy='longest_first', return_tensors=None): + def prepare_for_model(self, ids, pair_ids=None, max_length=None, add_special_tokens=True, stride=0, + truncation_strategy='longest_first', + pad_to_max_length=False, + return_tensors=None, + return_token_type_ids=True, + return_attention_mask=True, + return_overflowing_tokens=False, + return_special_tokens_mask=False): """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates @@ -818,23 +914,36 @@ class PreTrainedTokenizer(object): - 'only_first': Only truncate the first sequence - 'only_second': Only truncate the second sequence - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length) + pad_to_max_length: if set to True, the returned sequences will be padded according to the model's padding side and + padding index, up to their max length. If no max length is specified, the padding is done up to the model's max length. + The tokenizer padding sides are handled by the following strings: + - 'left': pads on the left of the sequences + - 'right': pads on the right of the sequences + Defaults to False: no padding. return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant or PyTorch torch.Tensor instead of a list of python integers. + return_token_type_ids: (optional) Set to False to avoid returning token_type_ids (default True). + return_attention_mask: (optional) Set to False to avoid returning attention mask (default True) + return_overflowing_tokens: (optional) Set to True to return overflowing token information (default False). + return_special_tokens_mask: (optional) Set to True to return special tokens mask information (default False). Return: A Dictionary of shape:: { input_ids: list[int], - overflowing_tokens: list[int] if a ``max_length`` is specified, else None - special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True`` + token_type_ids: list[int] if return_token_type_ids is True (default) + overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True + num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True + special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True } With the fields: - ``input_ids``: list of tokens to be fed to a model + ``input_ids``: list of token ids to be fed to a model + ``token_type_ids``: list of token type ids to be fed to a model ``overflowing_tokens``: list of overflowing tokens if a max length is specified. 
- + ``num_truncated_tokens``: number of overflowing tokens a ``max_length`` is specified ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 0 specifying special added tokens and 1 specifying sequence tokens. """ @@ -843,39 +952,98 @@ class PreTrainedTokenizer(object): len_pair_ids = len(pair_ids) if pair else 0 encoded_inputs = {} + + # Handle max sequence length total_len = len_ids + len_pair_ids + (self.num_added_tokens(pair=pair) if add_special_tokens else 0) if max_length and total_len > max_length: ids, pair_ids, overflowing_tokens = self.truncate_sequences(ids, pair_ids=pair_ids, num_tokens_to_remove=total_len-max_length, truncation_strategy=truncation_strategy, stride=stride) - encoded_inputs["overflowing_tokens"] = overflowing_tokens - encoded_inputs["num_truncated_tokens"] = total_len - max_length + if return_overflowing_tokens: + encoded_inputs["overflowing_tokens"] = overflowing_tokens + encoded_inputs["num_truncated_tokens"] = total_len - max_length + # Handle special_tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) - encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([1] * len(pair_ids) if pair else []) - if return_tensors == 'tf' and is_tf_available(): - sequence = tf.constant([sequence]) - token_type_ids = tf.constant([token_type_ids]) - elif return_tensors == 'pt' and is_torch_available(): - sequence = torch.tensor([sequence]) - token_type_ids = torch.tensor([token_type_ids]) - elif return_tensors is not None: - logger.warning("Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format(return_tensors)) + if return_special_tokens_mask: + encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) encoded_inputs["input_ids"] = sequence - encoded_inputs["token_type_ids"] = token_type_ids + if return_token_type_ids: + encoded_inputs["token_type_ids"] = token_type_ids if max_length and len(encoded_inputs["input_ids"]) > max_length: encoded_inputs["input_ids"] = encoded_inputs["input_ids"][:max_length] - encoded_inputs["token_type_ids"] = encoded_inputs["token_type_ids"][:max_length] - encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"][:max_length] + if return_token_type_ids: + encoded_inputs["token_type_ids"] = encoded_inputs["token_type_ids"][:max_length] + if return_special_tokens_mask: + encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"][:max_length] + + if max_length is None and len(encoded_inputs["input_ids"]) > self.max_len: + logger.warning("Token indices sequence length is longer than the specified maximum sequence length " + "for this model ({} > {}). 
Running this sequence through the model will result in " + "indexing errors".format(len(ids), self.max_len)) + + needs_to_be_padded = pad_to_max_length and ( + max_length and len(encoded_inputs["input_ids"]) < max_length + or + max_length is None and len(encoded_inputs["input_ids"]) < self.max_len and self.max_len <= 10000 + ) + + if pad_to_max_length and max_length is None and self.max_len > 10000: + logger.warning("Sequence can't be padded as no maximum length is specified and the model maximum length is too high.") + + if needs_to_be_padded: + difference = (max_length if max_length is not None else self.max_len) - len(encoded_inputs["input_ids"]) + + if self.padding_side == 'right': + if return_attention_mask: + encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) + [0] * difference + if return_token_type_ids: + encoded_inputs["token_type_ids"] = encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference + if return_special_tokens_mask: + encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference + encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference + elif self.padding_side == 'left': + if return_attention_mask: + encoded_inputs["attention_mask"] = [0] * difference + [1] * len(encoded_inputs["input_ids"]) + if return_token_type_ids: + encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs["token_type_ids"] + if return_special_tokens_mask: + encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] + encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"] + + else: + raise ValueError("Invalid padding strategy:" + str(self.padding_side)) + + elif return_attention_mask: + encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"]) + + # Prepare inputs as tensors if asked + if return_tensors == 'tf' and is_tf_available(): + encoded_inputs["input_ids"] = tf.constant([encoded_inputs["input_ids"]]) + encoded_inputs["token_type_ids"] = tf.constant([encoded_inputs["token_type_ids"]]) + + if "attention_mask" in encoded_inputs: + encoded_inputs["attention_mask"] = tf.constant([encoded_inputs["attention_mask"]]) + + elif return_tensors == 'pt' and is_torch_available(): + encoded_inputs["input_ids"] = torch.tensor([encoded_inputs["input_ids"]]) + encoded_inputs["token_type_ids"] = torch.tensor([encoded_inputs["token_type_ids"]]) + + if "attention_mask" in encoded_inputs: + encoded_inputs["attention_mask"] = torch.tensor([encoded_inputs["attention_mask"]]) + elif return_tensors is not None: + logger.warning( + "Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format( + return_tensors)) return encoded_inputs @@ -920,7 +1088,6 @@ class PreTrainedTokenizer(object): return (ids, pair_ids, overflowing_tokens) def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): - logger.warning("This tokenizer does not make use of special tokens.") if token_ids_1 is None: return len(token_ids_0) * [0] return [0] * len(token_ids_0) + [1] * len(token_ids_1) @@ -933,7 +1100,6 @@ class PreTrainedTokenizer(object): single sequence: X pair of sequences: A B """ - logger.warning("This tokenizer does not make use of special tokens. 
Input is returned with no modification.") if token_ids_1 is None: return token_ids_0 return token_ids_0 + token_ids_1 @@ -951,7 +1117,7 @@ class PreTrainedTokenizer(object): special tokens for the model Returns: - A list of integers in the range [0, 1]: 0 for a special token, 1 for a sequence token. + A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0)) @@ -1055,7 +1221,7 @@ class PreTrainedTokenizer(object): class attributes (cls_token, unk_token...). """ all_toks = self.all_special_tokens - all_ids = list(self._convert_token_to_id(t) for t in all_toks) + all_ids = self.convert_tokens_to_ids(all_toks) return all_ids @staticmethod diff --git a/transformers/tokenization_xlm.py b/transformers/tokenization_xlm.py index d09ce6b9dce..8def80bec49 100644 --- a/transformers/tokenization_xlm.py +++ b/transformers/tokenization_xlm.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""Tokenization classes for OpenAI GPT.""" +"""Tokenization classes for XLM.""" from __future__ import (absolute_import, division, print_function, unicode_literals) @@ -524,7 +524,7 @@ class XLMTokenizer(PreTrainedTokenizer): - argument ``special_tokens`` and function ``set_special_tokens``, can be used to add additional symbols \ (ex: "__classify__") to a vocabulary - + - `lang2id` attribute maps the languages supported by the model with their ids if provided (automatically set for pretrained vocabularies) - `id2lang` attributes does reverse mapping if provided (automatically set for pretrained vocabularies) @@ -549,6 +549,10 @@ class XLMTokenizer(PreTrainedTokenizer): additional_special_tokens=additional_special_tokens, **kwargs) + + self.max_len_single_sentence = self.max_len - 2 # take into account special tokens + self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens + # cache of sm.MosesPunctNormalizer instance self.cache_moses_punct_normalizer = dict() # cache of sm.MosesTokenizer instance @@ -564,9 +568,11 @@ class XLMTokenizer(PreTrainedTokenizer): self.ja_word_tokenizer = None self.zh_word_tokenizer = None - self.encoder = json.load(open(vocab_file, encoding="utf-8")) + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) self.decoder = {v:k for k,v in self.encoder.items()} - merges = open(merges_file, encoding='utf-8').read().split('\n')[:-1] + with open(merges_file, encoding='utf-8') as merges_handle: + merges = merges_handle.read().split('\n')[:-1] merges = [tuple(merge.split()[:2]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} @@ -758,9 +764,9 @@ class XLMTokenizer(PreTrainedTokenizer): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. - A RoBERTa sequence has the following format: + A XLM sequence has the following format: single sequence: X - pair of sequences: A B + pair of sequences: A B """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] @@ -781,7 +787,7 @@ class XLMTokenizer(PreTrainedTokenizer): special tokens for the model Returns: - A list of integers in the range [0, 1]: 0 for a special token, 1 for a sequence token. 
+ A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: diff --git a/transformers/tokenization_xlnet.py b/transformers/tokenization_xlnet.py index deae8de336b..a8369df67b4 100644 --- a/transformers/tokenization_xlnet.py +++ b/transformers/tokenization_xlnet.py @@ -60,6 +60,7 @@ class XLNetTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + padding_side = "left" def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, @@ -74,6 +75,7 @@ class XLNetTokenizer(PreTrainedTokenizer): self.max_len_single_sentence = self.max_len - 2 # take into account special tokens self.max_len_sentences_pair = self.max_len - 3 # take into account special tokens + self._pad_token_type_id = 3 try: import sentencepiece as spm @@ -141,7 +143,7 @@ class XLNetTokenizer(PreTrainedTokenizer): pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1) new_pieces = [] for piece in pieces: - if len(piece) > 1 and piece[-1] == ',' and piece[-2].isdigit(): + if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit(): cur_pieces = self.sp_model.EncodeAsPieces( piece[:-1].replace(SPIECE_UNDERLINE, '')) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: @@ -185,9 +187,9 @@ class XLNetTokenizer(PreTrainedTokenizer): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. - A RoBERTa sequence has the following format: - single sequence: X - pair of sequences: A B + An XLNet sequence has the following format: + single sequence: X + pair of sequences: A B """ sep = [self.sep_token_id] cls = [self.cls_token_id] @@ -208,7 +210,7 @@ class XLNetTokenizer(PreTrainedTokenizer): special tokens for the model Returns: - A list of integers in the range [0, 1]: 0 for a special token, 1 for a sequence token. + A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: @@ -224,10 +226,10 @@ class XLNetTokenizer(PreTrainedTokenizer): def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. - A BERT sequence pair mask has the following format: + An XLNet sequence pair mask has the following format: 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 2 | first sequence | second sequence | CLS segment ID - + if token_ids_1 is None, only returns the first portion of the mask (0's). """ sep = [self.sep_token_id] diff --git a/utils/download_glue_data.py b/utils/download_glue_data.py new file mode 100644 index 00000000000..de8cfa9e732 --- /dev/null +++ b/utils/download_glue_data.py @@ -0,0 +1,142 @@ +''' Script for downloading all GLUE data. +Original source: https://gist.github.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e + +Note: for legal reasons, we are unable to host MRPC. +You can either use the version hosted by the SentEval team, which is already tokenized, +or you can download the original data from (https://download.microsoft.com/download/D/4/6/D46FF87A-F6B9-4252-AA8B-3604ED519838/MSRParaphraseCorpus.msi) and extract the data from it manually. +For Windows users, you can run the .msi file. For Mac and Linux users, consider an external library such as 'cabextract' (see below for an example). 
+You should then rename and place specific files in a folder (see below for an example). + +mkdir MRPC +cabextract MSRParaphraseCorpus.msi -d MRPC +cat MRPC/_2DEC3DBE877E4DB192D17C0256E90F1D | tr -d $'\r' > MRPC/msr_paraphrase_train.txt +cat MRPC/_D7B391F9EAFF4B1B8BCE8F21B20B1B61 | tr -d $'\r' > MRPC/msr_paraphrase_test.txt +rm MRPC/_* +rm MSRParaphraseCorpus.msi + +1/30/19: It looks like SentEval is no longer hosting their extracted and tokenized MRPC data, so you'll need to download the data from the original source for now. +2/11/19: It looks like SentEval actually *is* hosting the extracted data. Hooray! +''' + +import os +import sys +import shutil +import argparse +import tempfile +import urllib.request +import zipfile + +TASKS = ["CoLA", "SST", "MRPC", "QQP", "STS", "MNLI", "SNLI", "QNLI", "RTE", "WNLI", "diagnostic"] +TASK2PATH = {"CoLA":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FCoLA.zip?alt=media&token=46d5e637-3411-4188-bc44-5809b5bfb5f4', + "SST":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-44a2-b9b4-cf6337f84ac8', + "MRPC":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc', + "QQP":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQQP.zip?alt=media&token=700c6acf-160d-4d89-81d1-de4191d02cb5', + "STS":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSTS-B.zip?alt=media&token=bddb94a7-8706-4e0d-a694-1109e12273b5', + "MNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce', + "SNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df', + "QNLI": 'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FQNLIv2.zip?alt=media&token=6fdcf570-0fc5-4631-8456-9505272d1601', + "RTE":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-4f19-8ea2-9e1840f077fb', + "WNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-4bd7-99a5-5e00222e0faf', + "diagnostic":'https://storage.googleapis.com/mtl-sentence-representations.appspot.com/tsvsWithoutLabels%2FAX.tsv?GoogleAccessId=firebase-adminsdk-0khhl@mtl-sentence-representations.iam.gserviceaccount.com&Expires=2498860800&Signature=DuQ2CSPt2Yfre0C%2BiISrVYrIFaZH1Lc7hBVZDD4ZyR7fZYOMNOUGpi8QxBmTNOrNPjR3z1cggo7WXFfrgECP6FBJSsURv8Ybrue8Ypt%2FTPxbuJ0Xc2FhDi%2BarnecCBFO77RSbfuz%2Bs95hRrYhTnByqu3U%2FYZPaj3tZt5QdfpH2IUROY8LiBXoXS46LE%2FgOQc%2FKN%2BA9SoscRDYsnxHfG0IjXGwHN%2Bf88q6hOmAxeNPx6moDulUF6XMUAaXCSFU%2BnRO2RDL9CapWxj%2BDl7syNyHhB7987hZ80B%2FwFkQ3MEs8auvt5XW1%2Bd4aCU7ytgM69r8JDCwibfhZxpaa4gd50QXQ%3D%3D'} + +MRPC_TRAIN = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt' +MRPC_TEST = 'https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt' + +def download_and_extract(task, data_dir): + print("Downloading and extracting %s..." 
% task) + data_file = "%s.zip" % task + urllib.request.urlretrieve(TASK2PATH[task], data_file) + with zipfile.ZipFile(data_file) as zip_ref: + zip_ref.extractall(data_dir) + os.remove(data_file) + print("\tCompleted!") + +def format_mrpc(data_dir, path_to_data): + print("Processing MRPC...") + mrpc_dir = os.path.join(data_dir, "MRPC") + if not os.path.isdir(mrpc_dir): + os.mkdir(mrpc_dir) + if path_to_data: + mrpc_train_file = os.path.join(path_to_data, "msr_paraphrase_train.txt") + mrpc_test_file = os.path.join(path_to_data, "msr_paraphrase_test.txt") + else: + print("Local MRPC data not specified, downloading data from %s" % MRPC_TRAIN) + mrpc_train_file = os.path.join(mrpc_dir, "msr_paraphrase_train.txt") + mrpc_test_file = os.path.join(mrpc_dir, "msr_paraphrase_test.txt") + urllib.request.urlretrieve(MRPC_TRAIN, mrpc_train_file) + urllib.request.urlretrieve(MRPC_TEST, mrpc_test_file) + assert os.path.isfile(mrpc_train_file), "Train data not found at %s" % mrpc_train_file + assert os.path.isfile(mrpc_test_file), "Test data not found at %s" % mrpc_test_file + urllib.request.urlretrieve(TASK2PATH["MRPC"], os.path.join(mrpc_dir, "dev_ids.tsv")) + + dev_ids = [] + with open(os.path.join(mrpc_dir, "dev_ids.tsv"), encoding="utf8") as ids_fh: + for row in ids_fh: + dev_ids.append(row.strip().split('\t')) + + with open(mrpc_train_file, encoding="utf8") as data_fh, \ + open(os.path.join(mrpc_dir, "train.tsv"), 'w', encoding="utf8") as train_fh, \ + open(os.path.join(mrpc_dir, "dev.tsv"), 'w', encoding="utf8") as dev_fh: + header = data_fh.readline() + train_fh.write(header) + dev_fh.write(header) + for row in data_fh: + label, id1, id2, s1, s2 = row.strip().split('\t') + if [id1, id2] in dev_ids: + dev_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2)) + else: + train_fh.write("%s\t%s\t%s\t%s\t%s\n" % (label, id1, id2, s1, s2)) + + with open(mrpc_test_file, encoding="utf8") as data_fh, \ + open(os.path.join(mrpc_dir, "test.tsv"), 'w', encoding="utf8") as test_fh: + header = data_fh.readline() + test_fh.write("index\t#1 ID\t#2 ID\t#1 String\t#2 String\n") + for idx, row in enumerate(data_fh): + label, id1, id2, s1, s2 = row.strip().split('\t') + test_fh.write("%d\t%s\t%s\t%s\t%s\n" % (idx, id1, id2, s1, s2)) + print("\tCompleted!") + +def download_diagnostic(data_dir): + print("Downloading and extracting diagnostic...") + if not os.path.isdir(os.path.join(data_dir, "diagnostic")): + os.mkdir(os.path.join(data_dir, "diagnostic")) + data_file = os.path.join(data_dir, "diagnostic", "diagnostic.tsv") + urllib.request.urlretrieve(TASK2PATH["diagnostic"], data_file) + print("\tCompleted!") + return + +def get_tasks(task_names): + task_names = task_names.split(',') + if "all" in task_names: + tasks = TASKS + else: + tasks = [] + for task_name in task_names: + assert task_name in TASKS, "Task %s not found!" 
% task_name + tasks.append(task_name) + return tasks + +def main(arguments): + parser = argparse.ArgumentParser() + parser.add_argument('--data_dir', help='directory to save data to', type=str, default='glue_data') + parser.add_argument('--tasks', help='tasks to download data for as a comma separated string', + type=str, default='all') + parser.add_argument('--path_to_mrpc', help='path to directory containing extracted MRPC data, msr_paraphrase_train.txt and msr_paraphrase_text.txt', + type=str, default='') + args = parser.parse_args(arguments) + + if not os.path.isdir(args.data_dir): + os.mkdir(args.data_dir) + tasks = get_tasks(args.tasks) + + for task in tasks: + if task == 'MRPC': + format_mrpc(args.data_dir, args.path_to_mrpc) + elif task == 'diagnostic': + download_diagnostic(args.data_dir) + else: + download_and_extract(task, args.data_dir) + + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/utils/link_tester.py b/utils/link_tester.py new file mode 100644 index 00000000000..fe3990d28c0 --- /dev/null +++ b/utils/link_tester.py @@ -0,0 +1,79 @@ +""" Link tester. + +This little utility reads all the python files in the repository, +scans for links pointing to S3 and tests the links one by one. Raises an error +at the end of the scan if at least one link was reported broken. +""" +import os +import re +import sys + +import requests + + +REGEXP_FIND_S3_LINKS = r"""([\"'])(https:\/\/s3)(.*)?\1""" + + +def list_python_files_in_repository(): + """ List all python files in the repository. + + This function assumes that the script is executed in the root folder. + """ + source_code_files = [] + for path, subdirs, files in os.walk("."): + if "templates" in path: + continue + for name in files: + if ".py" in name and ".pyc" not in name: + path_to_files = os.path.join(path, name) + source_code_files.append(path_to_files) + + return source_code_files + + +def find_all_links(file_paths): + links = [] + for path in file_paths: + links += scan_code_for_links(path) + + return links + + +def scan_code_for_links(source): + """ Scans the file to find links using a regular expression. + Returns a list of links. + """ + with open(source, 'r') as content: + content = content.read() + raw_links = re.findall(REGEXP_FIND_S3_LINKS, content) + links = [prefix + suffix for _, prefix, suffix in raw_links] + + return links + + +def check_all_links(links): + """ Check that the provided links are valid. + + Links are considered valid if a HEAD request to the server + returns a 200 status code. + """ + broken_links = [] + for link in links: + head = requests.head(link) + if head.status_code != 200: + broken_links.append(link) + + return broken_links + + +if __name__ == "__main__": + file_paths = list_python_files_in_repository() + links = find_all_links(file_paths) + broken_links = check_all_links(links) + print("Looking for broken links to pre-trained models/configs/tokenizers...") + if broken_links: + print("The following links did not respond:") + for link in broken_links: + print("- {}".format(link)) + sys.exit(1) + print("All links are ok.") diff --git a/valohai.yaml b/valohai.yaml new file mode 100644 index 00000000000..2573551b4e2 --- /dev/null +++ b/valohai.yaml @@ -0,0 +1,94 @@ +--- + +- step: + name: Execute python examples/run_glue.py + image: pytorch/pytorch:nightly-devel-cuda10.0-cudnn7 + command: + - python /valohai/repository/utils/download_glue_data.py --data_dir=/glue_data + - pip install -e . 
+ - pip install -r examples/requirements.txt + - python examples/run_glue.py --do_train --data_dir=/glue_data/{parameter-value:task_name} {parameters} + parameters: + - name: model_type + pass-as: --model_type={v} + type: string + default: bert + - name: model_name_or_path + pass-as: --model_name_or_path={v} + type: string + default: bert-base-uncased + - name: task_name + pass-as: --task_name={v} + type: string + default: MRPC + - name: max_seq_length + pass-as: --max_seq_length={v} + description: The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded. + type: integer + default: 128 + - name: per_gpu_train_batch_size + pass-as: --per_gpu_train_batch_size={v} + description: Batch size per GPU/CPU for training. + type: integer + default: 8 + - name: per_gpu_eval_batch_size + pass-as: --per_gpu_eval_batch_size={v} + description: Batch size per GPU/CPU for evaluation. + type: integer + default: 8 + - name: gradient_accumulation_steps + pass-as: --gradient_accumulation_steps={v} + description: Number of updates steps to accumulate before performing a backward/update pass. + type: integer + default: 1 + - name: learning_rate + pass-as: --learning_rate={v} + description: The initial learning rate for Adam. + type: float + default: 0.00005 + - name: adam_epsilon + pass-as: --adam_epsilon={v} + description: Epsilon for Adam optimizer. + type: float + default: 0.00000001 + - name: max_grad_norm + pass-as: --max_grad_norm={v} + description: Max gradient norm. + type: float + default: 1.0 + - name: num_train_epochs + pass-as: --num_train_epochs={v} + description: Total number of training epochs to perform. + type: integer + default: 3 + - name: max_steps + pass-as: --max_steps={v} + description: If > 0, set total number of training steps to perform. Override num_train_epochs. + type: integer + default: -1 + - name: warmup_steps + pass-as: --warmup_steps={v} + description: Linear warmup over warmup_steps. + type: integer + default: -1 + - name: logging_steps + pass-as: --logging_steps={v} + description: Log every X updates steps. + type: integer + default: 25 + - name: save_steps + pass-as: --save_steps={v} + description: Save checkpoint every X updates steps. + type: integer + default: -1 + - name: output_dir + pass-as: --output_dir={v} + type: string + default: /valohai/outputs + - name: evaluate_during_training + description: Run evaluation during training at each logging step. + type: flag + default: true + - name: do_lower_case + description: Set this flag if you are using an uncased model. + type: flag
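(Editor's closing aside, not part of the diff: the valohai.yaml step above fetches GLUE via the new utils/download_glue_data.py script before training. Locally, one might run it as a script, e.g. python utils/download_glue_data.py --data_dir glue_data --tasks all, or drive it programmatically as sketched below, assuming the repository root is on sys.path; the task list and target directory are illustrative only.)

# Hedged sketch; main() accepts an argv-style list and parses it with argparse (see the script above).
from utils.download_glue_data import main as download_glue

download_glue(["--data_dir", "glue_data", "--tasks", "MRPC,RTE"])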