From 691176283d81f5927218d81ff027b84097dd2a37 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Thu, 29 Oct 2020 13:38:11 -0400 Subject: [PATCH] Add a template for examples and apply it for mlm and plm examples (#8153) * Add a template for example scripts and apply it to mlm * Formatting * Fix test * Add plm script * Add a template for example scripts and apply it to mlm * Formatting * Fix test * Add plm script * Add a template for example scripts and apply it to mlm * Formatting * Fix test * Add plm script * Styling --- .circleci/config.yml | 6 +- Makefile | 2 +- examples/language-modeling/run_clm.py | 4 +- examples/language-modeling/run_mlm.py | 310 +++++ examples/language-modeling/run_plm.py | 310 +++++ examples/test_examples.py | 18 +- src/transformers/data/data_collator.py | 2 +- .../adding_a_new_example_script/README.md | 29 +- .../cookiecutter.json | 8 + .../adding_a_new_example_script/run_xxx.py | 702 ------------ .../adding_a_new_example_script/utils_xxx.py | 1011 ----------------- .../run_{{cookiecutter.example_shortcut}}.py | 339 ++++++ 12 files changed, 1000 insertions(+), 1741 deletions(-) create mode 100644 examples/language-modeling/run_mlm.py create mode 100644 examples/language-modeling/run_plm.py create mode 100644 templates/adding_a_new_example_script/cookiecutter.json delete mode 100644 templates/adding_a_new_example_script/run_xxx.py delete mode 100644 templates/adding_a_new_example_script/utils_xxx.py create mode 100644 templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 75be5f9e7e8..524e5a940d7 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -320,9 +320,9 @@ jobs: key: v0.4-code_quality-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: black --check examples templates tests src utils - - run: isort --check-only examples templates tests src utils - - run: flake8 examples templates tests src utils + - run: black --check examples tests src utils + - run: isort --check-only examples tests src utils + - run: flake8 examples tests src utils - run: python utils/style_doc.py src/transformers docs/source --max_len 119 --check_only - run: python utils/check_copies.py - run: python utils/check_dummies.py diff --git a/Makefile b/Makefile index 86dfafac841..93f47fdbb6c 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ .PHONY: modified_only_fixup extra_quality_checks quality style fixup fix-copies test test-examples docs -check_dirs := examples templates tests src utils +check_dirs := examples tests src utils # get modified files since the branch was made fork_point_sha := $(shell git merge-base --fork-point master) diff --git a/examples/language-modeling/run_clm.py b/examples/language-modeling/run_clm.py index 024427e257a..410dabec205 100644 --- a/examples/language-modeling/run_clm.py +++ b/examples/language-modeling/run_clm.py @@ -18,7 +18,7 @@ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, . Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=causal-lm """ -# You can also adapt this script on your own text classification task. Pointers for this are left as comments. +# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. 
import logging import math @@ -325,7 +325,7 @@ def main(): perplexity = math.exp(eval_output["eval_loss"]) results["perplexity"] = perplexity - output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt") + output_eval_file = os.path.join(training_args.output_dir, "eval_results_clm.txt") if trainer.is_world_process_zero(): with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") diff --git a/examples/language-modeling/run_mlm.py b/examples/language-modeling/run_mlm.py new file mode 100644 index 00000000000..e9a5c847d4f --- /dev/null +++ b/examples/language-modeling/run_mlm.py @@ -0,0 +1,310 @@ +# coding=utf-8 +# Copyright 2020 The HuggingFace Team All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) on a text file or a dataset. + +Here is the full list of checkpoints on the hub that can be fine-tuned by this script: +https://huggingface.co/models?filter=masked-lm +""" +# You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. + +import logging +import math +import os +import sys +from dataclasses import dataclass, field +from typing import Optional + +from datasets import load_dataset + +import transformers +from transformers import ( + CONFIG_MAPPING, + MODEL_FOR_MASKED_LM_MAPPING, + AutoConfig, + AutoModelForMaskedLM, + AutoTokenizer, + DataCollatorForLanguageModeling, + HfArgumentParser, + Trainer, + TrainingArguments, + set_seed, +) +from transformers.trainer_utils import is_main_process + + +logger = logging.getLogger(__name__) +MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) +MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) + + +@dataclass +class ModelArguments: + """ + Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. + """ + + model_name_or_path: Optional[str] = field( + default=None, + metadata={ + "help": "The model checkpoint for weights initialization." + "Don't set if you want to train a model from scratch." 
+ }, + ) + model_type: Optional[str] = field( + default=None, + metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, + ) + config_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} + ) + tokenizer_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} + ) + cache_dir: Optional[str] = field( + default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} + ) + use_fast_tokenizer: bool = field( + default=True, + metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, + ) + + +@dataclass +class DataTrainingArguments: + """ + Arguments pertaining to what data we are going to input our model for training and eval. + """ + + dataset_name: Optional[str] = field( + default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} + ) + dataset_config_name: Optional[str] = field( + default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} + ) + train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) + validation_file: Optional[str] = field( + default=None, + metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, + ) + overwrite_cache: bool = field( + default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} + ) + max_seq_length: Optional[int] = field( + default=None, + metadata={ + "help": "The maximum total input sequence length after tokenization. Sequences longer " + "than this will be truncated." + }, + ) + preprocessing_num_workers: Optional[int] = field( + default=None, + metadata={"help": "The number of processes to use for the preprocessing."}, + ) + mlm_probability: float = field( + default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} + ) + + def __post_init__(self): + if self.dataset_name is None and self.train_file is None and self.validation_file is None: + raise ValueError("Need either a dataset name or a training/validation file.") + else: + if self.train_file is not None: + extension = self.train_file.split(".")[-1] + assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." + if self.validation_file is not None: + extension = self.validation_file.split(".")[-1] + assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." + + +def main(): + # See all possible arguments in src/transformers/training_args.py + # or by passing the --help flag to this script. + # We now keep distinct sets of args, for a cleaner separation of concerns. + + parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) + if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): + # If we pass only one argument to the script and it's the path to a json file, + # let's parse it to get our arguments. 
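As a rough, hypothetical illustration (not part of the patch): the JSON file passed as the single argument simply maps the dataclass field names defined above, plus the usual `TrainingArguments` fields, to values. The file name and values in this sketch are invented.

```python
# Hypothetical argument file for `parser.parse_json_file` (used just below).
# Keys mirror the ModelArguments/DataTrainingArguments/TrainingArguments fields;
# the file name and the values are made up for this sketch.
import json

args = {
    "model_name_or_path": "distilroberta-base",
    "train_file": "./tests/fixtures/sample_text.txt",
    "validation_file": "./tests/fixtures/sample_text.txt",
    "output_dir": "/tmp/test-mlm",
    "do_train": True,
    "do_eval": True,
}
with open("mlm_args.json", "w") as f:
    json.dump(args, f, indent=2)
# The script would then be launched as: python run_mlm.py mlm_args.json
```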
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) + else: + model_args, data_args, training_args = parser.parse_args_into_dataclasses() + + if ( + os.path.exists(training_args.output_dir) + and os.listdir(training_args.output_dir) + and training_args.do_train + and not training_args.overwrite_output_dir + ): + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty." + " Use --overwrite_output_dir to overcome." + ) + + # Setup logging + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN, + ) + + # Log a small summary on each process: + logger.warning( + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" + ) + # Set the verbosity of the Transformers logger to info (on main process only): + if is_main_process(training_args.local_rank): + transformers.utils.logging.set_verbosity_info() + logger.info("Training/evaluation parameters %s", training_args) + + # Set seed before initializing model. + set_seed(training_args.seed) + + # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) + # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ + # (the dataset will be downloaded automatically from the datasets Hub). + # + # For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this + # behavior (see below). + # + # In distributed training, the load_dataset function guarantees that only one local process can concurrently + # download the dataset. + if data_args.dataset_name is not None: + # Downloading and loading a dataset from the hub. + datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) + else: + data_files = {} + if data_args.train_file is not None: + data_files["train"] = data_args.train_file + if data_args.validation_file is not None: + data_files["validation"] = data_args.validation_file + extension = data_args.train_file.split(".")[-1] + if extension == "txt": + extension = "text" + datasets = load_dataset(extension, data_files=data_files) + # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at + # https://huggingface.co/docs/datasets/loading_datasets.html. + + # Load pretrained model and tokenizer + # + # Distributed training: + # The .from_pretrained methods guarantee that only one local process can concurrently + # download model & vocab.
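For context, here is a small standalone sketch (not part of the patch) of what the file-based branch above resolves to when local `.txt` files are passed; the file paths are invented.

```python
# Equivalent of the fallback branch above for plain-text files: the generic "text"
# loader yields one {"text": ...} example per line. File paths are placeholders.
from datasets import load_dataset

data_files = {"train": "train.txt", "validation": "validation.txt"}
datasets = load_dataset("text", data_files=data_files)
print(datasets["train"][0])  # e.g. {"text": "first line of train.txt"}
```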
+ if model_args.config_name: + config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir) + elif model_args.model_name_or_path: + config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir) + else: + config = CONFIG_MAPPING[model_args.model_type]() + logger.warning("You are instantiating a new config instance from scratch.") + + if model_args.tokenizer_name: + tokenizer = AutoTokenizer.from_pretrained( + model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer + ) + elif model_args.model_name_or_path: + tokenizer = AutoTokenizer.from_pretrained( + model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer + ) + else: + raise ValueError( + "You are instantiating a new tokenizer from scratch. This is not supported by this script." + "You can do it from another script, save it, and load it from here, using --tokenizer_name." + ) + + if model_args.model_name_or_path: + model = AutoModelForMaskedLM.from_pretrained( + model_args.model_name_or_path, + from_tf=bool(".ckpt" in model_args.model_name_or_path), + config=config, + cache_dir=model_args.cache_dir, + ) + else: + logger.info("Training new model from scratch") + model = AutoModelForMaskedLM.from_config(config) + + model.resize_token_embeddings(len(tokenizer)) + + # Preprocessing the datasets. + # First we tokenize all the texts. + if training_args.do_train: + column_names = datasets["train"].column_names + else: + column_names = datasets["validation"].column_names + text_column_name = "text" if "text" in column_names else column_names[0] + + def tokenize_function(examples): + # Remove empty lines + examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()] + return tokenizer(examples["text"], truncation=True, max_length=data_args.max_seq_length) + + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=[text_column_name], + load_from_cache_file=not data_args.overwrite_cache, + ) + + # Data collator + # This one will take care of randomly masking the tokens. 
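As a standalone sketch (not part of the patch) of what the collator instantiated on the next line does, assuming a checkpoint such as `distilroberta-base`:

```python
# Minimal illustration of DataCollatorForLanguageModeling: it collates and pads a
# batch, masks a fraction (mlm_probability) of tokens, and sets labels to -100 at
# all unmasked positions so the loss is only computed on the masked ones.
from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tokenizer = AutoTokenizer.from_pretrained("distilroberta-base")
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)

features = [tokenizer("Hello world."), tokenizer("Masked language modeling example.")]
batch = collator(features)
print(batch["input_ids"])  # some ids replaced by tokenizer.mask_token_id
print(batch["labels"])     # -100 everywhere except at the masked positions
```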
+ data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability) + + # Initialize our Trainer + trainer = Trainer( + model=model, + args=training_args, + train_dataset=tokenized_datasets["train"] if training_args.do_train else None, + eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, + tokenizer=tokenizer, + data_collator=data_collator, + ) + + # Training + if training_args.do_train: + trainer.train( + model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None + ) + trainer.save_model() # Saves the tokenizer too for easy upload + + # Evaluation + results = {} + if training_args.do_eval: + logger.info("*** Evaluate ***") + + eval_output = trainer.evaluate() + + perplexity = math.exp(eval_output["eval_loss"]) + results["perplexity"] = perplexity + + output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm.txt") + if trainer.is_world_process_zero(): + with open(output_eval_file, "w") as writer: + logger.info("***** Eval results *****") + for key, value in results.items(): + logger.info(f" {key} = {value}") + writer.write(f"{key} = {value}\n") + + return results + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main() diff --git a/examples/language-modeling/run_plm.py b/examples/language-modeling/run_plm.py new file mode 100644 index 00000000000..ce9cfb27fd7 --- /dev/null +++ b/examples/language-modeling/run_plm.py @@ -0,0 +1,310 @@ +# coding=utf-8 +# Copyright 2020 The HuggingFace Team All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Fine-tuning the library models for permutation language modeling. +""" +# You can also adapt this script on your own permutation language modeling task. Pointers for this are left as comments. + +import logging +import math +import os +import sys +from dataclasses import dataclass, field +from typing import Optional + +from datasets import load_dataset + +import transformers +from transformers import ( + AutoConfig, + AutoTokenizer, + DataCollatorForPermutationLanguageModeling, + HfArgumentParser, + Trainer, + TrainingArguments, + XLNetConfig, + XLNetLMHeadModel, + set_seed, +) +from transformers.trainer_utils import is_main_process + + +logger = logging.getLogger(__name__) + + +@dataclass +class ModelArguments: + """ + Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. + """ + + model_name_or_path: Optional[str] = field( + default=None, + metadata={ + "help": "The model checkpoint for weights initialization." + "Don't set if you want to train a model from scratch." 
+ }, + ) + config_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} + ) + tokenizer_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} + ) + cache_dir: Optional[str] = field( + default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} + ) + use_fast_tokenizer: bool = field( + default=True, + metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, + ) + + +@dataclass +class DataTrainingArguments: + """ + Arguments pertaining to what data we are going to input our model for training and eval. + """ + + dataset_name: Optional[str] = field( + default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} + ) + dataset_config_name: Optional[str] = field( + default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} + ) + train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) + validation_file: Optional[str] = field( + default=None, + metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, + ) + overwrite_cache: bool = field( + default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} + ) + max_seq_length: Optional[int] = field( + default=None, + metadata={ + "help": "The maximum total input sequence length after tokenization. Sequences longer " + "than this will be truncated." + }, + ) + preprocessing_num_workers: Optional[int] = field( + default=None, + metadata={"help": "The number of processes to use for the preprocessing."}, + ) + plm_probability: float = field( + default=1 / 6, + metadata={ + "help": "Ratio of length of a span of masked tokens to surrounding context length for " + "permutation language modeling." + }, + ) + max_span_length: int = field( + default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} + ) + + def __post_init__(self): + if self.dataset_name is None and self.train_file is None and self.validation_file is None: + raise ValueError("Need either a dataset name or a training/validation file.") + else: + if self.train_file is not None: + extension = self.train_file.split(".")[-1] + assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." + if self.validation_file is not None: + extension = self.validation_file.split(".")[-1] + assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." + + +def main(): + # See all possible arguments in src/transformers/training_args.py + # or by passing the --help flag to this script. + # We now keep distinct sets of args, for a cleaner separation of concerns. + + parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) + if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): + # If we pass only one argument to the script and it's the path to a json file, + # let's parse it to get our arguments. 
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) + else: + model_args, data_args, training_args = parser.parse_args_into_dataclasses() + + if ( + os.path.exists(training_args.output_dir) + and os.listdir(training_args.output_dir) + and training_args.do_train + and not training_args.overwrite_output_dir + ): + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty." + " Use --overwrite_output_dir to overcome." + ) + + # Setup logging + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN, + ) + + # Log a small summary on each process: + logger.warning( + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" + ) + # Set the verbosity of the Transformers logger to info (on main process only): + if is_main_process(training_args.local_rank): + transformers.utils.logging.set_verbosity_info() + logger.info("Training/evaluation parameters %s", training_args) + + # Set seed before initializing model. + set_seed(training_args.seed) + + # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) + # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ + # (the dataset will be downloaded automatically from the datasets Hub). + # + # For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this + # behavior (see below). + # + # In distributed training, the load_dataset function guarantees that only one local process can concurrently + # download the dataset. + if data_args.dataset_name is not None: + # Downloading and loading a dataset from the hub. + datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) + else: + data_files = {} + if data_args.train_file is not None: + data_files["train"] = data_args.train_file + if data_args.validation_file is not None: + data_files["validation"] = data_args.validation_file + extension = data_args.train_file.split(".")[-1] + if extension == "txt": + extension = "text" + datasets = load_dataset(extension, data_files=data_files) + # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at + # https://huggingface.co/docs/datasets/loading_datasets.html. + + # Load pretrained model and tokenizer + # + # Distributed training: + # The .from_pretrained methods guarantee that only one local process can concurrently + # download model & vocab.
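Further down, this script builds a `DataCollatorForPermutationLanguageModeling`; as a rough standalone sketch (not part of the patch) of that collator's inputs and outputs, assuming an XLNet checkpoint:

```python
# Illustration of the permutation-LM collator used later in this script. It expects
# an even number of tokens per sequence and returns, besides input_ids and labels,
# a perm_mask and a target_mapping of shape (batch, seq_len, seq_len) for XLNet.
import torch
from transformers import AutoTokenizer, DataCollatorForPermutationLanguageModeling

tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased")
collator = DataCollatorForPermutationLanguageModeling(
    tokenizer=tokenizer, plm_probability=1 / 6, max_span_length=5
)

example = torch.tensor([35, 42, 63, 19, 24, 5, 17, 9])  # arbitrary ids, even length
batch = collator([example])
print(batch["perm_mask"].shape)       # torch.Size([1, 8, 8])
print(batch["target_mapping"].shape)  # torch.Size([1, 8, 8])
```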
+ if model_args.config_name: + config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir) + elif model_args.model_name_or_path: + config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir) + else: + config = XLNetConfig() + logger.warning("You are instantiating a new config instance from scratch.") + + if model_args.tokenizer_name: + tokenizer = AutoTokenizer.from_pretrained( + model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer + ) + elif model_args.model_name_or_path: + tokenizer = AutoTokenizer.from_pretrained( + model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer + ) + else: + raise ValueError( + "You are instantiating a new tokenizer from scratch. This is not supported by this script." + "You can do it from another script, save it, and load it from here, using --tokenizer_name." + ) + + if model_args.model_name_or_path: + model = XLNetLMHeadModel.from_pretrained( + model_args.model_name_or_path, + from_tf=bool(".ckpt" in model_args.model_name_or_path), + config=config, + cache_dir=model_args.cache_dir, + ) + else: + logger.info("Training new model from scratch") + model = XLNetLMHeadModel.from_config(config) + + model.resize_token_embeddings(len(tokenizer)) + + # Preprocessing the datasets. + # First we tokenize all the texts. + if training_args.do_train: + column_names = datasets["train"].column_names + else: + column_names = datasets["validation"].column_names + text_column_name = "text" if "text" in column_names else column_names[0] + + def tokenize_function(examples): + # Remove empty lines + examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()] + return tokenizer(examples["text"], truncation=True, max_length=data_args.max_seq_length) + + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=[text_column_name], + load_from_cache_file=not data_args.overwrite_cache, + ) + + # Data collator + data_collator = DataCollatorForPermutationLanguageModeling( + tokenizer=tokenizer, + plm_probability=data_args.plm_probability, + max_span_length=data_args.max_span_length, + ) + + # Initialize our Trainer + trainer = Trainer( + model=model, + args=training_args, + train_dataset=tokenized_datasets["train"] if training_args.do_train else None, + eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, + tokenizer=tokenizer, + data_collator=data_collator, + ) + + # Training + if training_args.do_train: + trainer.train( + model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None + ) + trainer.save_model() # Saves the tokenizer too for easy upload + + # Evaluation + results = {} + if training_args.do_eval: + logger.info("*** Evaluate ***") + + eval_output = trainer.evaluate() + + perplexity = math.exp(eval_output["eval_loss"]) + results["perplexity"] = perplexity + + output_eval_file = os.path.join(training_args.output_dir, "eval_results_plm.txt") + if trainer.is_world_process_zero(): + with open(output_eval_file, "w") as writer: + logger.info("***** Eval results *****") + for key, value in results.items(): + logger.info(f" {key} = {value}") + writer.write(f"{key} = {value}\n") + + return results + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main() diff --git a/examples/test_examples.py 
b/examples/test_examples.py index 240e7f010ad..4eda398537d 100644 --- a/examples/test_examples.py +++ b/examples/test_examples.py @@ -37,7 +37,7 @@ if SRC_DIRS is not None: import run_clm import run_generation import run_glue - import run_language_modeling + import run_mlm import run_pl_glue import run_squad @@ -160,31 +160,29 @@ class ExamplesTests(TestCasePlus): result = run_clm.main() self.assertLess(result["perplexity"], 100) - def test_run_language_modeling(self): + def test_run_mlm(self): stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" - run_language_modeling.py + run_mlm.py --model_name_or_path distilroberta-base - --model_type roberta - --mlm - --line_by_line - --train_data_file ./tests/fixtures/sample_text.txt - --eval_data_file ./tests/fixtures/sample_text.txt + --train_file ./tests/fixtures/sample_text.txt + --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval + --prediction_loss_only --num_train_epochs=1 - """.split() + """.split() if torch_device != "cuda": testargs.append("--no_cuda") with patch.object(sys, "argv", testargs): - result = run_language_modeling.main() + result = run_mlm.main() self.assertLess(result["perplexity"], 42) def test_run_squad(self): diff --git a/src/transformers/data/data_collator.py b/src/transformers/data/data_collator.py index c0a41ee8382..080aadca2b4 100644 --- a/src/transformers/data/data_collator.py +++ b/src/transformers/data/data_collator.py @@ -520,7 +520,7 @@ class DataCollatorForPermutationLanguageModeling: perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1))) ) & masked_indices[i] - return inputs, perm_mask, target_mapping, labels + return inputs.long(), perm_mask, target_mapping, labels.long() @dataclass diff --git a/templates/adding_a_new_example_script/README.md b/templates/adding_a_new_example_script/README.md index f924c097431..03be9448134 100644 --- a/templates/adding_a_new_example_script/README.md +++ b/templates/adding_a_new_example_script/README.md @@ -1,15 +1,22 @@ -# How to add a new example script in 🤗Transformers +# How to add a new example script in 🤗 Transformers -This folder provide a template for adding a new example script implementing a training or inference task with the models in the 🤗Transformers library. -Add tests! +This folder provides a template for adding a new example script implementing a training or inference task with the +models in the 🤗 Transformers library. To use it, you will need to install cookiecutter: +``` +pip install cookiecutter +``` +or refer to the installation page of the [cookiecutter documentation](https://cookiecutter.readthedocs.io/). +You can then run the following command inside the `examples` folder of the transformers repo: +``` +cookiecutter ../templates/adding_a_new_example_script/ +``` +and answer the questions asked, which will generate a new folder where you will find a pre-filled template for your +example following the best practices we recommend for them. -These folder can be put in a subdirectory under your example's name, like `examples/deebert`. +Adjust the way the data is preprocessed, the model is loaded or the Trainer is instantiated, then when you're happy, add +a `README.md` in the folder (or complete the existing one if you added a script to an existing folder) telling a user +how to run your script.
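If you prefer to script that generation step, cookiecutter also exposes a Python API; the sketch below (not part of the patch) passes the answers non-interactively, mirroring the prompts defined in the `cookiecutter.json` added below.

```python
# Non-interactive equivalent of the `cookiecutter` CLI call above, run from the
# `examples` folder. The answers correspond to the keys in cookiecutter.json.
from cookiecutter.main import cookiecutter

cookiecutter(
    "../templates/adding_a_new_example_script/",
    no_input=True,
    extra_context={
        "example_name": "text classification",
        "model_class": "AutoModel",
        "authors": "The HuggingFace Team",
        "can_train_from_scratch": "False",
    },
)
```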
- -Best Practices: -- use `Trainer`/`TFTrainer` -- write an @slow test that checks that your model can train on one batch and get a low loss. - - this test should use cuda if it's available. (e.g. by checking `transformers.torch_device`) -- adding an `eval_xxx.py` script that can evaluate a pretrained checkpoint. -- tweet about your new example with a carbon screenshot of how to run it and tag @huggingface +Make a PR to the 🤗 Transformers repo. Don't forget to tweet about your new example with a carbon screenshot of how to +run it and tag @huggingface! diff --git a/templates/adding_a_new_example_script/cookiecutter.json b/templates/adding_a_new_example_script/cookiecutter.json new file mode 100644 index 00000000000..fbd3ca1029b --- /dev/null +++ b/templates/adding_a_new_example_script/cookiecutter.json @@ -0,0 +1,8 @@ +{ + "example_name": "text classification", + "directory_name": "{{cookiecutter.example_name|lower|replace(' ', '-')}}", + "example_shortcut": "{{cookiecutter.directory_name}}", + "model_class": "AutoModel", + "authors": "The HuggingFace Team", + "can_train_from_scratch": ["True", "False"] +} \ No newline at end of file diff --git a/templates/adding_a_new_example_script/run_xxx.py b/templates/adding_a_new_example_script/run_xxx.py deleted file mode 100644 index 77b1b1bafd5..00000000000 --- a/templates/adding_a_new_example_script/run_xxx.py +++ /dev/null @@ -1,702 +0,0 @@ -# coding=utf-8 -# Copyright 2018 XXX. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Finetuning the library models for task XXX.""" - - -import argparse -import glob -import logging -import os -import random - -import numpy as np -import torch -from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset -from torch.utils.data.distributed import DistributedSampler -from tqdm import tqdm, trange - -from transformers import ( - MODEL_FOR_QUESTION_ANSWERING_MAPPING, - WEIGHTS_NAME, - AdamW, - AutoConfig, - AutoModelForQuestionAnswering, - AutoTokenizer, - get_linear_schedule_with_warmup, -) -from utils_squad import ( - RawResult, - RawResultExtended, - convert_examples_to_features, - read_squad_examples, - write_predictions, - write_predictions_extended, -) - -# The following import is the official SQuAD evaluation script (2.0). 
-# You can remove it from the dependencies if you are using this script outside of the library -# We've added it here for automated tests (see examples/test_examples.py file) -from utils_squad_evaluate import EVAL_OPTS -from utils_squad_evaluate import main as evaluate_on_squad - - -try: - from torch.utils.tensorboard import SummaryWriter -except ImportError: - from tensorboardX import SummaryWriter - - -logger = logging.getLogger(__name__) - -MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) -MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) - - -def set_seed(args): - random.seed(args.seed) - np.random.seed(args.seed) - torch.manual_seed(args.seed) - if args.n_gpu > 0: - torch.cuda.manual_seed_all(args.seed) - - -def to_list(tensor): - return tensor.detach().cpu().tolist() - - -def train(args, train_dataset, model, tokenizer): - """ Train the model """ - if args.local_rank in [-1, 0]: - tb_writer = SummaryWriter() - - args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) - train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) - train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) - - if args.max_steps > 0: - t_total = args.max_steps - args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 - else: - t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs - - # Prepare optimizer and schedule (linear warmup and decay) - no_decay = ["bias", "LayerNorm.weight"] - optimizer_grouped_parameters = [ - { - "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], - "weight_decay": args.weight_decay, - }, - {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, - ] - optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) - scheduler = get_linear_schedule_with_warmup( - optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total - ) - if args.fp16: - try: - from apex import amp - except ImportError: - raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") - model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) - - # multi-gpu training (should be after apex fp16 initialization) - if args.n_gpu > 1: - model = torch.nn.DataParallel(model) - - # Distributed training (should be after apex fp16 initialization) - if args.local_rank != -1: - model = torch.nn.parallel.DistributedDataParallel( - model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True - ) - - # Train! - logger.info("***** Running training *****") - logger.info(" Num examples = %d", len(train_dataset)) - logger.info(" Num Epochs = %d", args.num_train_epochs) - logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) - logger.info( - " Total train batch size (w. 
parallel, distributed & accumulation) = %d", - args.train_batch_size - * args.gradient_accumulation_steps - * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), - ) - logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) - logger.info(" Total optimization steps = %d", t_total) - - global_step = 0 - tr_loss, logging_loss = 0.0, 0.0 - model.zero_grad() - train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) - set_seed(args) # Added here for reproductibility - for _ in train_iterator: - epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) - for step, batch in enumerate(epoch_iterator): - model.train() - batch = tuple(t.to(args.device) for t in batch) - inputs = { - "input_ids": batch[0], - "attention_mask": batch[1], - "start_positions": batch[3], - "end_positions": batch[4], - } - if args.model_type != "distilbert": - inputs["token_type_ids"] = None if args.model_type == "xlm" else batch[2] - if args.model_type in ["xlnet", "xlm"]: - inputs.update({"cls_index": batch[5], "p_mask": batch[6]}) - outputs = model(**inputs) - loss = outputs[0] # model outputs are always tuple in transformers (see doc) - - if args.n_gpu > 1: - loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training - if args.gradient_accumulation_steps > 1: - loss = loss / args.gradient_accumulation_steps - - if args.fp16: - with amp.scale_loss(loss, optimizer) as scaled_loss: - scaled_loss.backward() - else: - loss.backward() - - tr_loss += loss.item() - if (step + 1) % args.gradient_accumulation_steps == 0: - if args.fp16: - torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) - else: - torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) - - optimizer.step() - scheduler.step() # Update learning rate schedule - model.zero_grad() - global_step += 1 - - if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: - # Log metrics - if ( - args.local_rank == -1 and args.evaluate_during_training - ): # Only evaluate when single GPU otherwise metrics may not average well - results = evaluate(args, model, tokenizer) - for key, value in results.items(): - tb_writer.add_scalar("eval_{}".format(key), value, global_step) - tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) - tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) - logging_loss = tr_loss - - if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: - # Save model checkpoint - output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) - if not os.path.exists(output_dir): - os.makedirs(output_dir) - model_to_save = ( - model.module if hasattr(model, "module") else model - ) # Take care of distributed/parallel training - model_to_save.save_pretrained(output_dir) - torch.save(args, os.path.join(output_dir, "training_args.bin")) - logger.info("Saving model checkpoint to %s", output_dir) - - if args.max_steps > 0 and global_step > args.max_steps: - epoch_iterator.close() - break - if args.max_steps > 0 and global_step > args.max_steps: - train_iterator.close() - break - - if args.local_rank in [-1, 0]: - tb_writer.close() - - return global_step, tr_loss / global_step - - -def evaluate(args, model, tokenizer, prefix=""): - dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, 
output_examples=True) - - if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: - os.makedirs(args.output_dir) - - args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) - # Note that DistributedSampler samples randomly - eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) - eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) - - # Eval! - logger.info("***** Running evaluation {} *****".format(prefix)) - logger.info(" Num examples = %d", len(dataset)) - logger.info(" Batch size = %d", args.eval_batch_size) - all_results = [] - for batch in tqdm(eval_dataloader, desc="Evaluating"): - model.eval() - batch = tuple(t.to(args.device) for t in batch) - with torch.no_grad(): - inputs = {"input_ids": batch[0], "attention_mask": batch[1]} - if args.model_type != "distilbert": - inputs["token_type_ids"] = None if args.model_type == "xlm" else batch[2] # XLM don't use segment_ids - example_indices = batch[3] - if args.model_type in ["xlnet", "xlm"]: - inputs.update({"cls_index": batch[4], "p_mask": batch[5]}) - outputs = model(**inputs) - - for i, example_index in enumerate(example_indices): - eval_feature = features[example_index.item()] - unique_id = int(eval_feature.unique_id) - if args.model_type in ["xlnet", "xlm"]: - # XLNet uses a more complex post-processing procedure - result = RawResultExtended( - unique_id=unique_id, - start_top_log_probs=to_list(outputs[0][i]), - start_top_index=to_list(outputs[1][i]), - end_top_log_probs=to_list(outputs[2][i]), - end_top_index=to_list(outputs[3][i]), - cls_logits=to_list(outputs[4][i]), - ) - else: - result = RawResult( - unique_id=unique_id, start_logits=to_list(outputs[0][i]), end_logits=to_list(outputs[1][i]) - ) - all_results.append(result) - - # Compute predictions - output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) - output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) - if args.version_2_with_negative: - output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) - else: - output_null_log_odds_file = None - - if args.model_type in ["xlnet", "xlm"]: - # XLNet uses a more complex post-processing procedure - write_predictions_extended( - examples, - features, - all_results, - args.n_best_size, - args.max_answer_length, - output_prediction_file, - output_nbest_file, - output_null_log_odds_file, - args.predict_file, - model.config.start_n_top, - model.config.end_n_top, - args.version_2_with_negative, - tokenizer, - args.verbose_logging, - ) - else: - write_predictions( - examples, - features, - all_results, - args.n_best_size, - args.max_answer_length, - args.do_lower_case, - output_prediction_file, - output_nbest_file, - output_null_log_odds_file, - args.verbose_logging, - args.version_2_with_negative, - args.null_score_diff_threshold, - ) - - # Evaluate with the official SQuAD script - evaluate_options = EVAL_OPTS( - data_file=args.predict_file, pred_file=output_prediction_file, na_prob_file=output_null_log_odds_file - ) - results = evaluate_on_squad(evaluate_options) - return results - - -def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): - if args.local_rank not in [-1, 0] and not evaluate: - torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, - # and the others will use the cache - - # Load data features from cache or 
dataset file - input_file = args.predict_file if evaluate else args.train_file - cached_features_file = os.path.join( - os.path.dirname(input_file), - "cached_{}_{}_{}".format( - "dev" if evaluate else "train", - list(filter(None, args.model_name_or_path.split("/"))).pop(), - str(args.max_seq_length), - ), - ) - if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples: - logger.info("Loading features from cached file %s", cached_features_file) - features = torch.load(cached_features_file) - else: - logger.info("Creating features from dataset file at %s", input_file) - examples = read_squad_examples( - input_file=input_file, is_training=not evaluate, version_2_with_negative=args.version_2_with_negative - ) - features = convert_examples_to_features( - examples=examples, - tokenizer=tokenizer, - max_seq_length=args.max_seq_length, - doc_stride=args.doc_stride, - max_query_length=args.max_query_length, - is_training=not evaluate, - ) - if args.local_rank in [-1, 0]: - logger.info("Saving features into cached file %s", cached_features_file) - torch.save(features, cached_features_file) - - if args.local_rank == 0 and not evaluate: - torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, - # and the others will use the cache - - # Convert to Tensors and build dataset - all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) - all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) - all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) - all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long) - all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float) - if evaluate: - all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) - dataset = TensorDataset( - all_input_ids, all_input_mask, all_segment_ids, all_example_index, all_cls_index, all_p_mask - ) - else: - all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) - all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) - dataset = TensorDataset( - all_input_ids, - all_input_mask, - all_segment_ids, - all_start_positions, - all_end_positions, - all_cls_index, - all_p_mask, - ) - - if output_examples: - return dataset, examples, features - return dataset - - -def main(): - parser = argparse.ArgumentParser() - - # Required parameters - parser.add_argument( - "--train_file", default=None, type=str, required=True, help="SQuAD json for training. E.g., train-v1.1.json" - ) - parser.add_argument( - "--predict_file", - default=None, - type=str, - required=True, - help="SQuAD json for predictions. 
E.g., dev-v1.1.json or test-v1.1.json", - ) - parser.add_argument( - "--model_type", - default=None, - type=str, - required=True, - help="Model type selected in the list: " + ", ".join(MODEL_TYPES), - ) - parser.add_argument( - "--model_name_or_path", - default=None, - type=str, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models", - ) - parser.add_argument( - "--output_dir", - default=None, - type=str, - required=True, - help="The output directory where the model checkpoints and predictions will be written.", - ) - - # Other parameters - parser.add_argument( - "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" - ) - parser.add_argument( - "--tokenizer_name", - default="", - type=str, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--cache_dir", - default="", - type=str, - help="Where do you want to store the pre-trained models downloaded from s3", - ) - - parser.add_argument( - "--version_2_with_negative", - action="store_true", - help="If true, the SQuAD examples contain some that do not have an answer.", - ) - parser.add_argument( - "--null_score_diff_threshold", - type=float, - default=0.0, - help="If null_score - best_non_null is greater than the threshold predict null.", - ) - - parser.add_argument( - "--max_seq_length", - default=384, - type=int, - help="The maximum total input sequence length after WordPiece tokenization. Sequences " - "longer than this will be truncated, and sequences shorter than this will be padded.", - ) - parser.add_argument( - "--doc_stride", - default=128, - type=int, - help="When splitting up a long document into chunks, how much stride to take between chunks.", - ) - parser.add_argument( - "--max_query_length", - default=64, - type=int, - help="The maximum number of tokens for the question. Questions longer than this will " - "be truncated to this length.", - ) - parser.add_argument("--do_train", action="store_true", help="Whether to run training.") - parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") - parser.add_argument( - "--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step." - ) - parser.add_argument( - "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." - ) - - parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") - parser.add_argument( - "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." - ) - parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") - parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument( - "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." - ) - parser.add_argument( - "--max_steps", - default=-1, - type=int, - help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.", - ) - parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") - parser.add_argument( - "--n_best_size", - default=20, - type=int, - help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", - ) - parser.add_argument( - "--max_answer_length", - default=30, - type=int, - help="The maximum length of an answer that can be generated. This is needed because the start " - "and end predictions are not conditioned on one another.", - ) - parser.add_argument( - "--verbose_logging", - action="store_true", - help="If true, all of the warnings related to data processing will be printed. " - "A number of warnings are expected for a normal SQuAD evaluation.", - ) - - parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") - parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") - parser.add_argument( - "--eval_all_checkpoints", - action="store_true", - help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", - ) - parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") - parser.add_argument( - "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" - ) - parser.add_argument( - "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" - ) - parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") - - parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") - parser.add_argument( - "--fp16", - action="store_true", - help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", - ) - parser.add_argument( - "--fp16_opt_level", - type=str, - default="O1", - help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." - "See details at https://nvidia.github.io/apex/amp.html", - ) - parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") - parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") - args = parser.parse_args() - - if ( - os.path.exists(args.output_dir) - and os.listdir(args.output_dir) - and args.do_train - and not args.overwrite_output_dir - ): - raise ValueError( - "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( - args.output_dir - ) - ) - - # Setup distant debugging if needed - if args.server_ip and args.server_port: - # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script - import ptvsd - - print("Waiting for debugger attach") - ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) - ptvsd.wait_for_attach() - - # Setup CUDA, GPU & distributed training - if args.local_rank == -1 or args.no_cuda: - device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") - args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() - else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs - torch.cuda.set_device(args.local_rank) - device = torch.device("cuda", args.local_rank) - torch.distributed.init_process_group(backend="nccl") - args.n_gpu = 1 - args.device = device - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, - ) - logger.warning( - "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", - args.local_rank, - device, - args.n_gpu, - bool(args.local_rank != -1), - args.fp16, - ) - - # Set seed - set_seed(args) - - # Load pretrained model and tokenizer - if args.local_rank not in [-1, 0]: - torch.distributed.barrier() # Make sure only the first process in distributed training will - # download model & vocab - - args.model_type = args.model_type.lower() - config = AutoConfig.from_pretrained( - args.config_name if args.config_name else args.model_name_or_path, - cache_dir=args.cache_dir if args.cache_dir else None, - ) - tokenizer = AutoTokenizer.from_pretrained( - args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, - do_lower_case=args.do_lower_case, - cache_dir=args.cache_dir if args.cache_dir else None, - ) - model = AutoModelForQuestionAnswering.from_pretrained( - args.model_name_or_path, - from_tf=bool(".ckpt" in args.model_name_or_path), - config=config, - cache_dir=args.cache_dir if args.cache_dir else None, - ) - - if args.local_rank == 0: - torch.distributed.barrier() # Make sure only the first process in distributed training will - # download model & vocab - - model.to(args.device) - - logger.info("Training/evaluation parameters %s", args) - - # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum - # if args.fp16 is set. Otherwise it'll default to "promote" mode, and we'll get fp32 operations. - # Note that running `--fp16_opt_level="O2"` will remove the need for this code, but it is still valid. 
- if args.fp16: - try: - import apex - - apex.amp.register_half_function(torch, "einsum") - except ImportError: - raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") - - # Training - if args.do_train: - train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) - global_step, tr_loss = train(args, train_dataset, model, tokenizer) - logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) - - # Save the trained model and the tokenizer - if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): - logger.info("Saving model checkpoint to %s", args.output_dir) - # Save a trained model, configuration and tokenizer using `save_pretrained()`. - # They can then be reloaded using `from_pretrained()` - model_to_save = ( - model.module if hasattr(model, "module") else model - ) # Take care of distributed/parallel training - model_to_save.save_pretrained(args.output_dir) - tokenizer.save_pretrained(args.output_dir) - - # Good practice: save your training arguments together with the trained model - torch.save(args, os.path.join(args.output_dir, "training_args.bin")) - - # Load a trained model and vocabulary that you have fine-tuned - model = AutoModelForQuestionAnswering.from_pretrained(args.output_dir) - tokenizer = AutoTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) - model.to(args.device) - - # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory - results = {} - if args.do_eval and args.local_rank in [-1, 0]: - checkpoints = [args.output_dir] - if args.eval_all_checkpoints: - checkpoints = list( - os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) - ) - - logger.info("Evaluate the following checkpoints: %s", checkpoints) - - for checkpoint in checkpoints: - # Reload the model - global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" - model = AutoModelForQuestionAnswering.from_pretrained(checkpoint) - model.to(args.device) - - # Evaluate - result = evaluate(args, model, tokenizer, prefix=global_step) - - result = dict((k + ("_{}".format(global_step) if global_step else ""), v) for k, v in result.items()) - results.update(result) - - logger.info("Results: {}".format(results)) - - return results - - -if __name__ == "__main__": - main() diff --git a/templates/adding_a_new_example_script/utils_xxx.py b/templates/adding_a_new_example_script/utils_xxx.py deleted file mode 100644 index 653be61c6bd..00000000000 --- a/templates/adding_a_new_example_script/utils_xxx.py +++ /dev/null @@ -1,1011 +0,0 @@ -# coding=utf-8 -# Copyright 2018 XXX. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Load XXX dataset. 
""" - - -import collections -import json -import logging -import math - -from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize - -# Required by XLNet evaluation method to compute optimal threshold (see write_predictions_extended() method) -from utils_squad_evaluate import find_all_best_thresh_v2, get_raw_scores, make_qid_to_has_ans - - -logger = logging.getLogger(__name__) - - -class SquadExample(object): - """ - A single training/test example for the Squad dataset. - For examples without an answer, the start and end position are -1. - """ - - def __init__( - self, - qas_id, - question_text, - doc_tokens, - orig_answer_text=None, - start_position=None, - end_position=None, - is_impossible=None, - ): - self.qas_id = qas_id - self.question_text = question_text - self.doc_tokens = doc_tokens - self.orig_answer_text = orig_answer_text - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - def __str__(self): - return self.__repr__() - - def __repr__(self): - s = "" - s += "qas_id: %s" % (self.qas_id) - s += ", question_text: %s" % (self.question_text) - s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) - if self.start_position: - s += ", start_position: %d" % (self.start_position) - if self.end_position: - s += ", end_position: %d" % (self.end_position) - if self.is_impossible: - s += ", is_impossible: %r" % (self.is_impossible) - return s - - -class InputFeatures(object): - """A single set of features of data.""" - - def __init__( - self, - unique_id, - example_index, - doc_span_index, - tokens, - token_to_orig_map, - token_is_max_context, - input_ids, - input_mask, - segment_ids, - cls_index, - p_mask, - paragraph_len, - start_position=None, - end_position=None, - is_impossible=None, - ): - self.unique_id = unique_id - self.example_index = example_index - self.doc_span_index = doc_span_index - self.tokens = tokens - self.token_to_orig_map = token_to_orig_map - self.token_is_max_context = token_is_max_context - self.input_ids = input_ids - self.input_mask = input_mask - self.segment_ids = segment_ids - self.cls_index = cls_index - self.p_mask = p_mask - self.paragraph_len = paragraph_len - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - -def read_squad_examples(input_file, is_training, version_2_with_negative): - """Read a SQuAD json file into a list of SquadExample.""" - with open(input_file, "r", encoding="utf-8") as reader: - input_data = json.load(reader)["data"] - - def is_whitespace(c): - if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: - return True - return False - - examples = [] - for entry in input_data: - for paragraph in entry["paragraphs"]: - paragraph_text = paragraph["context"] - doc_tokens = [] - char_to_word_offset = [] - prev_is_whitespace = True - for c in paragraph_text: - if is_whitespace(c): - prev_is_whitespace = True - else: - if prev_is_whitespace: - doc_tokens.append(c) - else: - doc_tokens[-1] += c - prev_is_whitespace = False - char_to_word_offset.append(len(doc_tokens) - 1) - - for qa in paragraph["qas"]: - qas_id = qa["id"] - question_text = qa["question"] - start_position = None - end_position = None - orig_answer_text = None - is_impossible = False - if is_training: - if version_2_with_negative: - is_impossible = qa["is_impossible"] - if (len(qa["answers"]) != 1) and (not is_impossible): - raise ValueError("For training, each question should have exactly 1 answer.") - if not 
is_impossible: - answer = qa["answers"][0] - orig_answer_text = answer["text"] - answer_offset = answer["answer_start"] - answer_length = len(orig_answer_text) - start_position = char_to_word_offset[answer_offset] - end_position = char_to_word_offset[answer_offset + answer_length - 1] - # Only add answers where the text can be exactly recovered from the - # document. If this CAN'T happen it's likely due to weird Unicode - # stuff so we will just skip the example. - # - # Note that this means for training mode, every example is NOT - # guaranteed to be preserved. - actual_text = " ".join(doc_tokens[start_position : (end_position + 1)]) - cleaned_answer_text = " ".join(whitespace_tokenize(orig_answer_text)) - if actual_text.find(cleaned_answer_text) == -1: - logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text) - continue - else: - start_position = -1 - end_position = -1 - orig_answer_text = "" - - example = SquadExample( - qas_id=qas_id, - question_text=question_text, - doc_tokens=doc_tokens, - orig_answer_text=orig_answer_text, - start_position=start_position, - end_position=end_position, - is_impossible=is_impossible, - ) - examples.append(example) - return examples - - -def convert_examples_to_features( - examples, - tokenizer, - max_seq_length, - doc_stride, - max_query_length, - is_training, - cls_token_at_end=False, - cls_token="[CLS]", - sep_token="[SEP]", - pad_token=0, - sequence_a_segment_id=0, - sequence_b_segment_id=1, - cls_token_segment_id=0, - pad_token_segment_id=0, - mask_padding_with_zero=True, -): - """Loads a data file into a list of `InputBatch`s.""" - - unique_id = 1000000000 - # cnt_pos, cnt_neg = 0, 0 - # max_N, max_M = 1024, 1024 - # f = np.zeros((max_N, max_M), dtype=np.float32) - - features = [] - for (example_index, example) in enumerate(examples): - - # if example_index % 100 == 0: - # logger.info('Converting %s/%s pos %s neg %s', example_index, len(examples), cnt_pos, cnt_neg) - - query_tokens = tokenizer.tokenize(example.question_text) - - if len(query_tokens) > max_query_length: - query_tokens = query_tokens[0:max_query_length] - - tok_to_orig_index = [] - orig_to_tok_index = [] - all_doc_tokens = [] - for (i, token) in enumerate(example.doc_tokens): - orig_to_tok_index.append(len(all_doc_tokens)) - sub_tokens = tokenizer.tokenize(token) - for sub_token in sub_tokens: - tok_to_orig_index.append(i) - all_doc_tokens.append(sub_token) - - tok_start_position = None - tok_end_position = None - if is_training and example.is_impossible: - tok_start_position = -1 - tok_end_position = -1 - if is_training and not example.is_impossible: - tok_start_position = orig_to_tok_index[example.start_position] - if example.end_position < len(example.doc_tokens) - 1: - tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 - else: - tok_end_position = len(all_doc_tokens) - 1 - (tok_start_position, tok_end_position) = _improve_answer_span( - all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text - ) - - # The -3 accounts for [CLS], [SEP] and [SEP] - max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 - - # We can have documents that are longer than the maximum sequence length. - # To deal with this we do a sliding window approach, where we take chunks - # of the up to our max length with a stride of `doc_stride`. 
- _DocSpan = collections.namedtuple("DocSpan", ["start", "length"]) # pylint: disable=invalid-name - doc_spans = [] - start_offset = 0 - while start_offset < len(all_doc_tokens): - length = len(all_doc_tokens) - start_offset - if length > max_tokens_for_doc: - length = max_tokens_for_doc - doc_spans.append(_DocSpan(start=start_offset, length=length)) - if start_offset + length == len(all_doc_tokens): - break - start_offset += min(length, doc_stride) - - for (doc_span_index, doc_span) in enumerate(doc_spans): - tokens = [] - token_to_orig_map = {} - token_is_max_context = {} - segment_ids = [] - - # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) - # Original TF implem also keep the classification token (set to 0) (not sure why...) - p_mask = [] - - # CLS token at the beginning - if not cls_token_at_end: - tokens.append(cls_token) - segment_ids.append(cls_token_segment_id) - p_mask.append(0) - cls_index = 0 - - # Query - for token in query_tokens: - tokens.append(token) - segment_ids.append(sequence_a_segment_id) - p_mask.append(1) - - # SEP token - tokens.append(sep_token) - segment_ids.append(sequence_a_segment_id) - p_mask.append(1) - - # Paragraph - for i in range(doc_span.length): - split_token_index = doc_span.start + i - token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] - - is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) - token_is_max_context[len(tokens)] = is_max_context - tokens.append(all_doc_tokens[split_token_index]) - segment_ids.append(sequence_b_segment_id) - p_mask.append(0) - paragraph_len = doc_span.length - - # SEP token - tokens.append(sep_token) - segment_ids.append(sequence_b_segment_id) - p_mask.append(1) - - # CLS token at the end - if cls_token_at_end: - tokens.append(cls_token) - segment_ids.append(cls_token_segment_id) - p_mask.append(0) - cls_index = len(tokens) - 1 # Index of classification token - - input_ids = tokenizer.convert_tokens_to_ids(tokens) - - # The mask has 1 for real tokens and 0 for padding tokens. Only real - # tokens are attended to. - input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) - - # Zero-pad up to the sequence length. - while len(input_ids) < max_seq_length: - input_ids.append(pad_token) - input_mask.append(0 if mask_padding_with_zero else 1) - segment_ids.append(pad_token_segment_id) - p_mask.append(1) - - assert ( - len(input_ids) == max_seq_length - ), f"Input ids and sequence have mismatched lengths {len(input_ids)} and {max_seq_length}" - assert ( - len(input_mask) == max_seq_length - ), f"Input mask and sequence have mismatched lengths {len(input_mask)} and {max_seq_length}" - assert ( - len(segment_ids) == max_seq_length - ), f"Segment ids and sequence have mismatched lengths {len(segment_ids)} and {max_seq_length}" - - span_is_impossible = example.is_impossible - start_position = None - end_position = None - if is_training and not span_is_impossible: - # For training, if our document chunk does not contain an annotation - # we throw it out, since there is nothing to predict. 
- doc_start = doc_span.start - doc_end = doc_span.start + doc_span.length - 1 - out_of_span = False - if not (tok_start_position >= doc_start and tok_end_position <= doc_end): - out_of_span = True - if out_of_span: - start_position = 0 - end_position = 0 - span_is_impossible = True - else: - doc_offset = len(query_tokens) + 2 - start_position = tok_start_position - doc_start + doc_offset - end_position = tok_end_position - doc_start + doc_offset - - if is_training and span_is_impossible: - start_position = cls_index - end_position = cls_index - - if example_index < 20: - logger.info("*** Example ***") - logger.info("unique_id: %s" % (unique_id)) - logger.info("example_index: %s" % (example_index)) - logger.info("doc_span_index: %s" % (doc_span_index)) - logger.info("tokens: %s" % " ".join(tokens)) - logger.info( - "token_to_orig_map: %s" % " ".join(["%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]) - ) - logger.info( - "token_is_max_context: %s" - % " ".join(["%d:%s" % (x, y) for (x, y) in token_is_max_context.items()]) - ) - logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) - logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) - logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) - if is_training and span_is_impossible: - logger.info("impossible example") - if is_training and not span_is_impossible: - answer_text = " ".join(tokens[start_position : (end_position + 1)]) - logger.info("start_position: %d" % (start_position)) - logger.info("end_position: %d" % (end_position)) - logger.info("answer: %s" % (answer_text)) - - features.append( - InputFeatures( - unique_id=unique_id, - example_index=example_index, - doc_span_index=doc_span_index, - tokens=tokens, - token_to_orig_map=token_to_orig_map, - token_is_max_context=token_is_max_context, - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - cls_index=cls_index, - p_mask=p_mask, - paragraph_len=paragraph_len, - start_position=start_position, - end_position=end_position, - is_impossible=span_is_impossible, - ) - ) - unique_id += 1 - - return features - - -def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): - """Returns tokenized answer spans that better match the annotated answer.""" - - # The SQuAD annotations are character based. We first project them to - # whitespace-tokenized words. But then after WordPiece tokenization, we can - # often find a "better match". For example: - # - # Question: What year was John Smith born? - # Context: The leader was John Smith (1895-1943). - # Answer: 1895 - # - # The original whitespace-tokenized answer will be "(1895-1943).". However - # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match - # the exact answer, 1895. - # - # However, this is not always possible. Consider the following: - # - # Question: What country is the top exporter of electronics? - # Context: The Japanese electronics industry is the largest in the world. - # Answer: Japan - # - # In this case, the annotator chose "Japan" as a character sub-span of - # the word "Japanese". Since our WordPiece tokenizer does not split - # "Japanese", we just use "Japanese" as the annotation. This is fairly rare - # in SQuAD, but does happen. 
- tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) - - for new_start in range(input_start, input_end + 1): - for new_end in range(input_end, new_start - 1, -1): - text_span = " ".join(doc_tokens[new_start : (new_end + 1)]) - if text_span == tok_answer_text: - return (new_start, new_end) - - return (input_start, input_end) - - -def _check_is_max_context(doc_spans, cur_span_index, position): - """Check if this is the 'max context' doc span for the token.""" - - # Because of the sliding window approach taken to scoring documents, a single - # token can appear in multiple documents. E.g. - # Doc: the man went to the store and bought a gallon of milk - # Span A: the man went to the - # Span B: to the store and bought - # Span C: and bought a gallon of - # ... - # - # Now the word 'bought' will have two scores from spans B and C. We only - # want to consider the score with "maximum context", which we define as - # the *minimum* of its left and right context (the *sum* of left and - # right context will always be the same, of course). - # - # In the example the maximum context for 'bought' would be span C since - # it has 1 left context and 3 right context, while span B has 4 left context - # and 0 right context. - best_score = None - best_span_index = None - for (span_index, doc_span) in enumerate(doc_spans): - end = doc_span.start + doc_span.length - 1 - if position < doc_span.start: - continue - if position > end: - continue - num_left_context = position - doc_span.start - num_right_context = end - position - score = min(num_left_context, num_right_context) + 0.01 * doc_span.length - if best_score is None or score > best_score: - best_score = score - best_span_index = span_index - - return cur_span_index == best_span_index - - -RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) - - -def write_predictions( - all_examples, - all_features, - all_results, - n_best_size, - max_answer_length, - do_lower_case, - output_prediction_file, - output_nbest_file, - output_null_log_odds_file, - verbose_logging, - version_2_with_negative, - null_score_diff_threshold, -): - """Write final predictions to the json file and log-odds of null if needed.""" - logger.info("Writing predictions to: %s" % (output_prediction_file)) - logger.info("Writing nbest to: %s" % (output_nbest_file)) - - example_index_to_features = collections.defaultdict(list) - for feature in all_features: - example_index_to_features[feature.example_index].append(feature) - - unique_id_to_result = {} - for result in all_results: - unique_id_to_result[result.unique_id] = result - - _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name - "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"] - ) - - all_predictions = collections.OrderedDict() - all_nbest_json = collections.OrderedDict() - scores_diff_json = collections.OrderedDict() - - for (example_index, example) in enumerate(all_examples): - features = example_index_to_features[example_index] - - prelim_predictions = [] - # keep track of the minimum score of null start+end of position 0 - score_null = 1000000 # large and positive - min_null_feature_index = 0 # the paragraph slice with min null score - null_start_logit = 0 # the start logit at the slice with min null score - null_end_logit = 0 # the end logit at the slice with min null score - for (feature_index, feature) in enumerate(features): - result = unique_id_to_result[feature.unique_id] - start_indexes = 
_get_best_indexes(result.start_logits, n_best_size) - end_indexes = _get_best_indexes(result.end_logits, n_best_size) - # if we could have irrelevant answers, get the min score of irrelevant - if version_2_with_negative: - feature_null_score = result.start_logits[0] + result.end_logits[0] - if feature_null_score < score_null: - score_null = feature_null_score - min_null_feature_index = feature_index - null_start_logit = result.start_logits[0] - null_end_logit = result.end_logits[0] - for start_index in start_indexes: - for end_index in end_indexes: - # We could hypothetically create invalid predictions, e.g., predict - # that the start of the span is in the question. We throw out all - # invalid predictions. - if start_index >= len(feature.tokens): - continue - if end_index >= len(feature.tokens): - continue - if start_index not in feature.token_to_orig_map: - continue - if end_index not in feature.token_to_orig_map: - continue - if not feature.token_is_max_context.get(start_index, False): - continue - if end_index < start_index: - continue - length = end_index - start_index + 1 - if length > max_answer_length: - continue - prelim_predictions.append( - _PrelimPrediction( - feature_index=feature_index, - start_index=start_index, - end_index=end_index, - start_logit=result.start_logits[start_index], - end_logit=result.end_logits[end_index], - ) - ) - if version_2_with_negative: - prelim_predictions.append( - _PrelimPrediction( - feature_index=min_null_feature_index, - start_index=0, - end_index=0, - start_logit=null_start_logit, - end_logit=null_end_logit, - ) - ) - prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) - - _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name - "NbestPrediction", ["text", "start_logit", "end_logit"] - ) - - seen_predictions = {} - nbest = [] - for pred in prelim_predictions: - if len(nbest) >= n_best_size: - break - feature = features[pred.feature_index] - if pred.start_index > 0: # this is a non-null prediction - tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)] - orig_doc_start = feature.token_to_orig_map[pred.start_index] - orig_doc_end = feature.token_to_orig_map[pred.end_index] - orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)] - tok_text = " ".join(tok_tokens) - - # De-tokenize WordPieces that have been split off. - tok_text = tok_text.replace(" ##", "") - tok_text = tok_text.replace("##", "") - - # Clean whitespace - tok_text = tok_text.strip() - tok_text = " ".join(tok_text.split()) - orig_text = " ".join(orig_tokens) - - final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) - if final_text in seen_predictions: - continue - - seen_predictions[final_text] = True - else: - final_text = "" - seen_predictions[final_text] = True - - nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) - # if we didn't include the empty option in the n-best, include it - if version_2_with_negative: - if "" not in seen_predictions: - nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit)) - - # In very rare edge cases we could only have single null prediction. - # So we just create a nonce prediction in this case to avoid failure. - if len(nbest) == 1: - nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) - - # In very rare edge cases we could have no valid predictions. 
So we - # just create a nonce prediction in this case to avoid failure. - if not nbest: - nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) - - assert len(nbest) >= 1, "No valid predictions" - - total_scores = [] - best_non_null_entry = None - for entry in nbest: - total_scores.append(entry.start_logit + entry.end_logit) - if not best_non_null_entry: - if entry.text: - best_non_null_entry = entry - - probs = _compute_softmax(total_scores) - - nbest_json = [] - for (i, entry) in enumerate(nbest): - output = collections.OrderedDict() - output["text"] = entry.text - output["probability"] = probs[i] - output["start_logit"] = entry.start_logit - output["end_logit"] = entry.end_logit - nbest_json.append(output) - - assert len(nbest_json) >= 1, "No valid predictions" - - if not version_2_with_negative: - all_predictions[example.qas_id] = nbest_json[0]["text"] - else: - # predict "" iff the null score - the score of best non-null > threshold - score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit) - scores_diff_json[example.qas_id] = score_diff - if score_diff > null_score_diff_threshold: - all_predictions[example.qas_id] = "" - else: - all_predictions[example.qas_id] = best_non_null_entry.text - all_nbest_json[example.qas_id] = nbest_json - - with open(output_prediction_file, "w") as writer: - writer.write(json.dumps(all_predictions, indent=4) + "\n") - - with open(output_nbest_file, "w") as writer: - writer.write(json.dumps(all_nbest_json, indent=4) + "\n") - - if version_2_with_negative: - with open(output_null_log_odds_file, "w") as writer: - writer.write(json.dumps(scores_diff_json, indent=4) + "\n") - - return all_predictions - - -# For XLNet (and XLM which uses the same head) -RawResultExtended = collections.namedtuple( - "RawResultExtended", - ["unique_id", "start_top_log_probs", "start_top_index", "end_top_log_probs", "end_top_index", "cls_logits"], -) - - -def write_predictions_extended( - all_examples, - all_features, - all_results, - n_best_size, - max_answer_length, - output_prediction_file, - output_nbest_file, - output_null_log_odds_file, - orig_data_file, - start_n_top, - end_n_top, - version_2_with_negative, - tokenizer, - verbose_logging, -): - """XLNet write prediction logic (more complex than Bert's). - Write final predictions to the json file and log-odds of null if needed. 
- - Requires utils_squad_evaluate.py - """ - _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name - "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"] - ) - - _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name - "NbestPrediction", ["text", "start_log_prob", "end_log_prob"] - ) - - logger.info("Writing predictions to: %s", output_prediction_file) - # logger.info("Writing nbest to: %s" % (output_nbest_file)) - - example_index_to_features = collections.defaultdict(list) - for feature in all_features: - example_index_to_features[feature.example_index].append(feature) - - unique_id_to_result = {} - for result in all_results: - unique_id_to_result[result.unique_id] = result - - all_predictions = collections.OrderedDict() - all_nbest_json = collections.OrderedDict() - scores_diff_json = collections.OrderedDict() - - for (example_index, example) in enumerate(all_examples): - features = example_index_to_features[example_index] - - prelim_predictions = [] - # keep track of the minimum score of null start+end of position 0 - score_null = 1000000 # large and positive - - for (feature_index, feature) in enumerate(features): - result = unique_id_to_result[feature.unique_id] - - cur_null_score = result.cls_logits - - # if we could have irrelevant answers, get the min score of irrelevant - score_null = min(score_null, cur_null_score) - - for i in range(start_n_top): - for j in range(end_n_top): - start_log_prob = result.start_top_log_probs[i] - start_index = result.start_top_index[i] - - j_index = i * end_n_top + j - - end_log_prob = result.end_top_log_probs[j_index] - end_index = result.end_top_index[j_index] - - # We could hypothetically create invalid predictions, e.g., predict - # that the start of the span is in the question. We throw out all - # invalid predictions. - if start_index >= feature.paragraph_len - 1: - continue - if end_index >= feature.paragraph_len - 1: - continue - - if not feature.token_is_max_context.get(start_index, False): - continue - if end_index < start_index: - continue - length = end_index - start_index + 1 - if length > max_answer_length: - continue - - prelim_predictions.append( - _PrelimPrediction( - feature_index=feature_index, - start_index=start_index, - end_index=end_index, - start_log_prob=start_log_prob, - end_log_prob=end_log_prob, - ) - ) - - prelim_predictions = sorted( - prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True - ) - - seen_predictions = {} - nbest = [] - for pred in prelim_predictions: - if len(nbest) >= n_best_size: - break - feature = features[pred.feature_index] - - # XLNet un-tokenizer - # Let's keep it simple for now and see if we need all this later. 
- # - # tok_start_to_orig_index = feature.tok_start_to_orig_index - # tok_end_to_orig_index = feature.tok_end_to_orig_index - # start_orig_pos = tok_start_to_orig_index[pred.start_index] - # end_orig_pos = tok_end_to_orig_index[pred.end_index] - # paragraph_text = example.paragraph_text - # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip() - - # Previously used Bert untokenizer - tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)] - orig_doc_start = feature.token_to_orig_map[pred.start_index] - orig_doc_end = feature.token_to_orig_map[pred.end_index] - orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)] - tok_text = tokenizer.convert_tokens_to_string(tok_tokens) - - # Clean whitespace - tok_text = tok_text.strip() - tok_text = " ".join(tok_text.split()) - orig_text = " ".join(orig_tokens) - - final_text = get_final_text(tok_text, orig_text, tokenizer.do_lower_case, verbose_logging) - - if final_text in seen_predictions: - continue - - seen_predictions[final_text] = True - - nbest.append( - _NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob) - ) - - # In very rare edge cases we could have no valid predictions. So we - # just create a nonce prediction in this case to avoid failure. - if not nbest: - nbest.append(_NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6)) - - total_scores = [] - best_non_null_entry = None - for entry in nbest: - total_scores.append(entry.start_log_prob + entry.end_log_prob) - if not best_non_null_entry: - best_non_null_entry = entry - - probs = _compute_softmax(total_scores) - - nbest_json = [] - for (i, entry) in enumerate(nbest): - output = collections.OrderedDict() - output["text"] = entry.text - output["probability"] = probs[i] - output["start_log_prob"] = entry.start_log_prob - output["end_log_prob"] = entry.end_log_prob - nbest_json.append(output) - - assert len(nbest_json) >= 1, "No valid predictions" - assert best_non_null_entry is not None, "No valid predictions" - - score_diff = score_null - scores_diff_json[example.qas_id] = score_diff - # note(zhiliny): always predict best_non_null_entry - # and the evaluation script will search for the best threshold - all_predictions[example.qas_id] = best_non_null_entry.text - - all_nbest_json[example.qas_id] = nbest_json - - with open(output_prediction_file, "w") as writer: - writer.write(json.dumps(all_predictions, indent=4) + "\n") - - with open(output_nbest_file, "w") as writer: - writer.write(json.dumps(all_nbest_json, indent=4) + "\n") - - if version_2_with_negative: - with open(output_null_log_odds_file, "w") as writer: - writer.write(json.dumps(scores_diff_json, indent=4) + "\n") - - with open(orig_data_file, "r", encoding="utf-8") as reader: - orig_data = json.load(reader)["data"] - - qid_to_has_ans = make_qid_to_has_ans(orig_data) - exact_raw, f1_raw = get_raw_scores(orig_data, all_predictions) - out_eval = {} - - find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw, scores_diff_json, qid_to_has_ans) - - return out_eval - - -def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): - """Project the tokenized prediction back to the original text.""" - - # When we created the data, we kept track of the alignment between original - # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So - # now `orig_text` contains the span of our original text corresponding to the - # span that we predicted. 
- # - # However, `orig_text` may contain extra characters that we don't want in - # our prediction. - # - # For example, let's say: - # pred_text = steve smith - # orig_text = Steve Smith's - # - # We don't want to return `orig_text` because it contains the extra "'s". - # - # We don't want to return `pred_text` because it's already been normalized - # (the SQuAD eval script also does punctuation stripping/lower casing but - # our tokenizer does additional normalization like stripping accent - # characters). - # - # What we really want to return is "Steve Smith". - # - # Therefore, we have to apply a semi-complicated alignment heuristic between - # `pred_text` and `orig_text` to get a character-to-character alignment. This - # can fail in certain cases in which case we just return `orig_text`. - - def _strip_spaces(text): - ns_chars = [] - ns_to_s_map = collections.OrderedDict() - for (i, c) in enumerate(text): - if c == " ": - continue - ns_to_s_map[len(ns_chars)] = i - ns_chars.append(c) - ns_text = "".join(ns_chars) - return (ns_text, ns_to_s_map) - - # We first tokenize `orig_text`, strip whitespace from the result - # and `pred_text`, and check if they are the same length. If they are - # NOT the same length, the heuristic has failed. If they are the same - # length, we assume the characters are one-to-one aligned. - tokenizer = BasicTokenizer(do_lower_case=do_lower_case) - - tok_text = " ".join(tokenizer.tokenize(orig_text)) - - start_position = tok_text.find(pred_text) - if start_position == -1: - if verbose_logging: - logger.info("Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) - return orig_text - end_position = start_position + len(pred_text) - 1 - - (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) - (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) - - if len(orig_ns_text) != len(tok_ns_text): - if verbose_logging: - logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) - return orig_text - - # We then project the characters in `pred_text` back to `orig_text` using - # the character-to-character alignment. 
- tok_s_to_ns_map = {} - for (i, tok_index) in tok_ns_to_s_map.items(): - tok_s_to_ns_map[tok_index] = i - - orig_start_position = None - if start_position in tok_s_to_ns_map: - ns_start_position = tok_s_to_ns_map[start_position] - if ns_start_position in orig_ns_to_s_map: - orig_start_position = orig_ns_to_s_map[ns_start_position] - - if orig_start_position is None: - if verbose_logging: - logger.info("Couldn't map start position") - return orig_text - - orig_end_position = None - if end_position in tok_s_to_ns_map: - ns_end_position = tok_s_to_ns_map[end_position] - if ns_end_position in orig_ns_to_s_map: - orig_end_position = orig_ns_to_s_map[ns_end_position] - - if orig_end_position is None: - if verbose_logging: - logger.info("Couldn't map end position") - return orig_text - - output_text = orig_text[orig_start_position : (orig_end_position + 1)] - return output_text - - -def _get_best_indexes(logits, n_best_size): - """Get the n-best logits from a list.""" - index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) - - best_indexes = [] - for i in range(len(index_and_score)): - if i >= n_best_size: - break - best_indexes.append(index_and_score[i][0]) - return best_indexes - - -def _compute_softmax(scores): - """Compute softmax probability over raw logits.""" - if not scores: - return [] - - max_score = None - for score in scores: - if max_score is None or score > max_score: - max_score = score - - exp_scores = [] - total_sum = 0.0 - for score in scores: - x = math.exp(score - max_score) - exp_scores.append(x) - total_sum += x - - probs = [] - for score in exp_scores: - probs.append(score / total_sum) - return probs diff --git a/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py b/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py new file mode 100644 index 00000000000..7fc252e84d0 --- /dev/null +++ b/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py @@ -0,0 +1,339 @@ +# coding=utf-8 +# Copyright 2020 {{cookiecutter.authors}} All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Fine-tuning the library models for {{cookiecutter.example_name}}. +""" +# You can also adapt this script on your own {{cookiecutter.example_name}} task. Pointers for this are left as comments. 
+ +import logging +import math +import os +import sys +from dataclasses import dataclass, field +from typing import Optional + +from datasets import load_dataset + +import transformers +from transformers import ( + CONFIG_MAPPING, + MODEL_MAPPING, + AutoConfig, + {{cookiecutter.model_class}}, + AutoTokenizer, + HfArgumentParser, + Trainer, + TrainingArguments, + default_data_collator, + set_seed, +) +from transformers.trainer_utils import is_main_process + + +logger = logging.getLogger(__name__) + + +{%- if cookiecutter.can_train_from_scratch == "True" %} +# You should update this to your particular problem to have better documentation of `model_type` +MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) +MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) + + +@dataclass +class ModelArguments: + """ + Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. + """ + + model_name_or_path: Optional[str] = field( + default=None, + metadata={ + "help": "The model checkpoint for weights initialization." + "Don't set if you want to train a model from scratch." + }, + ) + model_type: Optional[str] = field( + default=None, + metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, + ) + config_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} + ) + tokenizer_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} + ) + cache_dir: Optional[str] = field( + default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} + ) + use_fast_tokenizer: bool = field( + default=True, + metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, + ) +{%- elif cookiecutter.can_train_from_scratch == "False" %} +@dataclass +class ModelArguments: + """ + Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. + """ + + model_name_or_path: str = field( + metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} + ) + config_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} + ) + tokenizer_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} + ) + cache_dir: Optional[str] = field( + default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} + ) + use_fast_tokenizer: bool = field( + default=True, + metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, + ) +{% endif %} + + +@dataclass +class DataTrainingArguments: + """ + Arguments pertaining to what data we are going to input our model for training and eval. 
+ """ + + dataset_name: Optional[str] = field( + default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} + ) + dataset_config_name: Optional[str] = field( + default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} + ) + train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) + validation_file: Optional[str] = field( + default=None, + metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, + ) + overwrite_cache: bool = field( + default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} + ) + preprocessing_num_workers: Optional[int] = field( + default=None, + metadata={"help": "The number of processes to use for the preprocessing."}, + ) + + def __post_init__(self): + if self.dataset_name is None and self.train_file is None and self.validation_file is None: + raise ValueError("Need either a dataset name or a training/validation file.") + else: + if self.train_file is not None: + extension = self.train_file.split(".")[-1] + assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." + if self.validation_file is not None: + extension = self.validation_file.split(".")[-1] + assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." + + +def main(): + # See all possible arguments in src/transformers/training_args.py + # or by passing the --help flag to this script. + # We now keep distinct sets of args, for a cleaner separation of concerns. + + parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) + if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): + # If we pass only one argument to the script and it's the path to a json file, + # let's parse it to get our arguments. + model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) + else: + model_args, data_args, training_args = parser.parse_args_into_dataclasses() + + if ( + os.path.exists(training_args.output_dir) + and os.listdir(training_args.output_dir) + and training_args.do_train + and not training_args.overwrite_output_dir + ): + raise ValueError( + f"Output directory ({training_args.output_dir}) already exists and is not empty." + "Use --overwrite_output_dir to overcome." + ) + + # Setup logging + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN, + ) + + # Log on each process the small summary: + logger.warning( + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" + ) + # Set the verbosity to info of the Transformers logger (on main process only): + if is_main_process(training_args.local_rank): + transformers.utils.logging.set_verbosity_info() + logger.info("Training/evaluation parameters %s", training_args) + + # Set seed before initializing model. 
+    set_seed(training_args.seed)
+
+    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
+    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
+    # (the dataset will be downloaded automatically from the datasets Hub).
+    #
+    # For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this
+    # behavior (see below)
+    #
+    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
+    # download the dataset.
+    if data_args.dataset_name is not None:
+        # Downloading and loading a dataset from the hub.
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+    else:
+        data_files = {}
+        if data_args.train_file is not None:
+            data_files["train"] = data_args.train_file
+        if data_args.validation_file is not None:
+            data_files["validation"] = data_args.validation_file
+        extension = data_args.train_file.split(".")[-1]
+        if extension == "txt":
+            extension = "text"
+        datasets = load_dataset(extension, data_files=data_files)
+    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
+    # https://huggingface.co/docs/datasets/loading_datasets.html.
+
+    # Load pretrained model and tokenizer
+    #
+    # Distributed training:
+    # The .from_pretrained methods guarantee that only one local process can concurrently
+    # download model & vocab.
+{%- if cookiecutter.can_train_from_scratch == "True" %}
+    if model_args.config_name:
+        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
+    elif model_args.model_name_or_path:
+        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
+    else:
+        config = CONFIG_MAPPING[model_args.model_type]()
+        logger.warning("You are instantiating a new config instance from scratch.")
+
+    if model_args.tokenizer_name:
+        tokenizer = AutoTokenizer.from_pretrained(
+            model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
+        )
+    elif model_args.model_name_or_path:
+        tokenizer = AutoTokenizer.from_pretrained(
+            model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
+        )
+    else:
+        raise ValueError(
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
+            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
+ ) + + if model_args.model_name_or_path: + model = {{cookiecutter.model_class}}.from_pretrained( + model_args.model_name_or_path, + from_tf=bool(".ckpt" in model_args.model_name_or_path), + config=config, + cache_dir=model_args.cache_dir, + ) + else: + logger.info("Training new model from scratch") + model = {{cookiecutter.model_class}}.from_config(config) + + model.resize_token_embeddings(len(tokenizer)) +{%- elif cookiecutter.can_train_from_scratch == "False" %} + config = AutoConfig.from_pretrained( + model_args.config_name if model_args.config_name else model_args.model_name_or_path, + num_labels=num_labels, + finetuning_task=data_args.task_name, + cache_dir=model_args.cache_dir, + ) + tokenizer = AutoTokenizer.from_pretrained( + model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, + cache_dir=model_args.cache_dir, + use_fast=model_args.use_fast_tokenizer, + ) + model = AutoModelForSequenceClassification.from_pretrained( + model_args.model_name_or_path, + from_tf=bool(".ckpt" in model_args.model_name_or_path), + config=config, + cache_dir=model_args.cache_dir, + ) +{% endif %} + + # Preprocessing the datasets. + # First we tokenize all the texts. + if training_args.do_train: + column_names = datasets["train"].column_names + else: + column_names = datasets["validation"].column_names + text_column_name = "text" if "text" in column_names else column_names[0] + + def tokenize_function(examples): + return tokenizer(examples[text_column_name], padding="max_length", truncation=True) + + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=[text_column_name], + load_from_cache_file=not data_args.overwrite_cache, + ) + + # Data collator + data_collator=default_data_collator + + # Initialize our Trainer + trainer = Trainer( + model=model, + args=training_args, + train_dataset=tokenized_datasets["train"] if training_args.do_train else None, + eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, + tokenizer=tokenizer, + data_collator=data_collator, + ) + + # Training + if training_args.do_train: + trainer.train( + model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None + ) + trainer.save_model() # Saves the tokenizer too for easy upload + + # Evaluation + results = {} + if training_args.do_eval: + logger.info("*** Evaluate ***") + + results = trainer.evaluate() + + output_eval_file = os.path.join(training_args.output_dir, "eval_results_{{cookiecutter.example_shortcut}}.txt") + if trainer.is_world_process_zero(): + with open(output_eval_file, "w") as writer: + logger.info("***** Eval results *****") + for key, value in results.items(): + logger.info(f" {key} = {value}") + writer.write(f"{key} = {value}\n") + + return results + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main()
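Note: a rough end-to-end sketch of how the new template can be tried out. It assumes the cookiecutter package is installed; the placeholders in angle brackets correspond to the prompts defined in cookiecutter.json, and the model/dataset names are only example values, not part of this patch:

    pip install cookiecutter
    cookiecutter templates/adding_a_new_example_script/
    # answer the prompts (example_name, directory_name, example_shortcut, model_class, ...)
    cd <directory_name>
    python run_<example_shortcut>.py \
        --model_name_or_path bert-base-cased \
        --dataset_name wikitext \
        --dataset_config_name wikitext-2-raw-v1 \
        --do_train \
        --do_eval \
        --output_dir /tmp/test-<example_shortcut>

The generated script reuses the HfArgumentParser / Trainer setup above, so any TrainingArguments flag (e.g. --overwrite_output_dir, --per_device_train_batch_size) can be passed the same way as in the other example scripts.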