Mirror of https://github.com/huggingface/transformers.git (synced 2025-07-31 02:02:21 +06:00)
New TF GLUE example (#12028)
* Pushing partially-complete new GLUE example
* First draft of the new TF GLUE example! Needs a little more testing to be sure but it's almost ready.
* Fix to the fit() call
* Bugfixes, making sure TPU and multi-GPU support is ready
* Remove logger line that depends on Pytorch
* Style pass
* Deleting old TF GLUE example
* Include label2id and id2label in the saved model config
* Don't clobber the existing model.config.label2id
* Style fixes
* Update examples/tensorflow/text-classification/run_glue.py

Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
parent 9d2cee8b48
commit 73a532651a

examples/tensorflow/text-classification/run_glue.py: 557 lines added (normal file)
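The gist of the new example: tokenized features are kept as tf.RaggedTensor columns and only densified per batch, so GPU/CPU runs pad each batch to its own longest sequence, while TPU (or --pad_to_max_length) runs use constant shapes (see convert_dataset_for_tensorflow in the diff below). As a minimal sketch of that per-batch padding idea, not part of the commit, with made-up token ids, and assuming a TF 2.x release where batching ragged slices yields RaggedTensors (the same behavior the script relies on):

import tensorflow as tf

# Toy tokenized inputs of different lengths (values are invented).
input_ids = tf.ragged.constant([[101, 7592, 102], [101, 102], [101, 2023, 2003, 103, 102], [101, 2003, 102]])
labels = tf.constant([0, 1, 0, 1])

dataset = tf.data.Dataset.from_tensor_slices(({"input_ids": input_ids}, labels))
# Densify after batching, so padding only reaches the longest sequence in each batch.
dataset = dataset.batch(2).map(
    lambda features, label: ({k: v.to_tensor() for k, v in features.items()}, label)
)

for features, label in dataset:
    print(features["input_ids"].shape, label.numpy())  # (2, 3) then (2, 5) with this toy data

The "constant_batch" mode in the diff does the same densification but to a fixed bounding shape, which avoids a new TPU graph compilation for every sequence length.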
@@ -0,0 +1,557 @@
#!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE."""
# You can also adapt this script on your own text classification task. Pointers for this are left as comments.

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
import tensorflow as tf
from datasets import load_dataset, load_metric

import transformers
from transformers import (
    AutoConfig,
    AutoTokenizer,
    HfArgumentParser,
    PretrainedConfig,
    TFAutoModelForSequenceClassification,
    TFTrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version


# region Helper functions

def convert_dataset_for_tensorflow(
|
||||
dataset, non_label_column_names, batch_size, dataset_mode="variable_batch", shuffle=True, drop_remainder=True
|
||||
):
|
||||
"""Converts a Hugging Face dataset to a Tensorflow Dataset. The dataset_mode controls whether we pad all batches
|
||||
to the maximum sequence length, or whether we only pad to the maximum length within that batch. The former
|
||||
is most useful when training on TPU, as a new graph compilation is required for each sequence length.
|
||||
"""
|
||||
|
||||
def densify_ragged_batch(features, label=None):
|
||||
features = {
|
||||
feature: ragged_tensor.to_tensor(shape=batch_shape[feature]) for feature, ragged_tensor in features.items()
|
||||
}
|
||||
if label is None:
|
||||
return features
|
||||
else:
|
||||
return features, label
|
||||
|
||||
feature_keys = list(set(dataset.features.keys()) - set(non_label_column_names + ["label"]))
|
||||
if dataset_mode == "variable_batch":
|
||||
batch_shape = {key: None for key in feature_keys}
|
||||
data = {key: tf.ragged.constant(dataset[key]) for key in feature_keys}
|
||||
elif dataset_mode == "constant_batch":
|
||||
data = {key: tf.ragged.constant(dataset[key]) for key in feature_keys}
|
||||
batch_shape = {
|
||||
key: tf.concat(([batch_size], ragged_tensor.bounding_shape()[1:]), axis=0)
|
||||
for key, ragged_tensor in data.items()
|
||||
}
|
||||
else:
|
||||
raise ValueError("Unknown dataset mode!")
|
||||
|
||||
if "label" in dataset.features:
|
||||
labels = tf.convert_to_tensor(np.array(dataset["label"]))
|
||||
tf_dataset = tf.data.Dataset.from_tensor_slices((data, labels))
|
||||
else:
|
||||
tf_dataset = tf.data.Dataset.from_tensor_slices(data)
|
||||
if shuffle:
|
||||
tf_dataset = tf_dataset.shuffle(buffer_size=len(dataset))
|
||||
tf_dataset = tf_dataset.batch(batch_size=batch_size, drop_remainder=drop_remainder).map(densify_ragged_batch)
|
||||
return tf_dataset
|
||||
|
||||
|
||||
class SavePretrainedCallback(tf.keras.callbacks.Callback):
|
||||
# Hugging Face models have a save_pretrained() method that saves both the weights and the necessary
|
||||
# metadata to allow them to be loaded as a pretrained model in future. This is a simple Keras callback
|
||||
# that saves the model with this method after each epoch.
|
||||
def __init__(self, output_dir, **kwargs):
|
||||
super().__init__()
|
||||
self.output_dir = output_dir
|
||||
|
||||
def on_epoch_end(self, epoch, logs=None):
|
||||
self.model.save_pretrained(self.output_dir)
|
||||
|
||||
|
||||
# endregion
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.6.0.dev0")
|
||||
|
||||
task_to_keys = {
|
||||
"cola": ("sentence", None),
|
||||
"mnli": ("premise", "hypothesis"),
|
||||
"mrpc": ("sentence1", "sentence2"),
|
||||
"qnli": ("question", "sentence"),
|
||||
"qqp": ("question1", "question2"),
|
||||
"rte": ("sentence1", "sentence2"),
|
||||
"sst2": ("sentence", None),
|
||||
"stsb": ("sentence1", "sentence2"),
|
||||
"wnli": ("sentence1", "sentence2"),
|
||||
}
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# region Command-line arguments
|
||||
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """

    task_name: str = field(
        metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())},
    )
    predict_file: str = field(
        metadata={"help": "A file containing user-supplied examples to make predictions for"},
        default=None,
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
            "value if set."
        },
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
        if self.task_name not in task_to_keys.keys():
            raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys()))


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )


# endregion

def main():
    # region Argument parsing
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if not (training_args.do_train or training_args.do_eval or training_args.do_predict):
        exit("Must specify at least one of --do_train, --do_eval or --do_predict!")
    # endregion

    # region Checkpoints
    checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        checkpoint = get_last_checkpoint(training_args.output_dir)
        if checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # endregion

    # region Logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f"Training/evaluation parameters {training_args}")
    # endregion

    # region Dataset and labels
    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Downloading and loading a dataset from the hub. In distributed training, the load_dataset function guarantee
    # that only one local process can concurrently download the dataset.
    datasets = load_dataset("glue", data_args.task_name, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    is_regression = data_args.task_name == "stsb"
    if not is_regression:
        label_list = datasets["train"].features["label"].names
        num_labels = len(label_list)
    else:
        num_labels = 1

    if data_args.predict_file is not None:
        logger.info("Preparing user-supplied file for predictions...")

        data_files = {"data": data_args.predict_file}

        for key in data_files.keys():
            logger.info(f"Loading a local file for {key}: {data_files[key]}")

        if data_args.predict_file.endswith(".csv"):
            # Loading a dataset from local csv files
            user_dataset = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            user_dataset = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
        needed_keys = task_to_keys[data_args.task_name]
        for key in needed_keys:
            assert key in user_dataset["data"].features, f"Your supplied predict_file is missing the {key} key!"
        datasets["user_data"] = user_dataset["data"]
    # endregion

    # region Load model config and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # endregion

    # region Dataset preprocessing
    sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
    non_label_column_names = [name for name in datasets["train"].column_names if name != "label"]

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = None
    if config.label2id != PretrainedConfig(num_labels=num_labels).label2id and not is_regression:
        # Some have all caps in their config, some don't.
        label_name_to_id = {k.lower(): v for k, v in config.label2id.items()}
        if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
            label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
        else:
            logger.warning(
                "Your model seems to have been trained with labels, but they don't match the dataset: "
                f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
                "\nIgnoring the model labels as a result."
            )
            label_to_id = {label: i for i, label in enumerate(label_list)}
    if label_to_id is not None:
        config.label2id = label_to_id
        config.id2label = {id: label for label, id in config.label2id.items()}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_function(examples):
        # Tokenize the texts
        args = (
            (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
        )
        result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)

        return result

    datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache)

    # endregion

    # region Metric function
    metric = load_metric("glue", data_args.task_name)

    def compute_metrics(preds, label_ids):
        preds = preds["logits"]
        preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
        result = metric.compute(predictions=preds, references=label_ids)
        if len(result) > 1:
            result["combined_score"] = np.mean(list(result.values())).item()
        return result

    # endregion

    with training_args.strategy.scope():
        # region Load pretrained model
        if checkpoint is None:
            model_path = model_args.model_name_or_path
        else:
            model_path = checkpoint
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_path,
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        # endregion

        # region Optimizer, loss and compilation
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=training_args.learning_rate,
            beta_1=training_args.adam_beta1,
            beta_2=training_args.adam_beta2,
            epsilon=training_args.adam_epsilon,
            clipnorm=training_args.max_grad_norm,
        )
        if is_regression:
            loss_fn = tf.keras.losses.MeanSquaredError()
            metrics = []
        else:
            loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
            metrics = ["accuracy"]
        model.compile(optimizer=optimizer, loss=loss_fn, metrics=metrics)
        # endregion

        # region Convert data to a tf.data.Dataset
        tf_data = dict()
        if isinstance(training_args.strategy, tf.distribute.TPUStrategy) or data_args.pad_to_max_length:
            logger.info("Padding all batches to max length because argument was set or we're on TPU.")
            dataset_mode = "constant_batch"
        else:
            dataset_mode = "variable_batch"
        max_samples = {
            "train": data_args.max_train_samples,
            "validation": data_args.max_eval_samples,
            "validation_matched": data_args.max_eval_samples,
            "validation_mismatched": data_args.max_eval_samples,
            "test": data_args.max_predict_samples,
            "test_matched": data_args.max_predict_samples,
            "test_mismatched": data_args.max_predict_samples,
            "user_data": None,
        }
        for key in datasets.keys():
            if key == "train" or key.startswith("validation"):
                assert "label" in datasets[key].features, f"Missing labels from {key} data!"
            if key == "train":
                shuffle = True
                batch_size = training_args.per_device_train_batch_size
                drop_remainder = True  # Saves us worrying about scaling gradients for the last batch
            else:
                shuffle = False
                batch_size = training_args.per_device_eval_batch_size
                drop_remainder = False
            samples_limit = max_samples[key]
            dataset = datasets[key]
            if samples_limit is not None:
                dataset = dataset.select(range(samples_limit))
            data = convert_dataset_for_tensorflow(
                dataset,
                non_label_column_names,
                batch_size=batch_size,
                dataset_mode=dataset_mode,
                drop_remainder=drop_remainder,
                shuffle=shuffle,
            )
            tf_data[key] = data
        # endregion

        # region Training and validation
        if training_args.do_train:
            callbacks = [SavePretrainedCallback(output_dir=training_args.output_dir)]
            if training_args.do_eval and not data_args.task_name == "mnli":
                # Do both evaluation and training in the Keras fit loop, unless the task is MNLI
                # because MNLI has two validation sets
                validation_data = tf_data["validation"]
            else:
                validation_data = None
            model.fit(
                tf_data["train"],
                validation_data=validation_data,
                epochs=int(training_args.num_train_epochs),
                callbacks=callbacks,
            )
        # endregion

        # region Evaluation
        if training_args.do_eval:
            # We normally do validation as part of the Keras fit loop, but we run it independently
            # if there was no fit() step (because we didn't train the model) or if the task is MNLI,
            # because MNLI has a separate validation-mismatched validation set
            logger.info("*** Evaluate ***")

            # Loop to handle MNLI double evaluation (matched, mis-matched)
            if data_args.task_name == "mnli":
                tasks = ["mnli", "mnli-mm"]
                tf_datasets = [tf_data["validation_matched"], tf_data["validation_mismatched"]]
                raw_datasets = [datasets["validation_matched"], datasets["validation_mismatched"]]
            else:
                tasks = [data_args.task_name]
                tf_datasets = [tf_data["validation"]]
                raw_datasets = [datasets["validation"]]

            for raw_dataset, tf_dataset, task in zip(raw_datasets, tf_datasets, tasks):
                eval_predictions = model.predict(tf_dataset)
                eval_metrics = compute_metrics(eval_predictions, raw_dataset["label"])
                print(f"Evaluation metrics ({task}):")
                print(eval_metrics)

        # endregion

        # region Prediction
        if training_args.do_predict or data_args.predict_file:
            logger.info("*** Predict ***")

            # Loop to handle MNLI double evaluation (matched, mis-matched)
            tasks = []
            tf_datasets = []
            raw_datasets = []
            if training_args.do_predict:
                if data_args.task_name == "mnli":
                    tasks.extend(["mnli", "mnli-mm"])
                    tf_datasets.extend([tf_data["test_matched"], tf_data["test_mismatched"]])
                    raw_datasets.extend([datasets["test_matched"], datasets["test_mismatched"]])
                else:
                    tasks.append(data_args.task_name)
                    tf_datasets.append(tf_data["test"])
                    raw_datasets.append(datasets["test"])
            if data_args.predict_file:
                tasks.append("user_data")
                tf_datasets.append(tf_data["user_data"])
                raw_datasets.append(datasets["user_data"])

            for raw_dataset, tf_dataset, task in zip(raw_datasets, tf_datasets, tasks):
                test_predictions = model.predict(tf_dataset)
                if "label" in raw_dataset:
                    test_metrics = compute_metrics(test_predictions, raw_dataset["label"])
                    print(f"Test metrics ({task}):")
                    print(test_metrics)

                if is_regression:
                    predictions_to_write = np.squeeze(test_predictions["logits"])
                else:
                    predictions_to_write = np.argmax(test_predictions["logits"], axis=1)

                output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt")
                with open(output_predict_file, "w") as writer:
                    logger.info(f"***** Writing prediction results for {task} *****")
                    writer.write("index\tprediction\n")
                    for index, item in enumerate(predictions_to_write):
                        if is_regression:
                            writer.write(f"{index}\t{item:3.3f}\n")
                        else:
                            item = model.config.id2label[item]
                            writer.write(f"{index}\t{item}\n")
        # endregion


if __name__ == "__main__":
    main()
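(Not part of the diff.) The compute_metrics helper above reads the "logits" entry of model.predict()'s output and feeds class ids to the metric loaded with datasets.load_metric("glue", task). A small sketch with invented logits and labels, assuming the datasets library (plus scikit-learn, which the MRPC metric uses) is installed:

import numpy as np
from datasets import load_metric

metric = load_metric("glue", "mrpc")

# Hypothetical model.predict() output: a dict with a "logits" array.
predictions = {"logits": np.array([[0.2, 1.3], [2.1, -0.5], [-0.3, 0.8]])}
preds = np.argmax(predictions["logits"], axis=1)

result = metric.compute(predictions=preds, references=[1, 0, 1])
if len(result) > 1:
    # Mirrors the script: report the mean of accuracy and F1 as a single number.
    result["combined_score"] = np.mean(list(result.values())).item()
print(result)

For tasks whose metric returns more than one value (like MRPC's accuracy and F1), the script reports their mean as combined_score, exactly as above.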
@@ -205,7 +205,6 @@ class ModelArguments:
             "with private models)."
         },
     )
-    tpu: Optional[str] = field(default=None, metadata={"help": "Name of the TPU resource to use, if available"})
 
 
 # endregion
@@ -439,10 +438,8 @@ def main():
         model.compile(optimizer=optimizer, loss=loss_fn, metrics=metrics)
         # endregion
 
-        # region Convert data to TF format
+        # region Convert data to a tf.data.Dataset
 
-        # Convert data to a tf.keras.utils.Sequence object for training if we're not using a TPU
-        # For TPU, convert to a tf.data.Dataset
         tf_data = dict()
         max_samples = {
             "train": data_args.max_train_samples,
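(Not part of the diff.) The hunks above drop a manual tpu argument and rename the data-conversion region; the new run_glue.py instead asks TFTrainingArguments for its tf.distribute strategy and switches to constant-shape batches only when that strategy is a TPUStrategy. A sketch of how that check can be probed locally, with a placeholder output_dir and assuming TensorFlow and transformers are installed:

import tensorflow as tf
from transformers import TFTrainingArguments

# Placeholder arguments; "out" is just an example output directory.
training_args = TFTrainingArguments(output_dir="out", per_device_train_batch_size=8)

# transformers resolves the distribution strategy (one-device, mirrored, or TPU);
# the script pads to constant shapes only when this is a TPUStrategy.
print(isinstance(training_args.strategy, tf.distribute.TPUStrategy))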
@@ -1,265 +0,0 @@
#!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for sequence classification."""


import logging
import os
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, Optional

import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
    glue_compute_metrics,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


class Split(Enum):
    train = "train"
    dev = "validation"
    test = "test"

def get_tfds(
    task_name: str,
    tokenizer: PreTrainedTokenizer,
    max_seq_length: Optional[int] = None,
    mode: Split = Split.train,
    data_dir: str = None,
):
    if task_name == "mnli-mm" and mode == Split.dev:
        tfds_name = "mnli_mismatched"
    elif task_name == "mnli-mm" and mode == Split.train:
        tfds_name = "mnli"
    elif task_name == "mnli" and mode == Split.dev:
        tfds_name = "mnli_matched"
    elif task_name == "sst-2":
        tfds_name = "sst2"
    elif task_name == "sts-b":
        tfds_name = "stsb"
    else:
        tfds_name = task_name

    ds, info = tfds.load("glue/" + tfds_name, split=mode.value, with_info=True, data_dir=data_dir)
    ds = glue_convert_examples_to_features(ds, tokenizer, max_seq_length, task_name)
    ds = ds.apply(tf.data.experimental.assert_cardinality(info.splits[mode.value].num_examples))

    return ds


logger = logging.getLogger(__name__)

@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: Optional[str] = field(default=None, metadata={"help": "The input/output data dir for TFDS."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )

def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, GlueDataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}",
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    try:
        num_labels = glue_tasks_num_labels["mnli" if data_args.task_name == "mnli-mm" else data_args.task_name]
        output_mode = glue_output_modes[data_args.task_name]
    except KeyError:
        raise ValueError(f"Task not found: {data_args.task_name}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    # Get datasets
    train_dataset = (
        get_tfds(
            task_name=data_args.task_name,
            tokenizer=tokenizer,
            max_seq_length=data_args.max_seq_length,
            data_dir=data_args.data_dir,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        get_tfds(
            task_name=data_args.task_name,
            tokenizer=tokenizer,
            max_seq_length=data_args.max_seq_length,
            mode=Split.dev,
            data_dir=data_args.data_dir,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        if output_mode == "classification":
            preds = np.argmax(p.predictions, axis=1)
        elif output_mode == "regression":
            preds = np.squeeze(p.predictions)
        return glue_compute_metrics(data_args.task_name, preds, p.label_ids)

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")

            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results


if __name__ == "__main__":
    main()