Mirror of https://github.com/huggingface/transformers.git (synced 2025-07-03 21:00:08 +06:00)
Remove deprecated `evaluate_during_training` (#8852)

* Remove deprecated `evaluate_during_training`

* Update src/transformers/training_args_tf.py

Co-authored-by: Lysandre Debut <lysandre@huggingface.co>
This commit is contained in:
parent 773849415a
commit 5530299096
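For context, a minimal migration sketch in Python, assuming a transformers release that contains this commit; the removed boolean flag maps onto the new `evaluation_strategy` value:

# Migration sketch: the deprecated flag vs. its replacement.
# Before (now removed):
#     TrainingArguments(output_dir="out", evaluate_during_training=True)
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="out",
    do_eval=True,
    evaluation_strategy="steps",  # "no" (the default) disables in-training evaluation
    eval_steps=500,               # evaluate every 500 update steps
)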
@@ -3,7 +3,8 @@
 python finetune_trainer.py \
     --learning_rate=3e-5 \
     --fp16 \
-    --do_train --do_eval --do_predict --evaluate_during_training \
+    --do_train --do_eval --do_predict \
+    --evaluation_strategy steps \
     --predict_with_generate \
     --n_val 1000 \
     "$@"
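The scripts in these hunks hand their flags to finetune_trainer.py, which parses them into TrainingArguments through HfArgumentParser. A minimal sketch of that mapping (the argument values here are illustrative placeholders, not the scripts' own):

from transformers import HfArgumentParser, TrainingArguments
from transformers.trainer_utils import EvaluationStrategy

# Parse a new-style command line the way finetune_trainer.py does.
parser = HfArgumentParser(TrainingArguments)
(training_args,) = parser.parse_args_into_dataclasses(
    args=["--output_dir", "out", "--evaluation_strategy", "steps", "--eval_steps", "1000"]
)
assert training_args.evaluation_strategy == EvaluationStrategy.STEPS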
@@ -5,7 +5,8 @@ export TPU_NUM_CORES=8
 python xla_spawn.py --num_cores $TPU_NUM_CORES \
     finetune_trainer.py \
     --learning_rate=3e-5 \
-    --do_train --do_eval --evaluate_during_training \
+    --do_train --do_eval \
+    --evaluation_strategy steps \
     --prediction_loss_only \
     --n_val 1000 \
     "$@"
@@ -16,7 +16,8 @@ python finetune_trainer.py \
     --num_train_epochs=6 \
     --save_steps 3000 --eval_steps 3000 \
     --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \
-    --do_train --do_eval --do_predict --evaluate_during_training\
+    --do_train --do_eval --do_predict \
+    --evaluation_strategy steps \
     --predict_with_generate --logging_first_step \
     --task translation --label_smoothing 0.1 \
     "$@"
@@ -17,7 +17,8 @@ python xla_spawn.py --num_cores $TPU_NUM_CORES \
     --save_steps 500 --eval_steps 500 \
     --logging_first_step --logging_steps 200 \
     --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \
-    --do_train --do_eval --evaluate_during_training \
+    --do_train --do_eval \
+    --evaluation_strategy steps \
     --prediction_loss_only \
     --task translation --label_smoothing 0.1 \
     "$@"
@@ -19,6 +19,7 @@ python finetune_trainer.py \
     --save_steps 3000 --eval_steps 3000 \
     --logging_first_step \
     --max_target_length 56 --val_max_target_length $MAX_TGT_LEN --test_max_target_length $MAX_TGT_LEN \
-    --do_train --do_eval --do_predict --evaluate_during_training \
+    --do_train --do_eval --do_predict \
+    --evaluation_strategy steps \
     --predict_with_generate --sortish_sampler \
     "$@"
@@ -15,7 +15,8 @@ python finetune_trainer.py \
     --sortish_sampler \
     --num_train_epochs 6 \
     --save_steps 25000 --eval_steps 25000 --logging_steps 1000 \
-    --do_train --do_eval --do_predict --evaluate_during_training \
+    --do_train --do_eval --do_predict \
+    --evaluation_strategy steps \
     --predict_with_generate --logging_first_step \
     --task translation \
     "$@"
@@ -2,6 +2,7 @@
 import math
 import os
 
+from .trainer_utils import EvaluationStrategy
 from .utils import logging
 
 
@@ -212,13 +213,13 @@ def run_hp_search_ray(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
     # Check for `do_eval` and `eval_during_training` for schedulers that require intermediate reporting.
     if isinstance(
         kwargs["scheduler"], (ASHAScheduler, MedianStoppingRule, HyperBandForBOHB, PopulationBasedTraining)
-    ) and (not trainer.args.do_eval or not trainer.args.evaluate_during_training):
+    ) and (not trainer.args.do_eval or trainer.args.evaluation_strategy == EvaluationStrategy.NO):
         raise RuntimeError(
             "You are using {cls} as a scheduler but you haven't enabled evaluation during training. "
             "This means your trials will not report intermediate results to Ray Tune, and "
             "can thus not be stopped early or used to exploit other trials parameters. "
             "If this is what you want, do not use {cls}. If you would like to use {cls}, "
-            "make sure you pass `do_eval=True` and `evaluate_during_training=True` in the "
+            "make sure you pass `do_eval=True` and `evaluation_strategy='steps'` in the "
             "Trainer `args`.".format(cls=type(kwargs["scheduler"]).__name__)
         )
 
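A hedged usage sketch of the path this check guards: an early-stopping scheduler only works if trials report intermediate evaluation results, so `evaluation_strategy` must not be "no". Here `my_model_init`, `train_ds`, and `eval_ds` are hypothetical placeholders for a model factory and datasets:

from ray.tune.schedulers import ASHAScheduler
from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="out",
    do_eval=True,
    evaluation_strategy="steps",  # EvaluationStrategy.NO here would trigger the RuntimeError above
    eval_steps=500,
)
# my_model_init, train_ds and eval_ds are hypothetical placeholders.
trainer = Trainer(model_init=my_model_init, args=args, train_dataset=train_ds, eval_dataset=eval_ds)
best_run = trainer.hyperparameter_search(
    backend="ray",
    n_trials=8,
    scheduler=ASHAScheduler(metric="objective", mode="max"),
)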
@@ -19,7 +19,7 @@ from tensorflow.python.distribute.values import PerReplica
 
 from .modeling_tf_utils import TFPreTrainedModel
 from .optimization_tf import GradientAccumulator, create_optimizer
-from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, set_seed
+from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, EvaluationStrategy, PredictionOutput, set_seed
 from .training_args_tf import TFTrainingArguments
 from .utils import logging
 
@@ -561,7 +561,7 @@ class TFTrainer:
 
                 if (
                     self.args.eval_steps > 0
-                    and self.args.evaluate_during_training
+                    and self.args.evaluation_strategy == EvaluationStrategy.STEPS
                     and self.global_step % self.args.eval_steps == 0
                 ):
                     self.evaluate()
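Pulled out as a standalone predicate, the new trigger condition reads as below; `should_evaluate` is an illustrative helper for this page, not a method of TFTrainer:

from transformers.trainer_utils import EvaluationStrategy

def should_evaluate(args, global_step: int) -> bool:
    # Sketch of the TFTrainer guard above: evaluate only when a positive
    # eval_steps interval is configured, the strategy is STEPS, and the
    # current step lands exactly on that interval.
    return (
        args.eval_steps > 0
        and args.evaluation_strategy == EvaluationStrategy.STEPS
        and global_step % args.eval_steps == 0
    )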
@@ -34,8 +34,12 @@ class TFTrainingArguments(TrainingArguments):
             Whether to run evaluation on the dev set or not.
         do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`):
             Whether to run predictions on the test set or not.
-        evaluate_during_training (:obj:`bool`, `optional`, defaults to :obj:`False`):
-            Whether to run evaluation during training at each logging step or not.
+        evaluation_strategy (:obj:`str` or :class:`~transformers.trainer_utils.EvaluationStrategy`, `optional`, defaults to :obj:`"no"`):
+            The evaluation strategy to adopt during training. Possible values are:
+
+                * :obj:`"no"`: No evaluation is done during training.
+                * :obj:`"steps"`: Evaluation is done (and logged) every :obj:`eval_steps`.
+
         per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8):
             The batch size per GPU/TPU core/CPU for training.
         per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8):
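A minimal usage sketch for the documented field, assuming TensorFlow is installed; the values are illustrative:

from transformers import TFTrainingArguments

tf_args = TFTrainingArguments(
    output_dir="out",
    do_eval=True,
    evaluation_strategy="steps",  # "no" (the default) disables in-training evaluation
    eval_steps=500,
    per_device_train_batch_size=8,
)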