Rename sanity_evaluation to eval_on_start (#31192)

* Rename sanity_evaluation to eval_on_start

* move arg back to last
commit c6c78733d7
parent c230504b36
Author: Qubitium
Date: 2024-06-03 23:32:21 +08:00 (committed by GitHub)
2 changed files with 4 additions and 4 deletions

src/transformers/trainer.py

@@ -2175,7 +2175,7 @@ class Trainer:
         grad_norm: Optional[float] = None
         self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
 
-        if args.sanity_evaluation:
+        if args.eval_on_start:
             self._evaluate(trial, ignore_keys_for_eval, skip_scheduler=True)
 
         total_batched_samples = 0

src/transformers/training_args.py

@@ -772,8 +772,8 @@ class TrainingArguments:
             that takes a boolean argument `compute_result`, which when passed `True`, will trigger the final global
             summary statistics from the batch-level summary statistics you've accumulated over the evaluation set.
-        sanity_evaluation(`bool`, *optional*, defaults to `False`):
-            Whether or not to perform a sanity check to ensure that the validation steps works correctly. It will be performed before the training.
+        eval_on_start(`bool`, *optional*, defaults to `False`):
+            Whether to perform a evaluation step (sanity check) before the training to ensure the validation steps works correctly.
     """
 
     framework = "pt"
@@ -1457,7 +1457,7 @@ class TrainingArguments:
         metadata={"help": "Break eval metrics calculation into batches to save memory."},
     )
-    sanity_evaluation: bool = field(
+    eval_on_start: bool = field(
        default=False,
        metadata={
            "help": "Whether to run through the entire `evaluation` step at the very beginning of training as a sanity check."