Fixed evaluation_strategy on epoch end bug (#7340)

* Fixed evaluation_strategy on epoch end bug

move the evaluation call outside the iteration loop

* black formatting
Wissam Antoun 2020-09-23 20:17:00 +03:00 committed by GitHub
parent 28cf873036
commit 58405a527b

@@ -826,13 +826,15 @@ class Trainer:
                             torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
 
                 epoch_pbar.update(1)
-                if self.args.evaluation_strategy == EvaluationStrategy.EPOCH:
-                    metrics = self.evaluate()
-                    self._report_to_hp_search(trial, epoch, metrics)
                 if self.args.max_steps > 0 and self.global_step >= self.args.max_steps:
                     break
             epoch_pbar.close()
             train_pbar.update(1)
+
+            if self.args.evaluation_strategy == EvaluationStrategy.EPOCH:
+                metrics = self.evaluate()
+                self._report_to_hp_search(trial, epoch, metrics)
+
             if self.args.tpu_metrics_debug or self.args.debug:
                 if is_torch_tpu_available():
                     # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
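
For readers skimming the diff above, here is a minimal, hypothetical sketch of the control flow this change produces. The names `dataloader`, `train_step`, and `evaluate` are stand-ins, not the real Trainer internals; the point is only that the EvaluationStrategy.EPOCH branch now sits after the inner batch loop, so evaluation runs once per epoch instead of once per training step.

from enum import Enum


class EvaluationStrategy(Enum):
    # Mirrors the values used by the real enum; defined locally to keep the sketch self-contained.
    NO = "no"
    STEPS = "steps"
    EPOCH = "epoch"


def train(num_epochs, dataloader, train_step, evaluate, strategy):
    # Sketch only: `dataloader`, `train_step`, and `evaluate` are hypothetical callables.
    for epoch in range(num_epochs):
        for batch in dataloader:
            train_step(batch)
            # Before this fix, the EPOCH check lived here, so it fired after every batch.
        if strategy == EvaluationStrategy.EPOCH:
            evaluate()  # after the fix: exactly once, at the end of each epoch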