Log the right train_batch_size when using auto_find_batch_size, and also log the adjusted value separately. (#23800)

* Log right bs

* Log

* Diff message
Zachary Mueller 2023-05-26 15:09:05 -04:00 committed by GitHub
parent e724246935
commit edf7772826

@@ -1704,6 +1704,7 @@ class Trainer:
     def _inner_training_loop(
         self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None
     ):
         self._train_batch_size = batch_size
+        logger.debug(f"Currently training with a batch size of: {self._train_batch_size}")
         # Data loader and number of training steps
         train_dataloader = self.get_train_dataloader()
@@ -1811,7 +1812,7 @@ class Trainer:
         logger.info("***** Running training *****")
         logger.info(f"  Num examples = {num_examples:,}")
         logger.info(f"  Num Epochs = {num_train_epochs:,}")
-        logger.info(f"  Instantaneous batch size per device = {args.per_device_train_batch_size:,}")
+        logger.info(f"  Instantaneous batch size per device = {self._train_batch_size:,}")
         logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size:,}")
         logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
         logger.info(f"  Total optimization steps = {max_steps:,}")