Fix a bug in evaluation logging

erenup 2019-08-19 16:38:52 +08:00
parent b8fde43868
commit 4270d3da1b


@@ -253,7 +253,7 @@ def evaluate(args, model, tokenizer, prefix=""):
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
writer.write("model =%s\n" % str(args.model_name_or_path))
writer.write("total batch size=%d\n" % (args.train_batch_size * args.gradient_accumulation_steps *
writer.write("total batch size=%d\n" % (args.per_gpu_train_batch_size * args.gradient_accumulation_steps *
(torch.distributed.get_world_size() if args.local_rank != -1 else 1)))
writer.write("train num epochs=%d\n" % args.num_train_epochs)
writer.write("fp16 =%s\n" % args.fp16)