diff --git a/run_classifier_pytorch.py b/run_classifier_pytorch.py
index c8ec8ab6e23..3410a09b045 100644
--- a/run_classifier_pytorch.py
+++ b/run_classifier_pytorch.py
@@ -427,7 +427,10 @@ def main():
                         type=int,
                         default=-1,
                         help="local_rank for distributed training on gpus")
-
+    parser.add_argument('--seed',
+                        type=int,
+                        default=42,
+                        help="random seed for initialization")
     args = parser.parse_args()
 
     processors = {
@@ -444,7 +447,12 @@ def main():
         n_gpu = 1
         # print("Initializing the distributed backend: NCCL")
     print("device", device, "n_gpu", n_gpu)
-
+
+    random.seed(args.seed)
+    np.random.seed(args.seed)
+    torch.manual_seed(args.seed)
+    if n_gpu > 0: torch.cuda.manual_seed_all(args.seed)
+
     if not args.do_train and not args.do_eval:
         raise ValueError("At least one of `do_train` or `do_eval` must be True.")
 
diff --git a/run_squad_pytorch.py b/run_squad_pytorch.py
index 2a67262d96e..a1db682cd4f 100644
--- a/run_squad_pytorch.py
+++ b/run_squad_pytorch.py
@@ -745,6 +745,10 @@ def main():
                         type=int,
                         default=-1,
                         help="local_rank for distributed training on gpus")
+    parser.add_argument('--seed',
+                        type=int,
+                        default=42,
+                        help="random seed for initialization")
 
     args = parser.parse_args()
 
@@ -757,6 +761,11 @@ def main():
     # print("Initializing the distributed backend: NCCL")
     print("device", device, "n_gpu", n_gpu)
 
+    random.seed(args.seed)
+    np.random.seed(args.seed)
+    torch.manual_seed(args.seed)
+    if n_gpu > 0: torch.cuda.manual_seed_all(args.seed)
+
     if not args.do_train and not args.do_predict:
         raise ValueError("At least one of `do_train` or `do_predict` must be True.")
 