mirror of
https://github.com/huggingface/transformers.git
synced 2025-07-31 02:02:21 +06:00
Clarify definition of seed argument in TrainingArguments (#9903)
* Clarify definition of seed argument in Trainer * Update src/transformers/training_args.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/training_args_tf.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Fix style * Update src/transformers/training_args.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
This commit is contained in:
parent
40cfc355f1
commit
22121e813e
@ -148,7 +148,9 @@ class TrainingArguments:
|
||||
no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
||||
Whether to avoid using CUDA even when it is available.
|
||||
seed (:obj:`int`, `optional`, defaults to 42):
|
||||
Random seed for initialization.
|
||||
Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the
|
||||
:func:`~transformers.Trainer.model_init` function to instantiate the model if it has some randomly
|
||||
initialized parameters.
|
||||
fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
||||
Whether to use 16-bit (mixed) precision training (through NVIDIA Apex) instead of 32-bit training.
|
||||
fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'):
|
||||
@ -337,7 +339,7 @@ class TrainingArguments:
|
||||
},
|
||||
)
|
||||
no_cuda: bool = field(default=False, metadata={"help": "Do not use CUDA even when it is available"})
|
||||
seed: int = field(default=42, metadata={"help": "random seed for initialization"})
|
||||
seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."})
|
||||
|
||||
fp16: bool = field(
|
||||
default=False,
|
||||
|
@ -111,7 +111,7 @@ class TFTrainingArguments(TrainingArguments):
|
||||
no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
||||
Whether to avoid using CUDA even when it is available.
|
||||
seed (:obj:`int`, `optional`, defaults to 42):
|
||||
Random seed for initialization.
|
||||
Random seed that will be set at the beginning of training.
|
||||
fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):
|
||||
Whether to use 16-bit (mixed) precision training (through NVIDIA Apex) instead of 32-bit training.
|
||||
fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'):
|
||||
|
Loading…
Reference in New Issue
Block a user