mirror of
https://github.com/huggingface/transformers.git
synced 2025-07-06 14:20:04 +06:00

* [WIP] add support for bf16 mode * prep for bf16 * prep for bf16 * fix; zero2/bf16 is ok * check bf16 is available * test fixes * enable zero3_bf16 * config files * docs * split stage_dtype; merge back to non-dtype-specific config file * fix doc * cleanup * cleanup * bfloat16 => bf16 to match the PR changes * s/zero_gather_fp16_weights_on_model_save/zero_gather_16bit_weights_on_model_save/; s/save_fp16_model/save_16bit_model/ * test fixes/skipping * move * fix * Update docs/source/main_classes/deepspeed.mdx Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * backticks * cleanup * cleanup * cleanup * new version * add note about grad accum in bf16 Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
58 lines
1.4 KiB
JSON
{
    "fp16": {
        "enabled": "auto",
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "initial_scale_power": 16,
        "hysteresis": 2,
        "min_loss_scale": 1
    },

    "optimizer": {
        "type": "AdamW",
        "params": {
            "lr": "auto",
            "betas": "auto",
            "eps": "auto",
            "weight_decay": "auto"
        }
    },

    "scheduler": {
        "type": "WarmupLR",
        "params": {
            "warmup_min_lr": "auto",
            "warmup_max_lr": "auto",
            "warmup_num_steps": "auto"
        }
    },

    "zero_optimization": {
        "stage": 3,
        "offload_optimizer": {
            "device": "cpu",
            "pin_memory": true
        },
        "offload_param": {
            "device": "cpu",
            "pin_memory": true
        },
        "overlap_comm": true,
        "contiguous_gradients": true,
        "sub_group_size": 1e9,
        "reduce_bucket_size": "auto",
        "stage3_prefetch_bucket_size": "auto",
        "stage3_param_persistence_threshold": "auto",
        "stage3_max_live_parameters": 1e9,
        "stage3_max_reuse_distance": 1e9,
        "stage3_gather_16bit_weights_on_model_save": true
    },

    "gradient_accumulation_steps": "auto",
    "gradient_clipping": "auto",
    "steps_per_print": 2000,
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "wall_clock_breakdown": false
}