Mirror of https://github.com/huggingface/transformers.git (synced 2025-07-31 10:12:23 +06:00)

* deepspeed integration
* style
* add test
* ds wants to do its own backward
* fp16 assert
* Update src/transformers/training_args.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
* style
* for clarity extract what args are being passed to deepspeed
* introduce the concept of self.wrapped_model
* s/self.wrapped_model/self.model_wrapped/
* complete transition to self.wrapped_model / self.model
* fix
* doc
* give ds its own init
* add custom overrides, handle bs correctly
* fix test
* clean up model_init logic, fix small bug
* complete fix
* collapse --deepspeed_config into --deepspeed
* style
* start adding doc notes
* style
* implement hf2ds optimizer and scheduler configuration remapping
* oops
* call get_num_training_steps absolutely when needed
* workaround broken auto-formatter
* deepspeed_config arg is no longer needed - fixed in deepspeed master
* use hf's fp16 args in config
* clean
* start on the docs
* rebase cleanup
* finish up --fp16
* clarify the supported stages
* big refactor thanks to discovering deepspeed.init_distributed
* cleanup
* revert fp16 part
* add checkpoint-support
* more init ds into integrations
* extend docs
* cleanup
* unfix docs
* clean up old code
* imports
* move docs
* fix logic
* make it clear which file it's referring to
* document nodes/gpus
* style
* wrong format
* style
* deepspeed handles gradient clipping
* easier to read
* major doc rewrite
* Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
* docs
* switch to AdamW optimizer
* style
* Apply suggestions from code review Co-authored-by: Lysandre Debut <lysandre@huggingface.co>
* clarify doc

Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
Co-authored-by: Lysandre Debut <lysandre@huggingface.co>
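The commit log above introduces a --deepspeed flag that takes a config file like the one shown below. As a minimal sketch of how such a file is wired into the HF Trainer (assuming deepspeed is installed, the config is saved as ds_config.json, and train_dataset is a dataset you have already prepared — all illustrative names, not prescribed by this file):

    # Sketch only: pass the DeepSpeed config path through TrainingArguments.
    from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments

    args = TrainingArguments(
        output_dir="output",         # hypothetical output path
        fp16=True,                   # mirrors "fp16.enabled": true in the config
        learning_rate=3e-5,          # kept in sync with optimizer.params.lr
        warmup_steps=500,            # kept in sync with scheduler.params.warmup_num_steps
        deepspeed="ds_config.json",  # the --deepspeed flag this PR introduces
    )

    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased")
    trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
    trainer.train()

Note that DeepSpeed training is normally started with the deepspeed launcher (e.g. deepspeed your_script.py --deepspeed ds_config.json) rather than plain python, so that distributed initialization is handled for you.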
JSON · 48 lines · 911 B
{
    "fp16": {
        "enabled": true,
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "hysteresis": 2,
        "min_loss_scale": 1
    },

    "zero_optimization": {
        "stage": 2,
        "allgather_partitions": true,
        "allgather_bucket_size": 2e8,
        "overlap_comm": true,
        "reduce_scatter": true,
        "reduce_bucket_size": 2e8,
        "contiguous_gradients": true,
        "cpu_offload": true
    },

    "zero_allow_untested_optimizer": true,

    "optimizer": {
        "type": "AdamW",
        "params": {
            "lr": 3e-5,
            "betas": [
                0.8,
                0.999
            ],
            "eps": 1e-8,
            "weight_decay": 3e-7
        }
    },

    "scheduler": {
        "type": "WarmupLR",
        "params": {
            "warmup_min_lr": 0,
            "warmup_max_lr": 3e-5,
            "warmup_num_steps": 500
        }
    },

    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
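To sanity-check the file before a run, it can be parsed with Python's standard json module. A minimal sketch (the filename is assumed, matching the example above):

    # Parse the config and confirm the values the Trainer cares about.
    import json

    with open("ds_config.json") as f:
        ds_config = json.load(f)

    assert ds_config["zero_optimization"]["stage"] == 2    # ZeRO stage 2
    assert ds_config["fp16"]["enabled"] is True            # mixed precision on
    assert ds_config["optimizer"]["params"]["lr"] == 3e-5  # should match --learning_rate
    assert ds_config["scheduler"]["params"]["warmup_num_steps"] == 500  # should match --warmup_steps

Keeping the optimizer and scheduler parameters consistent with the HF command-line arguments is the point of the "hf2ds optimizer and scheduler configuration remapping" mentioned in the commit log.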