Mirror of https://github.com/huggingface/transformers.git (synced 2025-07-23 06:20:22 +06:00)
parent f82b19cb6f
commit dc10f7906a
@@ -1618,7 +1618,10 @@ class Trainer:
                     "gradient_clipping": float(optim_args.get("gradient_clipping", 1.0)),
                 }
             )
-        elif args.optim == OptimizerNames.ADAMW_TORCH_4BIT:
+        elif args.optim in [
+            OptimizerNames.ADAMW_TORCH_4BIT,
+            OptimizerNames.ADAMW_TORCH_8BIT,
+        ]:
             if not is_torchao_available() or version.parse(importlib.metadata.version("torchao")) < version.parse(
                 "0.4.0"
             ):
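For context, the gate kept here requires torchao >= 0.4.0 before either low-bit optimizer is used. A minimal standalone sketch of that check, assuming only `packaging` is installed (the helper name is illustrative, not part of transformers):

# Hedged sketch of the torchao availability/version gate used above.
import importlib.metadata
import importlib.util

from packaging import version


def torchao_supports_low_bit_optim(min_version: str = "0.4.0") -> bool:
    # torchao must be importable and at least `min_version` for the low-bit optimizers.
    if importlib.util.find_spec("torchao") is None:
        return False
    return version.parse(importlib.metadata.version("torchao")) >= version.parse(min_version)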
@@ -1631,9 +1634,14 @@ class Trainer:
                     "You need to have `torch>2.4` in order to use torch 4-bit optimizers. "
                     "Install it with `pip install --upgrade torch` it is available on pipy. Otherwise, you need to install torch nightly."
                 )
-            from torchao.prototype.low_bit_optim import AdamW4bit
+            from torchao.prototype.low_bit_optim import AdamW4bit, AdamW8bit

-            optimizer_cls = AdamW4bit
+            if args.optim == OptimizerNames.ADAMW_TORCH_4BIT:
+                optimizer_cls = AdamW4bit
+            elif args.optim == OptimizerNames.ADAMW_TORCH_8BIT:
+                optimizer_cls = AdamW8bit
+            else:
+                raise ValueError("Invalid optimizer")
             optimizer_kwargs.update(adam_kwargs)
         elif args.optim in [
             OptimizerNames.SCHEDULE_FREE_ADAMW,
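The new branch simply picks one of two torchao classes. A hedged sketch of using those classes directly, assuming torchao >= 0.4.0 and torch > 2.4 are installed (all hyperparameters beyond `lr` are left at torchao's defaults):

import torch
from torchao.prototype.low_bit_optim import AdamW4bit, AdamW8bit

model = torch.nn.Linear(16, 16)
optim_name = "adamw_torch_8bit"  # or "adamw_torch_4bit"

# Mirror of the selection above: map the optim name to the torchao class.
optimizer_cls = AdamW8bit if optim_name == "adamw_torch_8bit" else AdamW4bit
optimizer = optimizer_cls(model.parameters(), lr=1e-3)

# Ordinary training step; the quantized optimizer state is handled inside torchao.
loss = model(torch.randn(4, 16)).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()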
@@ -154,6 +154,7 @@ class OptimizerNames(ExplicitEnum):
     ADAFACTOR = "adafactor"
     ADAMW_ANYPRECISION = "adamw_anyprecision"
     ADAMW_TORCH_4BIT = "adamw_torch_4bit"
+    ADAMW_TORCH_8BIT = "adamw_torch_8bit"
     ADEMAMIX = "ademamix"
     SGD = "sgd"
     ADAGRAD = "adagrad"
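With the new enum value, the 8-bit optimizer can be requested by name through TrainingArguments. A minimal sketch, assuming a transformers build that includes this change plus torchao >= 0.4.0 and torch > 2.4:

from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="out",  # illustrative path
    optim="adamw_torch_8bit",  # resolved to torchao.prototype.low_bit_optim.AdamW8bit
)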
@@ -5017,6 +5017,13 @@ if is_torch_available():
                 default_adam_kwargs,
             )
         )
+        optim_test_params.append(
+            (
+                TrainingArguments(optim=OptimizerNames.ADAMW_TORCH_8BIT, output_dir="None"),
+                torchao.prototype.low_bit_optim.AdamW8bit,
+                default_adam_kwargs,
+            )
+        )


 @require_torch
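Each entry appended to optim_test_params pairs a TrainingArguments instance with the expected optimizer class and default kwargs. A hedged sketch of the kind of check such an entry feeds (the helper name is illustrative, not the actual test code):

def check_optim_mapping(training_args, expected_cls, expected_kwargs):
    # Illustrative helper: resolve the optimizer from the TrainingArguments
    # and compare against the expected class and kwargs.
    from transformers import Trainer

    actual_cls, actual_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args)
    assert actual_cls is expected_cls
    for key, value in expected_kwargs.items():
        assert actual_kwargs[key] == value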