# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""

import logging
import math

import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

logger = logging.getLogger(__name__)

class ConstantLRSchedule(LambdaLR):
    """ Constant learning rate schedule.
    """
    def __init__(self, optimizer, last_epoch=-1):
        super(ConstantLRSchedule, self).__init__(optimizer, lambda _: 1.0, last_epoch=last_epoch)

class WarmupConstantSchedule(LambdaLR):
    """ Linear warmup and then constant.
        Linearly increases the learning rate multiplier from 0 to 1 over `warmup_steps` training steps.
        Keeps the learning rate multiplier equal to 1. after `warmup_steps`.
    """
    def __init__(self, optimizer, warmup_steps, last_epoch=-1):
        self.warmup_steps = warmup_steps
        super(WarmupConstantSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step < self.warmup_steps:
            return float(step) / float(max(1.0, self.warmup_steps))
        return 1.

class WarmupLinearSchedule(LambdaLR):
    """ Linear warmup and then linear decay.
        Linearly increases the learning rate multiplier from 0 to 1 over `warmup_steps` training steps.
        Linearly decreases the learning rate multiplier from 1. to 0. over the remaining `t_total - warmup_steps` steps.
    """
    def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        super(WarmupLinearSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step < self.warmup_steps:
            return float(step) / float(max(1, self.warmup_steps))
        return max(0.0, float(self.t_total - step) / float(max(1.0, self.t_total - self.warmup_steps)))

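# Minimal usage sketch (illustrative, not part of the original module): each schedule above is a
# LambdaLR subclass, so it wraps an existing optimizer and is stepped once per parameter update.
# The optimizer choice and the names `model`, `train_batches`, `compute_loss`, as well as the
# step counts, are assumptions made only for illustration.
#
#     optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#     scheduler = WarmupLinearSchedule(optimizer, warmup_steps=100, t_total=1000)
#     for batch in train_batches:
#         loss = compute_loss(model, batch)
#         loss.backward()
#         optimizer.step()
#         scheduler.step()      # update the learning rate after each optimizer step
#         optimizer.zero_grad()
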
class WarmupCosineSchedule(LambdaLR):
    """ Linear warmup and then cosine decay.
        Linearly increases the learning rate multiplier from 0 to 1 over `warmup_steps` training steps.
        Decreases the learning rate multiplier from 1. to 0. over the remaining `t_total - warmup_steps` steps following a cosine curve.
        After warmup the multiplier follows `cycles` full periods of the cosine, so the default `cycles=0.5` gives a single decay from 1. to 0.
    """
    def __init__(self, optimizer, warmup_steps, t_total, cycles=.5, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        self.cycles = cycles
        super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step < self.warmup_steps:
            return float(step) / float(max(1.0, self.warmup_steps))
        # progress after warmup
        progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
        return max(0.0, 0.5 * (1. + math.cos(math.pi * float(self.cycles) * 2.0 * progress)))

class WarmupCosineWithHardRestartsSchedule(LambdaLR):
    """ Linear warmup and then cosine cycles with hard restarts.
        Linearly increases the learning rate multiplier from 0 to 1 over `warmup_steps` training steps.
        After warmup the multiplier completes `cycles` (default=1.) cosine decays from 1. to 0.,
        jumping back to 1. at the start of each cycle (hard restart).
    """
    def __init__(self, optimizer, warmup_steps, t_total, cycles=1., last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        self.cycles = cycles
        super(WarmupCosineWithHardRestartsSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step < self.warmup_steps:
            return float(step) / float(max(1, self.warmup_steps))
        # progress after warmup
        progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1. + math.cos(math.pi * ((float(self.cycles) * progress) % 1.0))))

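# Illustrative sketch (not part of the original module) of how `cycles` shapes the two cosine
# schedules above; the dummy parameter, lr=1.0 (so the observed lr equals the multiplier), and
# the step counts are assumptions made only for illustration.
#
#     param = torch.nn.Parameter(torch.zeros(1))
#     opt = torch.optim.SGD([param], lr=1.0)
#     sched = WarmupCosineWithHardRestartsSchedule(opt, warmup_steps=10, t_total=100, cycles=2.)
#     multipliers = []
#     for _ in range(100):
#         multipliers.append(opt.param_groups[0]["lr"])
#         opt.step()
#         sched.step()
#     # With cycles=2. the multiplier decays from 1. to 0. twice, jumping back to 1. halfway
#     # through the post-warmup span; WarmupCosineSchedule with the default cycles=0.5 decays once.
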
class AdamW(Optimizer):
    """ Implements Adam algorithm with weight decay fix.

    Parameters:
        lr (float): learning rate. Default 1e-3.
        betas (tuple of 2 floats): Adam's beta parameters (b1, b2). Default: (0.9, 0.999)
        eps (float): Adam's epsilon. Default: 1e-6
        weight_decay (float): Weight decay. Default: 0.0
        correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in the Bert TF repository). Default: True.
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0)".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0)".format(betas[1]))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        correct_bias=correct_bias)
        super(AdamW, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Decay the first and second moment running average coefficient
                # In-place operations to update the averages at the same time
                exp_avg.mul_(beta1).add_(1.0 - beta1, grad)
                exp_avg_sq.mul_(beta2).addcmul_(1.0 - beta2, grad, grad)
                denom = exp_avg_sq.sqrt().add_(group['eps'])

                step_size = group['lr']
                if group['correct_bias']:  # No bias correction for Bert
                    bias_correction1 = 1.0 - beta1 ** state['step']
                    bias_correction2 = 1.0 - beta2 ** state['step']
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1

                p.data.addcdiv_(-step_size, exp_avg, denom)

                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                # Add weight decay at the end (fixed version)
                if group['weight_decay'] > 0.0:
                    p.data.add_(-group['lr'] * group['weight_decay'], p.data)

        return loss
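

if __name__ == "__main__":
    # Minimal, self-contained demo (illustrative, not part of the original module): fits a toy
    # linear regression with AdamW and WarmupLinearSchedule. The model, data, learning rate,
    # and step counts are arbitrary assumptions chosen only to exercise the API above.
    torch.manual_seed(0)
    model = torch.nn.Linear(4, 1)
    optimizer = AdamW(model.parameters(), lr=1e-3, weight_decay=0.01)
    total_steps = 100
    scheduler = WarmupLinearSchedule(optimizer, warmup_steps=10, t_total=total_steps)

    inputs = torch.randn(32, 4)
    targets = torch.randn(32, 1)
    for step in range(total_steps):
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        loss.backward()
        optimizer.step()
        scheduler.step()  # schedules are stepped once per optimizer update
        optimizer.zero_grad()
        if step % 20 == 0:
            print("step {:3d} - lr {:.2e} - loss {:.4f}".format(
                step, optimizer.param_groups[0]["lr"], loss.item()))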