* Add MADGRAD code
* Fix Lamb (non-fused variant) to work w/ PyTorch XLA
* Tweak optimizer factory args (lr/learning_rate and opt/optimizer_name), may break compat
* Use newer fn signatures for all add, addcdiv, addcmul in optimizers (see the sketch below)
* Use upcoming PyTorch native Nadam if it's available
* Cleanup lookahead opt
* Add optimizer tests
* Remove novograd.py impl as it was messy, keep nvnovograd
* Make AdamP/SGDP work in channels_last layout
* Add rectified AdaBelief mode (radabelief)
* Support a few more PyTorch optim: adamax, adagrad
parent 3cdaf5ed56
commit ac469b50da
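The "newer fn signatures" item above refers to PyTorch's move away from the deprecated scalar-first call order for in-place add/addcdiv/addcmul toward keyword alpha/value arguments. A minimal illustrative sketch of the two forms (not code from this diff; tensor names are placeholders):

import torch

p = torch.zeros(3)
g = torch.ones(3)
u = torch.ones(3)

# Deprecated ordering (warns in recent PyTorch):
#   p.add_(-0.1, g)
#   p.addcdiv_(-0.1, g, u)
# Newer keyword form used by the updated optimizers:
p.add_(g, alpha=-0.1)
p.addcdiv_(g, u, value=-0.1)
p.addcmul_(g, u, value=-0.1)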
@@ -0,0 +1,175 @@
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import math
from typing import TYPE_CHECKING, Any, Callable, Optional

import torch
import torch.optim

if TYPE_CHECKING:
    from torch.optim.optimizer import _params_t
else:
    _params_t = Any


class MADGRAD(torch.optim.Optimizer):
"""
|
||||
MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic
|
||||
Optimization.
|
||||
|
||||
.. _MADGRAD: https://arxiv.org/abs/2101.11075
|
||||
|
||||
MADGRAD is a general purpose optimizer that can be used in place of SGD or
|
||||
Adam may converge faster and generalize better. Currently GPU-only.
|
||||
Typically, the same learning rate schedule that is used for SGD or Adam may
|
||||
be used. The overall learning rate is not comparable to either method and
|
||||
should be determined by a hyper-parameter sweep.
|
||||
|
||||
MADGRAD requires less weight decay than other methods, often as little as
|
||||
zero. Momentum values used for SGD or Adam's beta1 should work here also.
|
||||
|
||||
On sparse problems both weight_decay and momentum should be set to 0.
|
||||
|
||||
Arguments:
|
||||
params (iterable):
|
||||
Iterable of parameters to optimize or dicts defining parameter groups.
|
||||
lr (float):
|
||||
Learning rate (default: 1e-2).
|
||||
momentum (float):
|
||||
Momentum value in the range [0,1) (default: 0.9).
|
||||
weight_decay (float):
|
||||
Weight decay, i.e. a L2 penalty (default: 0).
|
||||
eps (float):
|
||||
Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6).
|
||||
"""
|
||||
|
||||
    def __init__(
        self, params: _params_t, lr: float = 1e-2, momentum: float = 0.9, weight_decay: float = 0, eps: float = 1e-6,
    ):
        if momentum < 0 or momentum >= 1:
            raise ValueError(f"Momentum {momentum} must be in the range [0,1)")
        if lr <= 0:
            raise ValueError(f"Learning rate {lr} must be positive")
        if weight_decay < 0:
            raise ValueError(f"Weight decay {weight_decay} must be non-negative")
        if eps < 0:
            raise ValueError("Eps must be non-negative")

        defaults = dict(lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay)
        super().__init__(params, defaults)

    @property
    def supports_memory_efficient_fp16(self) -> bool:
        return False

    @property
    def supports_flat_params(self) -> bool:
        return True

    def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        # step counter must be stored in state to ensure correct behavior under
        # optimizer sharding
        if 'k' not in self.state:
            self.state['k'] = torch.tensor([0], dtype=torch.long)
        k = self.state['k'].item()

        for group in self.param_groups:
            eps = group["eps"]
            lr = group["lr"] + eps
            decay = group["weight_decay"]
            momentum = group["momentum"]

            ck = 1 - momentum
            lamb = lr * math.pow(k + 1, 0.5)

            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]

                if "grad_sum_sq" not in state:
                    state["grad_sum_sq"] = torch.zeros_like(p.data).detach()
                    state["s"] = torch.zeros_like(p.data).detach()
                    if momentum != 0:
                        state["x0"] = torch.clone(p.data).detach()

                if momentum != 0.0 and grad.is_sparse:
                    raise RuntimeError("momentum != 0 is not compatible with sparse gradients")

                grad_sum_sq = state["grad_sum_sq"]
                s = state["s"]

                # Apply weight decay
                if decay != 0:
                    if grad.is_sparse:
                        raise RuntimeError("weight_decay option is not compatible with sparse gradients")

                    grad.add_(p.data, alpha=decay)

                if grad.is_sparse:
                    grad = grad.coalesce()
                    grad_val = grad._values()

                    p_masked = p.sparse_mask(grad)
                    grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad)
                    s_masked = s.sparse_mask(grad)

                    # Compute x_0 from other known quantities
                    rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps)
                    x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1)

                    # Dense + sparse op
                    grad_sq = grad * grad
                    grad_sum_sq.add_(grad_sq, alpha=lamb)
                    grad_sum_sq_masked.add_(grad_sq, alpha=lamb)

                    rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps)

                    s.add_(grad, alpha=lamb)
                    s_masked._values().add_(grad_val, alpha=lamb)

                    # update masked copy of p
                    p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1)
                    # Copy updated masked p to dense p using an add operation
                    p_masked._values().add_(p_kp1_masked_vals, alpha=-1)
                    p.data.add_(p_masked, alpha=-1)
                else:
                    if momentum == 0:
                        # Compute x_0 from other known quantities
                        rms = grad_sum_sq.pow(1 / 3).add_(eps)
                        x0 = p.data.addcdiv(s, rms, value=1)
                    else:
                        x0 = state["x0"]

                    # Accumulate second moments
                    grad_sum_sq.addcmul_(grad, grad, value=lamb)
                    rms = grad_sum_sq.pow(1 / 3).add_(eps)

                    # Update s
                    s.data.add_(grad, alpha=lamb)

                    # Step
                    if momentum == 0:
                        p.data.copy_(x0.addcdiv(s, rms, value=-1))
                    else:
                        z = x0.addcdiv(s, rms, value=-1)

                        # p is a moving average of z
                        p.data.mul_(1 - ck).add_(z, alpha=ck)

        self.state['k'] += 1
        return loss
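A minimal usage sketch for the added optimizer, based only on the constructor shown above; the model, data, and loss are placeholders, not part of this diff:

import torch
import torch.nn as nn

model = nn.Linear(10, 2)                       # placeholder model
optimizer = MADGRAD(model.parameters(), lr=1e-2, momentum=0.9, weight_decay=0)

x, y = torch.randn(8, 10), torch.randint(0, 2, (8,))
optimizer.zero_grad()
loss = nn.functional.cross_entropy(model(x), y)
loss.backward()
optimizer.step()

As the docstring notes, the learning rate is not directly comparable to SGD or Adam values and should be swept independently.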
@@ -1,77 +0,0 @@
"""NovoGrad Optimizer.
|
||||
Original impl by Masashi Kimura (Convergence Lab): https://github.com/convergence-lab/novograd
|
||||
Paper: `Stochastic Gradient Methods with Layer-wise Adaptive Moments for Training of Deep Networks`
|
||||
- https://arxiv.org/abs/1905.11286
|
||||
"""
|
||||
|
||||
import torch
|
||||
from torch.optim.optimizer import Optimizer
|
||||
import math
|
||||
|
||||
|
||||
class NovoGrad(Optimizer):
|
||||
def __init__(self, params, grad_averaging=False, lr=0.1, betas=(0.95, 0.98), eps=1e-8, weight_decay=0):
|
||||
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
|
||||
super(NovoGrad, self).__init__(params, defaults)
|
||||
self._lr = lr
|
||||
self._beta1 = betas[0]
|
||||
self._beta2 = betas[1]
|
||||
self._eps = eps
|
||||
self._wd = weight_decay
|
||||
self._grad_averaging = grad_averaging
|
||||
|
||||
self._momentum_initialized = False
|
||||
|
||||
def step(self, closure=None):
|
||||
loss = None
|
||||
if closure is not None:
|
||||
loss = closure()
|
||||
|
||||
if not self._momentum_initialized:
|
||||
for group in self.param_groups:
|
||||
for p in group['params']:
|
||||
if p.grad is None:
|
||||
continue
|
||||
state = self.state[p]
|
||||
grad = p.grad.data
|
||||
if grad.is_sparse:
|
||||
raise RuntimeError('NovoGrad does not support sparse gradients')
|
||||
|
||||
v = torch.norm(grad)**2
|
||||
m = grad/(torch.sqrt(v) + self._eps) + self._wd * p.data
|
||||
state['step'] = 0
|
||||
state['v'] = v
|
||||
state['m'] = m
|
||||
state['grad_ema'] = None
|
||||
self._momentum_initialized = True
|
||||
|
||||
for group in self.param_groups:
|
||||
for p in group['params']:
|
||||
if p.grad is None:
|
||||
continue
|
||||
state = self.state[p]
|
||||
state['step'] += 1
|
||||
|
||||
step, v, m = state['step'], state['v'], state['m']
|
||||
grad_ema = state['grad_ema']
|
||||
|
||||
grad = p.grad.data
|
||||
g2 = torch.norm(grad)**2
|
||||
grad_ema = g2 if grad_ema is None else grad_ema * \
|
||||
self._beta2 + g2 * (1. - self._beta2)
|
||||
grad *= 1.0 / (torch.sqrt(grad_ema) + self._eps)
|
||||
|
||||
if self._grad_averaging:
|
||||
grad *= (1. - self._beta1)
|
||||
|
||||
g2 = torch.norm(grad)**2
|
||||
v = self._beta2*v + (1. - self._beta2)*g2
|
||||
m = self._beta1*m + (grad / (torch.sqrt(v) + self._eps) + self._wd * p.data)
|
||||
bias_correction1 = 1 - self._beta1 ** step
|
||||
bias_correction2 = 1 - self._beta2 ** step
|
||||
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
|
||||
|
||||
state['v'], state['m'] = v, m
|
||||
state['grad_ema'] = grad_ema
|
||||
p.data.add_(-step_size, m)
|
||||
return loss
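For readers comparing this removed implementation with the retained nvnovograd one, the defining feature of NovoGrad is that its second moment is a single scalar per parameter tensor (layer-wise) rather than element-wise as in Adam. A small illustrative sketch of that normalization step, with placeholder shapes and values, not code from this diff:

import torch

grad = torch.randn(64, 128)                  # gradient of one layer (placeholder shape)
beta2, eps = 0.98, 1e-8

g2 = torch.norm(grad) ** 2                   # scalar: squared L2 norm over the whole tensor
v = torch.zeros(())                          # layer-wise second moment (one scalar per layer)
v = beta2 * v + (1. - beta2) * g2            # EMA of the squared layer norm
normalized = grad / (torch.sqrt(v) + eps)    # every element of the layer scaled by the same factor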