# Copyright 2022 Xiaomi Corp. (authors: Daniel Povey)
#
# See ../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
from lhotse.utils import fix_random_seed
import torch
import random
from torch import Tensor
from torch.optim import Optimizer
class NeutralGradient(Optimizer):
"""
Implements 'Neutral Gradient' update (a play on "natural gradient"). The aim is to
multiply the gradient by a symmetric positive definite matrix that transforms
the covariance of gradients to be the same as the covariance of parameters.
    (Then multiply by the learning rate.)  Since, at any given time, we only have
    one parameter tensor, it does not strictly make sense to speak of the covariance
    matrix of parameters; instead, we do this separately for every axis of every
    tensor, viewing the indexes into all the other axes of that tensor as a
    collection of vectors whose covariance we can compute.  (For example, for a
    parameter of shape (256, 3, 3) and the axis of size 256, we get 9 vectors of
    dimension 256.)  There may not be enough such vectors to estimate a non-singular
    covariance matrix; in any case, we smooth with a diagonal matrix.  (If the
    dimension is too large, we may just use the diagonal only.)
Args:
params: The parameters or param_groups to optimize (like other Optimizer subclasses)
lr: The learning rate. We will typically use a learning rate schedule that starts
at 0.03 and decreases over time, i.e. much higher than other common
optimizers.
      betas: Smoothing constants for the momentum (betas[0]), the moving average of the
           squared gradient (betas[1]), and the gradient-covariance stats (betas[2])
scale_speed: This scales the learning rate for purposes of learning the overall scale
of each tensor
eps: An epsilon to prevent division by zero
param_eps: An epsilon on the rms value of the parameter, such that when the parameter
gets smaller than this we'll start using a fixed learning rate, not
decreasing with the parameter norm.
      cond_eps: An epsilon that limits the condition number of the gradient and parameter
           covariance matrices
      param_max: To prevent parameter tensors from getting too large, we will clip elements to
           the range -param_max..param_max.
      min_diag_smooth: The minimum proportion by which we smooth the gradient and parameter
           covariance matrices towards their diagonals
      max_size: We will only use the full-covariance (non-diagonal) update for
           dimensions with size <= max_size; for larger dimensions we will use a
           diagonal formula
stats_period: Every `stats_period` steps, we will accumulate stats for one of the dimensions
of the tensor
estimate_period: Every `estimate_period` steps, we will re-estimate projections for
the dimensions we are not treating diagonally
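
    Example (a minimal usage sketch; the two-layer model below is purely
    illustrative and not part of this file):

        >>> model = torch.nn.Sequential(torch.nn.Linear(100, 100),
        ...                             torch.nn.ReLU(),
        ...                             torch.nn.Linear(100, 10))
        >>> optimizer = NeutralGradient(model.parameters(), lr=0.03)
        >>> loss = model(torch.randn(8, 100)).pow(2).mean()
        >>> loss.backward()
        >>> optimizer.step()
        >>> optimizer.zero_grad()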
"""
def __init__(
self,
params,
lr=1e-2,
betas=(0.9, 0.98, 0.99),
scale_speed=0.05,
eps=1e-8,
param_eps=1.0e-05,
cond_eps=1.0e-10,
param_max=10.0,
min_diag_smooth=0.2,
max_size=1023,
stats_period=1,
estimate_period=200,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError(
"Invalid beta parameter at index 0: {}".format(betas[0])
)
if not 0.0 < betas[1] < 1.0:
raise ValueError(
"Invalid beta parameter at index 1: {}".format(betas[1])
)
if not 0.0 < betas[2] < 1.0:
raise ValueError(
"Invalid beta parameter at index 2: {}".format(betas[2])
)
if not 0 < scale_speed < 1.0:
raise ValueError("Invalid scale_speed value: {}".format(scale_speed))
if not 0.0 < param_eps < 0.1:
raise ValueError("Invalid param_eps value: {}".format(param_eps))
if not 0.1 < param_max <= 10000.0:
raise ValueError("Invalid param_max value: {}".format(param_max))
if not stats_period > 0:
raise ValueError("Invalid stats_period value: {}".format(stats_period))
if not estimate_period > 0:
raise ValueError("Invalid estimate_period value: {}".format(estimate_period))
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
            scale_speed=scale_speed,
param_eps=param_eps,
cond_eps=cond_eps,
min_diag_smooth=min_diag_smooth,
param_max=param_max,
max_size=max_size,
stats_period=stats_period,
estimate_period=estimate_period,
)
super(NeutralGradient, self).__init__(params, defaults)
def __setstate__(self, state):
super(NeutralGradient, self).__setstate__(state)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
beta1, beta2, beta3 = group["betas"]
lr = group["lr"]
scale_speed = group["scale_speed"]
param_eps = group["param_eps"]
eps = group["eps"]
cond_eps = group["cond_eps"]
min_diag_smooth = group["min_diag_smooth"]
param_max = group["param_max"]
max_size = group["max_size"]
stats_period = group["stats_period"]
estimate_period = group["estimate_period"]
for p in group["params"]:
if p.grad is None:
continue
# Perform optimization step
grad = p.grad
if grad.is_sparse:
raise RuntimeError(
"Cain does not support sparse gradients"
)
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Parameter step (used to implement momentum)
state["delta"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
# Exponential moving average of squared gradient value
state["exp_avg_sq"] = torch.zeros_like(p)
kwargs = {'device':p.device, 'dtype':p.dtype}
if p.numel() > 1:
is_one_axis = (p.numel() in list(p.shape)) # e.g. a bias
if not is_one_axis:
state["ref_exp_avg_sq"] = torch.ones_like(p)
else:
state["param_rms"] = torch.zeros((), **kwargs)
# we learn the scale on the parameters in a way that
# also emulates Eve. scale_exp_avg_sq is the
# moving-average square of the gradient w.r.t. this
# scalar scale.
state["scale_exp_avg_sq"] = torch.zeros((), **kwargs)
for dim in range(p.ndim):
size = p.shape[dim]
if size == 1 or size == p.numel():
continue
## TODO: if size == p.numel(), it is the "simple" update
elif size > max_size:
# diagonal only...
state[f"grad_cov_{dim}"] = torch.zeros(size, **kwargs)
state[f"proj_{dim}"] = torch.ones(size, **kwargs)
else:
state[f"grad_cov_{dim}"] = torch.zeros(size, size,
**kwargs)
state[f"proj_{dim}"] = torch.eye(size, **kwargs)
delta = state["delta"]
step = state["step"]
delta.mul_(beta1)
if p.numel() > 1:
is_one_axis = (p.numel() in list(p.shape)) # e.g. a bias
if True:
# This block learns the scale on the parameters.
# scale_deriv is the derivative w.r.t. a log-space scale
# on the parameters, i.e. if we imagine: p =
# underlying_param * scale.exp(), scale_deriv is the
# derivative w.r.t. scale (it does not matter
# what value `scale` currently has).
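                        # Derivation (for reference): with p = underlying_param * scale.exp(),
                        # we have dp/dscale = underlying_param * scale.exp() = p, so
                        # dloss/dscale = (grad * dp/dscale).sum() = (p * grad).sum().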
scale_deriv = (p * grad).sum()
scale_exp_avg_sq = state["scale_exp_avg_sq"]
scale_exp_avg_sq.mul_(beta2).addcmul_(scale_deriv, scale_deriv,
value=1 - beta2)
# should actually be step + 1, so on 1st minibatch we are not learning
# anything here. May fix this at some point.
scale_bias_correction2 = 1 - beta2 ** (step + 1)
scale_denom = (scale_exp_avg_sq.sqrt()).add_(eps)
scale_alpha = -lr * (scale_bias_correction2 ** 0.5) * scale_speed * (1-beta1)
# scale_delta is going to be the change in log-scale (before momentum), i.e. if
# p = underlying_param * scale.exp(),
# delta is the change in `scale`.
scale_delta = scale_alpha * (scale_deriv / scale_denom)
delta.add_(p, alpha=scale_delta)
exp_avg_sq = state["exp_avg_sq"]
# Decay the second moment running average coefficient
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
bias_correction2 = 1 - beta2 ** (step + 1)
if is_one_axis:
param_rms = state["param_rms"]
if step % 10 == 0:
param_rms.copy_((p**2).mean().sqrt())
grad_rms = (exp_avg_sq.sqrt()).add_(eps)
this_delta = grad / grad_rms
alpha = (-(1-beta1) * lr * bias_correction2) * param_rms.clamp(min=param_eps)
delta.add_(this_delta, alpha=alpha)
else:
# The full update.
# First, if needed, accumulate per-dimension stats from the grad
if step % stats_period == 0:
self._accumulate_per_dim_stats(grad, state, beta3, eps)
if step % estimate_period == 0 or step in [50, 200, 400]:
self._estimate(p, state, beta3, max_size,
stats_period, estimate_period,
eps, param_eps,
cond_eps, min_diag_smooth)
ref_exp_avg_sq = state["ref_exp_avg_sq"] # computed in self._estimate()
# do ** 0.25, not ** 0.5, because we divide this into two factors:
# one to be applied before the grad preconditioning, and one after.
# This keeps the overall transformation on the gradient symmetric
# (if viewed as a matrix of shape (p.numel(), p.numel())
# Note: ref_exp_avg_sq had eps*eps added to it when we created it.
grad_scale = (ref_exp_avg_sq / (exp_avg_sq/bias_correction2 + eps*eps)) ** 0.25
if random.random() < 0.02:
print(f"grad_scale mean = {grad_scale.mean()}, shape = {p.shape}")
cur_grad = grad * grad_scale
cur_grad = self._precondition_grad(cur_grad, state)
cur_grad *= grad_scale
if True: # testing
# in principle, the cur_grad is supposed to have the same rms as params, on average.
cur_grad_rms = (cur_grad**2).mean().sqrt()
param_rms = (p**2).mean().sqrt()
#print(f"cur_grad_rms={cur_grad_rms}, param_rms={param_rms}")
if random.random() < 0.1:
prod = (grad*cur_grad).mean()
cos_angle = prod / ((grad**2).mean() * (cur_grad**2).mean()).sqrt()
if random.random() < 0.01 or cos_angle < 0.01:
print(f"cos_angle = {cos_angle}, shape={grad.shape}")
alpha = -lr * (1-beta1)
delta.add_(cur_grad, alpha=alpha)
else:
# p.numel() == 1
# Decay the second moment running average coefficient
exp_avg_sq = state["exp_avg_sq"]
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
bias_correction2 = 1 - beta2 ** (step + 1)
denom = (exp_avg_sq.sqrt()).add_(eps)
this_delta = grad / denom
alpha = -lr*(1-beta1)*(bias_correction2 ** 0.5)
delta.add_(this_delta, alpha=alpha)
if random.random() < 0.0005:
print(f"Delta rms = {(delta**2).mean().item()}, shape = {delta.shape}")
max_abs = delta.abs().max()
if max_abs > 1.0:
print("Max_abs_change = ", max_abs)
p.add_(delta)
if step % 10 == 0:
p.clamp_(min=-param_max, max=param_max)
state["step"] += 1
return loss
def _accumulate_per_dim_stats(self,
grad: Tensor,
state: dict,
beta3: float,
eps: float) -> None:
"""
Accumulate some stats for each dimension of the tensor, about the covariance of gradients.
"""
ndim = grad.ndim
for dim in range(ndim):
size = grad.shape[dim]
if size == 1:
continue
grad_cov = state[f"grad_cov_{dim}"]
if grad_cov.ndim == 1:
# We are treating this dimension diagonally because it is too big.
other_dims = [ i for i in range(ndim) if i != dim]
grad_cov.mul_(beta3).add_((grad**2).mean(dim=other_dims))
else:
# full-covariance stats.
this_beta3 = self._get_this_beta3(beta3, grad.numel(), size)
grad_cov.mul_(this_beta3).add_(self._get_cov(grad, dim),
alpha=(1.0-this_beta3))
def _estimate(self,
p: Tensor,
state: dict,
beta3: float,
max_size: int,
stats_period: int,
estimate_period: int,
eps: float,
param_eps: float,
cond_eps: float,
min_diag_smooth: float
                  ) -> None:
"""
Estimate the per-dimension projections proj_0, proj_1 and so on, and
ref_exp_avg_sq.
"""
kwargs = {'device':p.device, 'dtype':p.dtype}
ref_exp_avg_sq = torch.ones([1] * p.ndim, **kwargs)
ndim = p.ndim
step = state["step"]
assert step % stats_period == 0
norm_step = step // stats_period
def update_ref_exp_avg_sq(ref_exp_avg_sq, grad_cov_smoothed, dim):
"""
Updates ref_exp_avg_sq, taking into account `grad_cov_smoothed` which is
a tensor of shape (p.shape[dim],) containing the smoothed diagonal covariance
of grads for dimension `dim`.
Returns new ref_exp_avg_sq
"""
if dim > 0:
# We only want to "count" the overall scale of the gradients' variance
# (i.e. mean variance) once, so for all dims other than 0 we normalize
# this out.
grad_cov_smoothed = grad_cov_smoothed / grad_cov_smoothed.mean()
for _ in range(dim + 1, ndim):
grad_cov_smoothed = grad_cov_smoothed.unsqueeze(-1)
return ref_exp_avg_sq * grad_cov_smoothed
for dim in range(ndim):
size = p.shape[dim]
if size == 1:
continue
grad_cov = state[f"grad_cov_{dim}"]
proj = state[f"proj_{dim}"]
if grad_cov.ndim == 1:
# This dim is treated diagonally, because it is too large.
other_dims = [ i for i in range(ndim) if i != dim]
bias_correction3 = 1 - beta3 ** (norm_step + 1)
# _smoothed means we have the eps terms.
param_var_smoothed = (p**2).mean(dim=other_dims) + param_eps*param_eps
if bias_correction3 < 0.9999:
grad_cov = grad_cov / bias_correction3
grad_var_smoothed = grad_cov + eps*eps
ref_exp_avg_sq = update_ref_exp_avg_sq(ref_exp_avg_sq,
grad_var_smoothed,
dim)
if dim != 0:
# If dim != 0, make the grad_var_smoothed and
# param_var_smoothed have the same mean, because when
# preconditioning gradients, we only want to count the
# overall change in scale once.
grad_var_smoothed *= (param_var_smoothed.sum() / grad_var_smoothed.sum())
proj[:] = (param_var_smoothed / grad_var_smoothed).sqrt()
else:
param_cov_smoothed = self._estimate_and_smooth_param_cov(p, dim,
param_eps,
cond_eps=cond_eps,
min_diag_smooth=min_diag_smooth)
grad_cov_smoothed = self._smooth_grad_cov(p, grad_cov,
eps, norm_step,
beta3,
cond_eps=cond_eps,
min_diag_smooth=min_diag_smooth)
ref_exp_avg_sq = update_ref_exp_avg_sq(ref_exp_avg_sq,
grad_cov_smoothed.diag(),
dim)
if dim != 0:
# If dim != 0, make the traces of grad_cov_smoothed and
# param_cov_smoothed be the same, because when
# preconditioning gradients, we only want to count the
# overall change in scale once.
grad_cov_smoothed *= (param_cov_smoothed.diag().sum() /
grad_cov_smoothed.diag().sum())
proj[:] = self._estimate_proj(grad_cov_smoothed,
param_cov_smoothed)
state["ref_exp_avg_sq"][:] = ref_exp_avg_sq
def _get_this_beta3(self, beta3: float, numel: int, size: int):
"""
Get a discounting value beta3 (e.g. 0.99) that is used in computing moving
averages of stats. The idea is that the stats are accumulated from a tensor
        x that has `numel` elements and `size == x.shape[dim]`, and we want the covariance
        of shape (size, size).  We may need to make beta3 closer to 1 if this means
        we'll be getting only very rank-deficient stats each time.
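        For example (illustrative numbers only): with beta3 = 0.99, numel = 2048
        and size = 512, we get rank_per_iter = 2048 // 512 = 4, so
        grad_num_steps_needed = 4.0 * 512 / 4 = 512 and we return
        max(0.99, 1 - 1/512) ~= 0.998, i.e. the stats are decayed more slowly
        because each step contributes only a rank-4 update.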
"""
        rank_per_iter = numel // size  # maximum rank of each iteration's covariance
safety_factor = 4.0 # should be > 1.0
grad_num_steps_needed = safety_factor * size / rank_per_iter
return max(beta3, 1 - 1. / grad_num_steps_needed)
def _precondition_grad(self,
grad: Tensor,
state: dict) -> Tensor:
"""
Multiply the grad by a positive-semidefinite matrix for each dimension, to
try to make its covariance the same as that of the parameters (up to a
scalar constant).
We know at this point that p has more than one non-trivial dimension/axis
(i.e. >1 dimension with size != 1).
        Args:
          grad: the gradient to precondition (will not be modified by this function)
          state: the state dict for this parameter, where we will look up the
              projections proj_0, proj_1, ...
"""
cur_grad = grad
ndim = grad.ndim
step = state["step"]
for dim in range(ndim):
size = grad.shape[dim]
if size == 1:
continue
proj = state[f"proj_{dim}"]
if proj.ndim == 1:
# We are treating this dimension diagonally because it is too big.
proj = proj.reshape(size, *([1] * (ndim-1-dim)))
cur_grad = cur_grad * proj
else:
cur_grad = self._multiply_on_dim(cur_grad, proj, dim)
return cur_grad
def _estimate_and_smooth_param_cov(self, p: Tensor, dim: int,
param_eps: float,
cond_eps: float = 1.0e-10,
min_diag_smooth: float = 0.2) -> Tensor:
"""
Compute a smoothed version of a covariance matrix for one dimension of
a parameter tensor, estimated by treating the other
dimensions as a batch index.
p: The parameter tensor from which we are estimating the covariance
        dim: The dimension that we want the covariances for.
param_eps: A small epsilon value that represents the minimum root-mean-square
parameter value that we'll estimate.
cond_eps: An epsilon value that limits the condition number of the resulting
matrix. Caution: this applies to the variance, not the rms value,
so should be quite small.
Returns:
A non-singular covariance matrix of shape (p.shape[dim], p.shape[dim])
"""
p = p.transpose(dim, -1)
p = p.reshape(-1, p.shape[-1])
(num_outer_products, size) = p.shape
param_cov = torch.matmul(p.t(), p) / p.shape[0]
# later we may be able to find a more principled formula for this.
diag_smooth = max(min_diag_smooth, size / (size + num_outer_products))
diag = param_cov.diag()
extra_diag = (diag * diag_smooth) + (diag.max() * cond_eps +
param_eps * param_eps)
param_cov.mul_(1-diag_smooth).add_(extra_diag.diag())
return param_cov
def _smooth_grad_cov(self,
p: Tensor,
grad_cov: Tensor,
eps: float,
norm_step: int,
beta3: float,
cond_eps: float = 1.0e-10,
min_diag_smooth: float = 0.2) -> Tensor:
"""
Compute a smoothed version of a covariance matrix for one dimension of
a gradient tensor. The covariance stats are supplied; this function
is responsible for making sure it is nonsingular and smoothed with its
diagonal in a reasonably smart way.
p: The parameter tensor from which we are estimating the covariance
of the gradient (over one of its dimensions); only needed for the shape
grad_cov: The moving-average covariance statistics, to be smoothed
eps: An epsilon value that represents a root-mean-square gradient
magnitude, used to avoid zero values
          norm_step: The step count that reflects how many times we have updated
              grad_cov; we assume we have updated it (norm_step + 1) times.  This
              is just step divided by stats_period.
beta3: The user-supplied beta value for decaying the gradient covariance
stats
cond_eps: An epsilon value that limits the condition number of the resulting
matrix. Caution: this applies to the variance, not the rms value,
so should be quite small.
min_diag_smooth: A minimum proportion by which we smooth the covariance
with the diagonal.
Returns:
A non-singular covariance matrix of gradients, of shape (p.shape[dim], p.shape[dim])
"""
size = grad_cov.shape[0]
this_beta3 = self._get_this_beta3(beta3, p.numel(), size)
bias_correction3 = 1 - this_beta3 ** (norm_step + 1)
if bias_correction3 < 0.9999:
grad_cov = grad_cov / bias_correction3
        rank_per_iter = p.numel() // size  # maximum rank of each iteration's covariance
        # the second part of the following formula roughly represents the number of
        # recent steps that have a "large" weight in the stats.
        num_iters_in_stats = min(norm_step + 1, 1.0 / (1 - beta3))
        # num_outer_products is generally supposed to represent the number of
        # individual outer products that are represented with significant weight
        # in grad_cov
num_outer_products = rank_per_iter * num_iters_in_stats
diag_smooth = max(min_diag_smooth,
size / (size + num_outer_products))
diag = grad_cov.diag()
extra_diag = (diag * diag_smooth) + (diag.max() * cond_eps +
eps * eps)
grad_cov.mul_(1-diag_smooth).add_(extra_diag.diag())
return grad_cov
def _get_cov(self, x: Tensor, dim: int) -> Tensor:
"""
Computes the covariance of x over dimension `dim`, returning something
of shape (x.shape[dim], x.shape[dim])
"""
x = x.transpose(dim, -1)
x = x.reshape(-1, x.shape[-1])
return torch.matmul(x.t(), x) / x.shape[0]
def _multiply_on_dim(self, x: Tensor, proj: Tensor, dim: int) -> Tensor:
"""
Matrix-multiplies x by `proj` on x's dimension numbered `dim`
Args:
x: Tensor to multiply, of arbitrary shape
proj: Symmetric matrix to multiply x by, of shape (x.shape[dim], x.shape[dim])
"""
return torch.matmul(x.transpose(-1, dim),
proj).transpose(-1, dim)
def _estimate_proj(self, grad_cov: Tensor, param_cov: Tensor) -> Tensor:
"""
Return a symmetric positive definite matrix P such that
(P grad_cov P^T == param_cov),
i.e. a descent direction that makes the covariance of steps look
        like the covariance of the parameters.  This will be normalized so
that the mean of its diagonal is 1.0 (since we'll have a separate
such projection per dim of the tensor, and we don't want to count
the scalar factor many times).
Args:
grad_cov: Covariance of gradients, a symmetric-positive-definite
matrix [except for roundoff!] of shape (size, size)
param_cov: Covariance of parameters, a symmetric-positive-definite
matrix [except for roundoff!] of shape (size, size)
Returns:
A matrix P such that (alpha P grad_cov P^T == param_cov)
"""
# symmetrize grad_cov and param_cov. The projection P only cares about the
# relative sizes, so doubling both of them does not matter.
G = grad_cov + grad_cov.t()
C = param_cov + param_cov.t()
U, S, V = C.svd()
# Using G == grad_cov and C == param_cov, and not writing transpose because
# all these quantities are symmetric, we can use:
#
# P = C^{0.5} (C^{0.5} G C^{0.5})^{-0.5} C^{0.5}
#
# You can verify fairly easily that P G P == C: multiply on left and right
# by C^{-0.5} on each side and you can see that both sides equal I.
#
# We can reduce the amount of roundoff by, after decomposing C
# with SVD as (U S U^T), writing C^{0.5} = U Q = Q^T U^T, with Q = S^0.5 U^T.
#
# Then we can write P as follows:
# P = Q^T U^T (U Q G Q^T U^T)^{-0.5} U Q,
# and we easily show that for orthogonal U, (U X U^T)^{-0.5} = U X^{-0.5} U^T, so we
# can do this and cancel U^T U = U U^T = I, giving:
# P = Q^T (Q G Q^T)^{-0.5} Q.
# because C is symmetric, C == U S U^T, we can ignore V.
Q = (U * S.sqrt()).t()
X = torch.matmul(Q, torch.matmul(G, Q.t()))
U2, S2, _ = X.svd()
# Because X^{-0.5} = U2 S2^{-0.5} U2^T, we have
# P = Q^T U2 S2^{-0.5} U2^T Q
# = Y Y^T, where
# Y = Q^T U2 S2^{-0.25}
S2_pow = (S2 + 1.0e-35) ** -0.25
Y = torch.matmul(Q.t(), U2) * S2_pow
P = torch.matmul(Y, Y.t())
if random.random() < 0.1:
# TODO: remove this testing code.
assert (P - P.t()).abs().mean() < 0.01 # make sure symmetric.
# testing... note, this is only true modulo "eps"
C_check = torch.matmul(torch.matmul(P, G), P)
# C_check should equal C
C_diff = C_check - C
# Roundoff can cause significant differences, so use a fairly large
            # threshold of 0.01.  We may increase this later or even remove the check.
assert C_diff.abs().mean() < 0.01 * C.diag().mean()
return P
class Cain(Optimizer):
"""Implements Cain algorithm, which is a substantially modified form of the
Adam optimizer.
The modifications can be summarized as follows:
(i) change Adam to what we called "Eve", which adds an extra configuration
value, "target_rms" (default value: 0.1); and for non-scalar parameters,
if their root-mean-square value exceeds "target_rms", we apply weight
decay (aggressive enough weight decay that the rms stays at target_rms).
This weight decay is applied in the same way as in AdamW, i.e. by
parameter shrinkage rather than by modifying the gradients.
(ii) By limiting the rms to 0.1, we reduce our modeling power; we restore
it by replacing all weights and biases-- in fact, all model parameters--
with expressions like
weight * weight_scale.exp() or bias * bias_scale.exp().
This has the benefit of making the learnable offsets and scales of
BatchNorm/LayerNorm unnecessary.
Below we describe the changes from Eve to Cain:
(iii) We removed all the weight_scale and bias_scale parameters from
the model and "moved them into the optimizer". Now we allow
parameters to have any rms value (that's <= 10), but we:
(a) make the update speed for a parameter proportional to
its rms value.
(b) Multiply the learning rate by 10 because we are getting rid of
target_rms=0.1
(c) simulate the effect of learning the weight_scale or bias_scale
in log space using standard Adam, with a scale to prevent it
from learning too fast. (see scale_exp_avg_sq in the
code below).
The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
Eve is unpublished so far.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.98))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
scale_speed (float, optional): we multiply the learning rate by this value
when learning (in log space) the scales of the parameter tensors
        rms_eps (float, optional): epsilon value such that when a parameter's
            rms value goes below this, we stop decreasing the learning speed
            for that parameter, i.e. we treat the rms as if it were rms_eps.
            In practice, rms values will not go much below this.
param_max (float, optional): maximum absolute value for any parameter;
we will clamp parameters to satisfy -param_max <= param <= param_max
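
    Example (a minimal usage sketch; the linear model below is illustrative
    and not part of this file):

        >>> model = torch.nn.Linear(100, 100)
        >>> optimizer = Cain(model.parameters(), lr=0.03)
        >>> loss = model(torch.randn(8, 100)).pow(2).mean()
        >>> loss.backward()
        >>> optimizer.step()
        >>> optimizer.zero_grad()
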
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self,
params,
lr=1e-2,
betas=(0.9, 0.98),
eps=1e-8,
scale_speed=0.05,
rms_eps=1.0e-05,
param_max=20.0,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError(
"Invalid beta parameter at index 0: {}".format(betas[0])
)
if not 0.0 <= betas[1] < 1.0:
raise ValueError(
"Invalid beta parameter at index 1: {}".format(betas[1])
)
if not 0 < scale_speed < 1.0:
raise ValueError("Invalid scale_speed value: {}".format(scale_speed))
if not 0.1 < param_max <= 10000.0:
raise ValueError("Invalid param_max value: {}".format(param_max))
if not 0.0 < rms_eps < 0.1:
raise ValueError("Invalid rms_eps value: {}".format(rms_eps))
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
            scale_speed=scale_speed,
rms_eps=rms_eps,
param_max=param_max,
)
super(Cain, self).__init__(params, defaults)
def __setstate__(self, state):
super(Cain, self).__setstate__(state)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
beta1, beta2 = group["betas"]
lr = group["lr"]
scale_speed = group["scale_speed"]
rms_eps = group["rms_eps"]
eps = group["eps"]
param_max = group["param_max"]
for p in group["params"]:
if p.grad is None:
continue
# Perform optimization step
grad = p.grad
if grad.is_sparse:
raise RuntimeError(
"Cain does not support sparse gradients"
)
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["delta"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
if p.numel() > 1:
# we keep track of the RMS value of the parameters to
# help set the learning rate... this emulates Eve.
state["param_rms"] = torch.ones_like(p)
# we learn the scale on the parameters in a way that
# also emulates Eve. scale_exp_avg_sq is the
# moving-average square of the gradient w.r.t. this
# scalar scale.
state["scale_exp_avg_sq"] = torch.zeros((), device=p.device,
dtype=p.dtype)
delta, exp_avg_sq = state["delta"], state["exp_avg_sq"]
step = state["step"]
delta.mul_(beta1)
if p.numel() > 1:
if True:
# This block learns the scale on the parameters.
# scale_deriv is the derivative w.r.t. a log-space scale
# on the parameters, i.e. if we imagine: p =
# underlying_param * scale.exp(), scale_deriv is the
# derivative w.r.t. scale (it does not matter
# what value `scale` currently has).
scale_deriv = (p * grad).sum()
scale_exp_avg_sq = state["scale_exp_avg_sq"]
scale_exp_avg_sq.mul_(beta2).addcmul_(scale_deriv, scale_deriv,
value=1 - beta2)
# should actually be step + 1, so on 1st minibatch we are not learning
# anything here. May fix this at some point.
scale_bias_correction2 = 1 - beta2 ** (step + 1)
scale_denom = (scale_exp_avg_sq.sqrt()).add_(eps)
scale_alpha = -lr * (scale_bias_correction2 ** 0.5) * scale_speed * (1-beta1)
# scale_delta is going to be the change in log-scale (before momentum), i.e. if
# p = underlying_param * scale.exp(),
# delta is the change in `scale`.
scale_delta = scale_alpha * (scale_deriv / scale_denom)
delta.add_(p, alpha=scale_delta)
# Decay the second moment running average coefficient
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
bias_correction2 = 1 - beta2 ** step
grad_rms = (exp_avg_sq.sqrt()).add_(eps)
this_delta = grad / grad_rms
param_rms = state["param_rms"]
self._update_param_rms(p, param_rms, step, rms_eps)
alpha = (-(1-beta1) * lr * bias_correction2)
# geometrically interpolate the per-element param_rms with its mean
#param_rms_half = (param_rms * param_rms.mean()) ** 0.5
delta.add_(this_delta * param_rms, alpha=alpha)
# below, will do: delta.add_(this_delta, alpha=alpha)
else:
# p.numel() == 1
# Decay the second moment running average coefficient
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
bias_correction2 = 1 - beta2 ** step
denom = (exp_avg_sq.sqrt()).add_(eps)
this_delta = grad / denom
alpha = -lr*(1-beta1)*(bias_correction2 ** 0.5)
delta.add_(this_delta, alpha=alpha)
if random.random() < 0.0005:
print(f"Delta rms = {(delta**2).mean().item()}, shape = {delta.shape}")
p.add_(delta)
if step % 10 == 0:
p.clamp_(min=-param_max, max=param_max)
state["step"] += 1
return loss
def _update_param_rms(self, p: Tensor, param_rms: Tensor,
step: int, rms_eps: float):
"""
        Updates `param_rms`.  Requires p.numel() > 1.
Args:
p: parameter whose magnitude/rms we are measuring
param_rms: a Tensor containing non-negative values, with the same
shape as p. Contains an estimate of the root-mean-square
value of the parameter. This has a special structure where
it must be a product of one-dimensional quantities,
one for each axis of p.
We exclude certain axes to avoid estimating the rms value
from a too-small number of parameters.
step: the step of the algorithm
rms_eps: epsilon such that we'll aim to prevent param_rms from
going much lower than this.
"""
if step % 1000 == 0:
# Periodically refresh param_rms to keep the invariance that it is a
# product over its axes (numerical roundoff might destroy this)
param_rms.fill_(1.0)
for dim in range(p.ndim):
self._update_param_rms_for_dim(p, param_rms, dim, rms_eps)
else:
dim = step % p.ndim
self._update_param_rms_for_dim(p, param_rms, dim, rms_eps)
def _update_param_rms_for_dim(self, p: Tensor,
param_rms: Tensor,
dim: int,
rms_eps: float):
"""
Updates `param_rms` on one of its axes/dimensions.
Args:
p: parameter whose magnitude/rms we are measuring
param_rms: a Tensor containing non-negative values, with the same
shape as p. Contains an estimate of the root-mean-square
value of the parameter. This has a special structure where
it must be a product of one-dimensional quantities,
one for each axis of p.
We exclude certain axes to avoid estimating the rms value
from a too-small number of parameters.
dim: with 0 <= dim < p.ndim, the
dimension/axis on which to update the scale factor
rms_eps: epsilon such that we'll aim to prevent param_rms from
going much lower than this.
"""
        # var_factor is the square of the factor to change param_rms by.  p will
        # not be super large: we limit it to -param_max <= p <= param_max.
        # Clamping p**2 to min=rms_eps*rms_eps (1.0e-10 with the default
        # rms_eps=1.0e-05) should keep param_rms from getting much smaller than
        # rms_eps, which should prevent division by zero here.
var_factor = (p ** 2).clamp(min=rms_eps*rms_eps) / (param_rms ** 2)
if p.numel() <= 2 * p.shape[dim]:
var_factor = var_factor.mean()
else:
dims = tuple(d for d in range(p.ndim) if d != dim)
var_factor = var_factor.mean(dim=dims, keepdim=True)
#if random.random() < 0.01:
# print(f"shape={p.shape}, var_factor={var_factor}")
param_rms.mul_(var_factor.sqrt())
class LRScheduler(object):
"""
Base-class for learning rate schedulers where the learning-rate depends on both the
batch and the epoch.
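
    A subclass must implement get_lr().  A minimal sketch (the class name and
    the halving-per-epoch formula below are illustrative only, not part of
    this file):

        >>> class HalveEveryEpoch(LRScheduler):
        ...     def get_lr(self):
        ...         return [base_lr * (0.5 ** self.epoch)
        ...                 for base_lr in self.base_lrs]

    Call step_batch() after each optimizer step, and step_epoch() at the end
    of each epoch (or at the start, if you pass the epoch index explicitly).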
"""
def __init__(self, optimizer: Optimizer, verbose: bool = False):
# Attach optimizer
if not isinstance(optimizer, Optimizer):
raise TypeError(
"{} is not an Optimizer".format(type(optimizer).__name__)
)
self.optimizer = optimizer
self.verbose = verbose
for group in optimizer.param_groups:
group.setdefault("initial_lr", group["lr"])
self.base_lrs = [
group["initial_lr"] for group in optimizer.param_groups
]
self.epoch = 0
self.batch = 0
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
"""
return {
"base_lrs": self.base_lrs,
"epoch": self.epoch,
"batch": self.batch,
}
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Args:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
def get_last_lr(self) -> List[float]:
"""Return last computed learning rate by current scheduler. Will be a list of float."""
return self._last_lr
def get_lr(self):
# Compute list of learning rates from self.epoch and self.batch and
# self.base_lrs; this must be overloaded by the user.
# e.g. return [some_formula(self.batch, self.epoch, base_lr) for base_lr in self.base_lrs ]
raise NotImplementedError
def step_batch(self, batch: Optional[int] = None) -> None:
# Step the batch index, or just set it. If `batch` is specified, it
# must be the batch index from the start of training, i.e. summed over
# all epochs.
# You can call this in any order; if you don't provide 'batch', it should
# of course be called once per batch.
if batch is not None:
self.batch = batch
else:
self.batch = self.batch + 1
self._set_lrs()
def step_epoch(self, epoch: Optional[int] = None):
# Step the epoch index, or just set it. If you provide the 'epoch' arg,
# you should call this at the start of the epoch; if you don't provide the 'epoch'
# arg, you should call it at the end of the epoch.
if epoch is not None:
self.epoch = epoch
else:
self.epoch = self.epoch + 1
self._set_lrs()
def _set_lrs(self):
values = self.get_lr()
assert len(values) == len(self.optimizer.param_groups)
for i, data in enumerate(zip(self.optimizer.param_groups, values)):
param_group, lr = data
param_group["lr"] = lr
self.print_lr(self.verbose, i, lr)
self._last_lr = [group["lr"] for group in self.optimizer.param_groups]
def print_lr(self, is_verbose, group, lr):
"""Display the current learning rate."""
if is_verbose:
print(
f"Epoch={self.epoch}, batch={self.batch}: adjusting learning rate"
f" of group {group} to {lr:.4e}."
)
class Eve(Optimizer):
"""
Implements Eve algorithm. This is a modified version of AdamW with a special
way of setting the weight-decay / shrinkage-factor, which is designed to make the
rms of the parameters approach a particular target_rms (default: 0.1). This is
for use with networks with 'scaled' versions of modules (see scaling.py), which
will be close to invariant to the absolute scale on the parameter matrix.
The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
Eve is unpublished so far.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.98))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (default: 1e-3);
            a value of this size means that a weight would decay significantly
            after about 1k minibatches.  It is not multiplied by the learning rate,
            and is only applied when the RMS value of the parameter is > target_rms.
        target_rms (float, optional): target root-mean-square value of the
            parameters; if their rms falls below this we stop applying weight decay.
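
    Example (a minimal usage sketch; the linear model below is illustrative
    and not part of this file):

        >>> model = torch.nn.Linear(100, 100)
        >>> optimizer = Eve(model.parameters(), lr=0.003)
        >>> loss = model(torch.randn(8, 100)).pow(2).mean()
        >>> loss.backward()
        >>> optimizer.step()
        >>> optimizer.zero_grad()
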
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.98),
eps=1e-8,
weight_decay=1e-3,
target_rms=0.1,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError(
"Invalid beta parameter at index 0: {}".format(betas[0])
)
if not 0.0 <= betas[1] < 1.0:
raise ValueError(
"Invalid beta parameter at index 1: {}".format(betas[1])
)
if not 0 <= weight_decay <= 0.1:
raise ValueError(
"Invalid weight_decay value: {}".format(weight_decay)
)
if not 0 < target_rms <= 10.0:
raise ValueError("Invalid target_rms value: {}".format(target_rms))
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
target_rms=target_rms,
)
super(Eve, self).__init__(params, defaults)
def __setstate__(self, state):
super(Eve, self).__setstate__(state)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
# Perform optimization step
grad = p.grad
if grad.is_sparse:
raise RuntimeError(
"AdamW does not support sparse gradients"
)
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
denom = (exp_avg_sq.sqrt() * (bias_correction2 ** -0.5)).add_(
group["eps"]
)
step_size = group["lr"] / bias_correction1
target_rms = group["target_rms"]
weight_decay = group["weight_decay"]
if p.numel() > 1:
# avoid applying this weight-decay on "scaling factors"
# (which are scalar).
is_above_target_rms = p.norm() > (
target_rms * (p.numel() ** 0.5)
)
p.mul_(1 - (weight_decay * is_above_target_rms))
p.addcdiv_(exp_avg, denom, value=-step_size)
if random.random() < 0.0005:
step = (exp_avg/denom) * step_size
print(f"Delta rms = {(step**2).mean().item()}, shape = {step.shape}")
return loss
class Eden(LRScheduler):
"""
Eden scheduler.
lr = initial_lr * (((batch**2 + lr_batches**2) / lr_batches**2) ** -0.25 *
(((epoch**2 + lr_epochs**2) / lr_epochs**2) ** -0.25))
E.g. suggest initial-lr = 0.003 (passed to optimizer).
Args:
optimizer: the optimizer to change the learning rates on
lr_batches: the number of batches after which we start significantly
decreasing the learning rate, suggest 5000.
lr_epochs: the number of epochs after which we start significantly
decreasing the learning rate, suggest 6 if you plan to do e.g.
20 to 40 epochs, but may need smaller number if dataset is huge
and you will do few epochs.
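
    Worked example of the formula (illustrative numbers): with lr_batches=5000,
    at batch=5000 and epoch=0 the batch factor is
    ((5000**2 + 5000**2) / 5000**2) ** -0.25 = 2 ** -0.25 ~= 0.84, so the
    learning rate is about 84% of the base lr at that point.

    Example (a minimal usage sketch; `model` and the optimizer choice are
    illustrative; see _test_eden() below for a full runnable loop):

        >>> optimizer = Cain(model.parameters(), lr=0.003)
        >>> scheduler = Eden(optimizer, lr_batches=5000, lr_epochs=6)
        >>> # in the training loop: call scheduler.step_batch() after each
        >>> # optimizer.step(), and scheduler.step_epoch() once per epoch.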
"""
def __init__(
self,
optimizer: Optimizer,
lr_batches: Union[int, float],
lr_epochs: Union[int, float],
verbose: bool = False,
):
super(Eden, self).__init__(optimizer, verbose)
self.lr_batches = lr_batches
self.lr_epochs = lr_epochs
def get_lr(self):
factor = (
(self.batch ** 2 + self.lr_batches ** 2) / self.lr_batches ** 2
) ** -0.25 * (
((self.epoch ** 2 + self.lr_epochs ** 2) / self.lr_epochs ** 2)
** -0.25
)
return [x * factor for x in self.base_lrs]
def _test_eden():
m = torch.nn.Linear(100, 100)
optim = Cain(m.parameters(), lr=0.003)
scheduler = Eden(optim, lr_batches=100, lr_epochs=2, verbose=True)
for epoch in range(10):
scheduler.step_epoch(epoch) # sets epoch to `epoch`
for step in range(20):
x = torch.randn(200, 100).detach()
x.requires_grad = True
y = m(x)
dy = torch.randn(200, 100).detach()
f = (y * dy).sum()
f.backward()
optim.step()
scheduler.step_batch()
optim.zero_grad()
print("last lr = ", scheduler.get_last_lr())
print("state dict = ", scheduler.state_dict())
def _test_eve_cain():
import timeit
from scaling import ScaledLinear
E = 100
B = 4
T = 2
print("in test_eve_cain")
device = torch.device('cuda')
dtype = torch.float32
fix_random_seed(42)
    # these input_magnitudes and output_magnitudes are to test that
    # the optimizer is working as we expect and is able to adjust scales of
    # different dims differently.
input_magnitudes = (1.0 * torch.randn(E, dtype=dtype, device=device)).exp()
output_magnitudes = (1.0 * torch.randn(E, dtype=dtype, device=device)).exp()
for iter in [3, 2, 1, 0]:
fix_random_seed(42)
Linear = torch.nn.Linear if iter == 0 else ScaledLinear
m = torch.nn.Sequential(Linear(E, 200),
torch.nn.ReLU(),
Linear(200, E)).to(device)
train_pairs = [ (100.0 * torch.randn(B, T, E, device=device, dtype=dtype) * input_magnitudes,
torch.randn(B, T, E, device=device, dtype=dtype) * output_magnitudes) for _ in range(20) ]
if iter == 0: optim = Eve(m.parameters(), lr=0.003)
elif iter == 1: optim = Cain(m.parameters(), lr=0.03)
elif iter == 2: optim = NeutralGradient(m.parameters(), lr=0.03, max_size=10)
elif iter == 3: optim = NeutralGradient(m.parameters(), lr=0.05, max_size=1000)
scheduler = Eden(optim, lr_batches=200, lr_epochs=10, verbose=False)
start = timeit.default_timer()
for epoch in range(150):
scheduler.step_epoch()
for n, (x,y) in enumerate(train_pairs):
y_out = m(x)
loss = ((y_out - y)**2).mean() * 100.0
if n == 0 and epoch % 10 == 0:
norm1 = '%.2e' % (m[0].weight**2).mean().sqrt().item()
norm1b = '%.2e' % (m[0].bias**2).mean().sqrt().item()
norm2 = '%.2e' % (m[2].weight**2).mean().sqrt().item()
norm2b = '%.2e' % (m[2].bias**2).mean().sqrt().item()
#scale1 = '%.2e' % (m[0].weight_scale.exp().item())
#scale1b = '%.2e' % (m[0].bias_scale.exp().item())
#scale2 = '%.2e' % (m[2].weight_scale.exp().item())
#scale2b = '%.2e' % (m[2].bias_scale.exp().item())
print(f"Iter {iter}, epoch {epoch}, batch {n}, loss {loss.item()}, norms={norm1,norm1b,norm2,norm2b}") # scales={scale1,scale1b,scale2,scale2b}
loss.log().backward()
optim.step()
optim.zero_grad()
scheduler.step_batch()
stop = timeit.default_timer()
print(f"Iter={iter}, Time taken: {stop - start}")
print("last lr = ", scheduler.get_last_lr())
#print("state dict = ", scheduler.state_dict())
#print("optim state_dict = ", optim.state_dict())
print("input_magnitudes = ", input_magnitudes)
print("output_magnitudes = ", output_magnitudes)
def stddev(x):
return ((x-x.mean())**2).mean().sqrt()
print("Un-normalized input col magnitudes log-stddev: ", stddev((m[0].weight**2).sum(dim=0).sqrt().log()))
print("Normalized input col magnitudes log-stddev: ", stddev(((m[0].weight**2).sum(dim=0).sqrt() * input_magnitudes).log()))
print("Un-normalized 0-output row magnitudes log-stddev: ", stddev((m[0].weight**2).sum(dim=1).sqrt().log()))
print("Un-normalized 2-input col magnitudes log-stddev: ", stddev((m[2].weight**2).sum(dim=0).sqrt().log()))
print("Un-normalized 2-output row magnitudes log-stddev: ", stddev((m[2].weight**2).sum(dim=1).sqrt().log()))
print("Normalized output row magnitudes log-stddev: ", stddev(((m[2].weight**2).sum(dim=1).sqrt() / output_magnitudes).log()))
if __name__ == "__main__":
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
_test_eve_cain()
#_test_eden()