cast grad_scale in whiten to float

Teo 2024-06-23 00:20:11 +09:00
parent 3059eb4511
commit d1dfde2c9d


@@ -1032,7 +1032,7 @@ class WhiteningPenaltyFunction(torch.autograd.Function):
                 w.prob = w.max_prob
                 metric.backward()
                 penalty_grad = x_detached.grad
-                scale = w.grad_scale * (
+                scale = float(w.grad_scale) * (
                     x_grad.to(torch.float32).norm()
                     / (penalty_grad.norm() + 1.0e-20)
                 )
@@ -1074,7 +1074,7 @@ class Whiten(nn.Module):
         super(Whiten, self).__init__()
         assert num_groups >= 1
         assert float(whitening_limit) >= 1
-        assert grad_scale >= 0
+        assert float(grad_scale) >= 0
         self.num_groups = num_groups
         self.whitening_limit = whitening_limit
         self.grad_scale = grad_scale
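For context: the cast matters if grad_scale is not a plain Python float but a schedule-like object that only yields a concrete number via __float__ (whether this repo uses such a type, e.g. a ScheduledFloat, is an assumption here). A minimal sketch of the idea, with a hypothetical ScheduledValue standing in for whatever type grad_scale actually has:

```python
# Hypothetical schedule-like scalar (illustrative only; not the repo's actual class).
class ScheduledValue:
    def __init__(self, value: float):
        self.value = value

    def __float__(self) -> float:
        # In a real schedule this could depend on the training step.
        return self.value


grad_scale = ScheduledValue(0.01)

# Casting with float() evaluates the schedule to a concrete number before it is
# used in assertions or arithmetic, instead of relying on the object behaving
# like a number everywhere.
assert float(grad_scale) >= 0
scale = float(grad_scale) * 2.0
print(scale)  # 0.02
```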