Mirror of https://github.com/k2-fsa/icefall.git (synced 2025-08-26 18:24:18 +00:00)
fix cast in zipformer_lora
parent d1dfde2c9d
commit c14fd3872d
@@ -1137,7 +1137,7 @@ class WhiteningPenaltyFunction(torch.autograd.Function):
                         w.prob = w.max_prob
                         metric.backward()
                         penalty_grad = x_detached.grad
-                        scale = w.grad_scale * (
+                        scale = float(w.grad_scale) * (
                             x_grad.to(torch.float32).norm()
                             / (penalty_grad.norm() + 1.0e-20)
                         )
@@ -1179,7 +1179,7 @@ class Whiten(nn.Module):
         super(Whiten, self).__init__()
         assert num_groups >= 1
         assert float(whitening_limit) >= 1
-        assert grad_scale >= 0
+        assert float(grad_scale) >= 0
         self.num_groups = num_groups
         self.whitening_limit = whitening_limit
         self.grad_scale = grad_scale
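Context for the change (an assumption, not stated in the commit): in icefall's zipformer code, grad_scale and whitening_limit are FloatLike values, meaning either a plain float or a schedule object such as ScheduledFloat that exposes its current value through __float__. Plain arithmetic and comparisons on such an object can fail, which is why both hunks wrap it in float(...). A minimal sketch of the failure mode, using a simplified stand-in rather than the real ScheduledFloat class:

import torch

class ScheduledFloat:
    # Simplified stand-in for a float-like schedule object that only
    # exposes its current value via __float__() (assumed behavior).
    def __init__(self, value: float):
        self.value = value

    def __float__(self) -> float:
        return self.value

grad_scale = ScheduledFloat(0.02)

# assert grad_scale >= 0        # TypeError: '>=' not supported
assert float(grad_scale) >= 0   # the cast makes the comparison valid

x_grad = torch.randn(4, 8)
penalty_grad = torch.randn(4, 8)

# Likewise, grad_scale * tensor would fail without __mul__ defined;
# float() pins the current value as a plain Python float first.
scale = float(grad_scale) * (
    x_grad.to(torch.float32).norm() / (penalty_grad.norm() + 1.0e-20)
)

Even if the real schedule class defines some arithmetic operators, casting up front keeps the result a plain float, which is what the surrounding tensor code expects.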