Simplify formula for Swoosh and make it pass through 0; make max_abs of ConvolutionModule a constant.

This commit is contained in:
Daniel Povey 2022-12-03 00:13:09 +08:00
parent 862e5828c5
commit bd1b1dd7e3
2 changed files with 5 additions and 5 deletions

View File

@ -1216,7 +1216,7 @@ class TanSwish(torch.nn.Module):
class SwooshFunction(torch.autograd.Function):
"""
swoosh(x) = log(1 + exp(x-4)) - 0.08*x - 0.15
swoosh(x) = log(1 + exp(x-1)) - 0.08*x - 0.313261687
derivatives are between -0.08 and 0.92.
"""
@ -1235,7 +1235,7 @@ class SwooshFunction(torch.autograd.Function):
with torch.enable_grad():
x = x.detach()
x.requires_grad = True
y = torch.logaddexp(zero, x - 1.125) - 0.08 * x - 0.3
y = torch.logaddexp(zero, x - 1.) - 0.08 * x - 0.313261687
if not requires_grad:
return y
@ -1273,7 +1273,7 @@ class Swoosh(torch.nn.Module):
"""
if torch.jit.is_scripting():
zero = torch.tensor(0.0, dtype=x.dtype, device=x.device)
return torch.logaddexp(zero, x - 1.125) - 0.08 * x - 0.3
return torch.logaddexp(zero, x - 1.) - 0.08 * x - 0.313261687
return SwooshFunction.apply(x)

View File

@ -1551,7 +1551,7 @@ class ConvolutionModule(nn.Module):
def __init__(
self, channels: int, kernel_size: int,
) -> None:
"""Construct an ConvolutionModule object."""
"""Construct a ConvolutionModule object."""
super(ConvolutionModule, self).__init__()
# kernel_size should be an odd number for 'SAME' padding
assert (kernel_size - 1) % 2 == 0
@ -1601,7 +1601,7 @@ class ConvolutionModule(nn.Module):
min_positive=ScheduledFloat((0.0, 0.1), (8000.0, 0.05)),
max_positive=1.0,
min_abs=0.4,
max_abs=ScheduledFloat((0.0, 10.0), (8000.0, 20.0), default=10),
max_abs=10.0,
)
self.activation = Swoosh()