Move balancer of NonlinAttentionModule to output; add an extra whitener at output.

Daniel Povey 2022-12-07 17:49:53 +08:00
parent 8f841e5b2b
commit 8859177bfa

@@ -427,8 +427,7 @@ class ZipformerEncoderLayer(nn.Module):
             dropout)
         self.nonlin_attention_module = NonlinAttentionModule(embed_dim,
-            hidden_channels=embed_dim // 4,
-            ratio=1)
+            hidden_channels=embed_dim // 4)
         self.conv_module = ConvolutionModule(embed_dim,
@@ -1463,22 +1462,19 @@ class NonlinAttentionModule(nn.Module):
         self,
         channels: int,
         hidden_channels: int,
-        ratio: int = 1,
     ) -> None:
         super().__init__()
-        self.ratio = ratio
         self.hidden_channels = hidden_channels
-        assert channels % (ratio * 2) == 0
-        self.in_proj = nn.Linear(channels, hidden_channels + hidden_channels // ratio, bias=True)
+        self.in_proj = nn.Linear(channels, hidden_channels * 2, bias=True)
         # balancer that goes before the sigmoid. Have quite a large min_abs value, at 2.0,
         # because we noticed that well-trained instances of this module have abs-value before the sigmoid
         # starting from about 3, and poorly-trained instances of the module have smaller abs values
         # before the sigmoid.
         self.balancer1 = ActivationBalancer(
-            hidden_channels // ratio, channel_dim=-1,
+            hidden_channels, channel_dim=-1,
             min_positive=ScheduledFloat((0.0, 0.1), (8000.0, 0.05)),
             max_positive=1.0,
             min_abs=0.75,
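
With ratio gone, the projection width is easy to read off: in_proj now produces exactly 2 * hidden_channels features, which the forward pass splits into a gating branch s (balanced by balancer1, then passed through tanh) and a hidden branch x. A minimal shape sketch; the concrete sizes and the chunk-based split are assumptions for illustration, not taken from this hunk:

    import torch
    import torch.nn as nn

    # Hypothetical sizes, chosen only to make the shapes concrete.
    channels, hidden_channels = 256, 64
    in_proj = nn.Linear(channels, hidden_channels * 2, bias=True)

    t = torch.randn(10, 2, channels)    # (seq_len, batch_size, channels)
    s, h = in_proj(t).chunk(2, dim=-1)  # gating branch and hidden branch
    assert s.shape == h.shape == (10, 2, hidden_channels)
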
@@ -1494,14 +1490,10 @@ class NonlinAttentionModule(nn.Module):
         # Have very tight limits on min_positive and max_positive so that it becomes
         # close to zero mean, as we found that large mean offsets after the
         # multiplication are associated with poor convergence.
-        # We don't need min_abs and max_abs limits because sharing the in_proj
-        # between the sigmoid-input and activations dictates the scale of the
-        # activations at this point. The code applies those anyway, it's not optional
-        # right now, so just use the default values.
         self.balancer2 = ActivationBalancer(
-            hidden_channels // ratio, channel_dim=-1,
-            min_positive=0.4, max_positive=0.6,
-            min_abs=0.5,
+            channels, channel_dim=-1,
+            min_positive=0.45, max_positive=0.55,
+            min_abs=0.01,
         )
         self.whiten = Whiten(num_groups=1,
@@ -1534,8 +1526,7 @@ attn_weights: a Tensor of shape (num_heads, batch_size, seq_len, seq_len)
         s = self.balancer1(s)
         s = self.tanh(s)
-        s = s.unsqueeze(-1).expand(-1, -1, -1, self.ratio).reshape(seq_len, batch_size,
-            hidden_channels)
+        s = s.unsqueeze(-1).reshape(seq_len, batch_size, hidden_channels)
         x = self.activation(x)  # diagnostics only, it's the identity.
         x = x * s
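
With ratio removed, the old expand(-1, -1, -1, self.ratio) broadcast collapses: s already has hidden_channels features, so unsqueeze(-1).reshape(...) gives back a tensor of the same shape and the gating reduces to a plain elementwise multiply. A quick shape check with made-up sizes (a sketch, not repo code):

    import torch

    seq_len, batch_size, hidden_channels = 5, 3, 8
    s = torch.randn(seq_len, batch_size, hidden_channels)

    # unsqueeze(-1) -> (seq_len, batch_size, hidden_channels, 1); the reshape
    # recovers the original layout, leaving the values untouched.
    s2 = s.unsqueeze(-1).reshape(seq_len, batch_size, hidden_channels)
    assert torch.equal(s, s2)
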
@@ -1549,9 +1540,10 @@ attn_weights: a Tensor of shape (num_heads, batch_size, seq_len, seq_len)
         # now x: (num_heads, batch_size, seq_len, head_dim)
         x = x.permute(2, 1, 0, 3).reshape(seq_len, batch_size, -1)
+        x = self.out_proj(x)
+        x = self.balancer2(x)
         x = self.whiten(x)
-        x = self.out_proj(x)
         return x
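
Taken together, the tail of the forward pass now projects back to channels first and only then applies the moved balancer2 and the whitener, so both act on the module's final output. Below is a rough sketch of just that output path; NonlinAttentionOutputSketch is a made-up name, the sizes are arbitrary, and nn.Identity stands in for the repo's ActivationBalancer and Whiten (whose real constructors take the arguments shown in the hunks above):

    import torch
    import torch.nn as nn


    class NonlinAttentionOutputSketch(nn.Module):
        """Sketch of the reordered output path only, not the full NonlinAttentionModule."""

        def __init__(self, channels: int, hidden_channels: int) -> None:
            super().__init__()
            self.out_proj = nn.Linear(hidden_channels, channels, bias=True)
            self.balancer2 = nn.Identity()  # stand-in for ActivationBalancer(channels, ...)
            self.whiten = nn.Identity()     # stand-in for Whiten(num_groups=1, ...)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            # x: (num_heads, batch_size, seq_len, head_dim), already weighted by
            # attn_weights; num_heads * head_dim must equal hidden_channels.
            num_heads, batch_size, seq_len, head_dim = x.shape
            x = x.permute(2, 1, 0, 3).reshape(seq_len, batch_size, -1)
            x = self.out_proj(x)   # project back to `channels` first ...
            x = self.balancer2(x)  # ... then balance at the output (moved here) ...
            x = self.whiten(x)     # ... and whiten the final output
            return x


    m = NonlinAttentionOutputSketch(channels=256, hidden_channels=64)
    y = m(torch.randn(4, 2, 10, 16))  # 4 heads * 16 dims == hidden_channels
    print(y.shape)                    # torch.Size([10, 2, 256])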