From 36a2f33a6ffe4866ba632f741a9837196ce671de Mon Sep 17 00:00:00 2001
From: Daniel Povey
Date: Mon, 28 Nov 2022 21:55:06 +0800
Subject: [PATCH] Have value dim in NonlinAttentionModule be half of
 num_channels

---
 .../ASR/pruned_transducer_stateless7/zipformer.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless7/zipformer.py b/egs/librispeech/ASR/pruned_transducer_stateless7/zipformer.py
index f3e306225..66d5db892 100644
--- a/egs/librispeech/ASR/pruned_transducer_stateless7/zipformer.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless7/zipformer.py
@@ -1464,15 +1464,15 @@ class NonlinAttentionModule(nn.Module):
         super().__init__()
 
         self.ratio = ratio
-        assert channels % ratio == 0
-        self.in_proj = nn.Linear(channels, channels + channels // ratio, bias=True)
+        assert channels % (ratio * 2) == 0
+        self.in_proj = nn.Linear(channels, (channels + channels // ratio) // 2, bias=True)
 
         # balancer that goes before the sigmoid.  Have quite a large min_abs value, at 2.0,
         # because we noticed that well-trained instances of this module have abs-value before the sigmoid
         # starting from about 3, and poorly-trained instances of the module have smaller abs values
         # before the sigmoid.
         self.balancer = ActivationBalancer(
-            channels // ratio, channel_dim=-1,
+            channels // (2 * ratio), channel_dim=-1,
             min_positive=ScheduledFloat((0.0, 0.1), (8000.0, 0.05)),
             max_positive=1.0,
             min_abs=2.0,
@@ -1481,7 +1481,7 @@ class NonlinAttentionModule(nn.Module):
         self.sigmoid = nn.Sigmoid()
 
         self.activation = Identity()  # for diagnostics.
-        self.out_proj = ScaledLinear(channels, channels,
+        self.out_proj = ScaledLinear(channels // 2, channels,
                                      bias=True,
                                      initial_scale=0.05)
 
@@ -1508,13 +1508,13 @@ attn_weights: a Tensor of shape (num_heads, batch_size, seq_len, seq_len)
         x = self.in_proj(x)
 
-        s = x[..., num_channels:]
-        x = x[..., :num_channels]
+        s = x[..., num_channels // 2:]
+        x = x[..., :num_channels // 2]
 
         s = self.balancer(s)
         s = self.sigmoid(s)
 
-        s = s.unsqueeze(-1).expand(-1, -1, -1, self.ratio).reshape(seq_len, batch_size, num_channels)
+        s = s.unsqueeze(-1).expand(-1, -1, -1, self.ratio).reshape(seq_len, batch_size, num_channels // 2)
 
         x = x * s
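
Note: the following is a minimal, standalone sketch of just the shape arithmetic this patch changes, not the actual icefall module. It assumes example values channels=384 and ratio=8, uses plain nn.Linear in place of ScaledLinear, and omits the ActivationBalancer and the attention-weighting part of NonlinAttentionModule.

# Sketch of the patched projection/gating shapes (assumptions noted above).
import torch
import torch.nn as nn

channels, ratio = 384, 8            # example values; patch requires channels % (ratio * 2) == 0
seq_len, batch_size = 100, 4

in_proj = nn.Linear(channels, (channels + channels // ratio) // 2, bias=True)
out_proj = nn.Linear(channels // 2, channels, bias=True)   # stand-in for ScaledLinear

x = torch.randn(seq_len, batch_size, channels)
num_channels = x.shape[-1]

x = in_proj(x)                      # (seq_len, batch, channels//2 + channels//(2*ratio))
s = x[..., num_channels // 2:]      # gating part: (seq_len, batch, channels // (2*ratio))
x = x[..., :num_channels // 2]      # value part:  (seq_len, batch, channels // 2)

s = torch.sigmoid(s)                # balancer omitted in this sketch
# each gating value is shared across `ratio` consecutive value channels
s = s.unsqueeze(-1).expand(-1, -1, -1, ratio).reshape(seq_len, batch_size, num_channels // 2)

x = out_proj(x * s)                 # back to (seq_len, batch, channels)
print(x.shape)                      # torch.Size([100, 4, 384])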