Merge branch 'scaled_adam_exp569' into scaled_adam_exp585

Daniel Povey 2022-12-01 19:14:26 +08:00
commit d8185201e9


@@ -1471,10 +1471,10 @@ class NonlinAttentionModule(nn.Module):
             hidden_channels // ratio, channel_dim=-1,
             min_positive=ScheduledFloat((0.0, 0.1), (8000.0, 0.05)),
             max_positive=1.0,
-            min_abs=1.5,
-            max_abs=ScheduledFloat((0.0, 5.0), (8000.0, 10.0), default=1.0),
+            min_abs=0.75,
+            max_abs=ScheduledFloat((0.0, 2.5), (8000.0, 5.0), default=1.0),
         )
-        self.sigmoid = nn.Sigmoid()
+        self.tanh = nn.Tanh()
         self.activation = Identity()  # for diagnostics.
         self.out_proj = ScaledLinear(hidden_channels, channels,
@@ -1509,7 +1509,7 @@ attn_weights: a Tensor of shape (num_heads, batch_size, seq_len, seq_len)
         x = x[..., :hidden_channels]
         s = self.balancer(s)
-        s = self.sigmoid(s)
+        s = self.tanh(s)
         s = s.unsqueeze(-1).expand(-1, -1, -1, self.ratio).reshape(seq_len, batch_size,
                                                                    hidden_channels)
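
In short, the gate in NonlinAttentionModule now uses tanh rather than sigmoid, so per-channel gate values lie in [-1, 1] instead of [0, 1], and the balancer's min_abs / max_abs constraints on the pre-activation are halved to match. Below is a minimal, self-contained sketch of just the gating step, for illustration only; it is not the icefall module itself, and it assumes (as in other versions of this module) that the expanded gate is multiplied elementwise into the hidden branch x. The Balancer, ScheduledFloat schedule, attention weighting, and ScaledLinear projections are omitted, and the class name is hypothetical.

# Minimal sketch of the new gating (illustrative only; not the actual
# icefall NonlinAttentionModule). The gate branch `s` now goes through
# tanh instead of sigmoid, is expanded by `ratio`, and -- assumed here,
# following other versions of this module -- multiplied elementwise into
# the hidden branch `x`. Balancer/ScheduledFloat/attention are omitted.
import torch
import torch.nn as nn


class TanhGateSketch(nn.Module):  # hypothetical name, for illustration only
    def __init__(self, hidden_channels: int, ratio: int = 2):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.ratio = ratio
        self.tanh = nn.Tanh()  # previously nn.Sigmoid() in this module

    def forward(self, s: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
        # s: (seq_len, batch_size, hidden_channels // ratio) -- gate branch
        # x: (seq_len, batch_size, hidden_channels)          -- hidden branch
        seq_len, batch_size, _ = x.shape
        s = self.tanh(s)  # was: s = self.sigmoid(s)
        # Repeat each gate value `ratio` times along the channel dimension.
        s = s.unsqueeze(-1).expand(-1, -1, -1, self.ratio).reshape(
            seq_len, batch_size, self.hidden_channels)
        return x * s  # assumed gating step; the diff does not show this line


# Toy usage with made-up shapes.
gate = TanhGateSketch(hidden_channels=8, ratio=2)
s = torch.randn(5, 3, 4)   # (seq_len, batch, hidden_channels // ratio)
x = torch.randn(5, 3, 8)   # (seq_len, batch, hidden_channels)
print(gate(s, x).shape)    # torch.Size([5, 3, 8])

Since tanh(x) = 2*sigmoid(2x) - 1, tanh saturates over roughly half the input range of sigmoid, so halving min_abs (1.5 to 0.75) and the max_abs schedule ((5.0, 10.0) to (2.5, 5.0)) presumably keeps the balanced pre-activation in a comparable operating region of the new activation.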