Increase the activation balancer's probability of doing work and make it decay more slowly.

Daniel Povey 2022-10-22 22:18:38 +08:00
parent 2964628ae1
commit e0c1dc66da
2 changed files with 3 additions and 2 deletions


@@ -1260,7 +1260,8 @@ class FeedforwardModule(nn.Module):
         super(FeedforwardModule, self).__init__()
         self.in_proj = nn.Linear(d_model, feedforward_dim)
         self.balancer = ActivationBalancer(feedforward_dim,
-                                           channel_dim=-1, max_abs=10.0)
+                                           channel_dim=-1, max_abs=10.0,
+                                           min_prob=0.25)
         self.activation = DoubleSwish()
         self.dropout = nn.Dropout(dropout)
         self.out_proj = ScaledLinear(feedforward_dim, d_model,

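For context, here is a minimal, self-contained sketch of what the added argument does. This is not the icefall ActivationBalancer (whose real constructor takes more options); the BalancerStub name, the batch counter, and the example feedforward_dim of 2048 are stand-ins. It only models how min_prob acts as a floor on the per-batch probability that the balancer does any work:

import random

import torch
import torch.nn as nn


class BalancerStub(nn.Module):
    """Toy stand-in for ActivationBalancer: it models only the scheduling
    logic, i.e. how often the balancer does any balancing work at all."""

    def __init__(self, num_channels: int, channel_dim: int = -1,
                 max_abs: float = 10.0, min_prob: float = 0.1):
        super().__init__()
        self.num_channels = num_channels
        self.channel_dim = channel_dim
        self.max_abs = max_abs
        self.min_prob = min_prob  # floor of the decaying work probability
        self.count = 0            # training batches seen so far

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if not self.training:
            return x
        self.count += 1
        # Exponential decay from 0.5, floored at min_prob (see the hunk below).
        prob = max(self.min_prob, 0.5 ** (1 + self.count / 4000.0))
        if random.random() < prob:
            # The real balancer would attach a gradient correction to x here,
            # nudging per-channel statistics toward the configured ranges.
            pass
        return x


# Mirroring the FeedforwardModule change above: min_prob is raised from the
# default of 0.1 to 0.25, so the balancer keeps acting on at least 25% of batches.
balancer = BalancerStub(2048, channel_dim=-1, max_abs=10.0, min_prob=0.25)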

@@ -520,7 +520,7 @@ class ActivationBalancer(torch.nn.Module):
         # the prob of doing some work exponentially decreases from 0.5 till it hits
         # a floor at min_prob (==0.1, by default)
-        prob = max(self.min_prob, 0.5 ** (1 + (count/2000.0)))
+        prob = max(self.min_prob, 0.5 ** (1 + (count/4000.0)))
         if random.random() < prob:
             sign_gain_factor = 0.5
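To make the effect of the two constants concrete, here is a small sketch of the schedule in the hunk above. The function name work_prob and the standalone form are mine; the formula and the 0.1 default floor come from the code above, while 0.25 and 4000 are this commit's new values. The halving time doubles from 2000 to 4000 batches and the floor rises from 0.1 to 0.25, so late in training the balancer fires on one batch in four instead of one in ten:

def work_prob(count: int, min_prob: float, halving_batches: float) -> float:
    """Probability that the balancer does any work on the count-th batch:
    starts at 0.5, halves every `halving_batches` batches, floored at min_prob."""
    return max(min_prob, 0.5 ** (1 + count / halving_batches))


for count in (0, 2000, 4000, 8000, 16000):
    old = work_prob(count, min_prob=0.10, halving_batches=2000.0)  # before this commit
    new = work_prob(count, min_prob=0.25, halving_batches=4000.0)  # after this commit
    print(f"{count:>6}  old={old:.3f}  new={new:.3f}")

# count=0:     old=0.500  new=0.500
# count=4000:  old=0.125  new=0.250  (new schedule already at its floor)
# count=16000: old=0.100  new=0.250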