Replace norm on input layer with scale of 0.1.

Daniel Povey 2022-03-07 11:24:04 +08:00
parent a37d98463a
commit e2ace9d545
2 changed files with 2 additions and 3 deletions


@@ -58,7 +58,6 @@ class Conv2dSubsampling(nn.Module):
             ExpScaleRelu(odim, 1, 1, speed=20.0),
         )
         self.out = nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim)
-        self.out_norm = nn.LayerNorm(odim, elementwise_affine=False)
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         """Subsample x.
@@ -77,7 +76,7 @@ class Conv2dSubsampling(nn.Module):
         b, c, t, f = x.size()
         x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
         # Now x is of shape (N, ((T-1)//2 - 1))//2, odim)
-        x = self.out_norm(x)
+        x = x * 0.1
         return x
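
What changed, in effect: the output of the subsampling module's Linear projection was previously passed through a LayerNorm with no learnable affine parameters, and is now simply multiplied by a fixed 0.1. A minimal sketch of the two variants, outside the actual icefall module (the batch/time shapes and odim value here are made up for the demo):

import torch
import torch.nn as nn

odim = 512
# Stand-in for the Linear output self.out(...), shape (N, T', odim),
# deliberately given a large scale so the difference is visible.
x = torch.randn(4, 100, odim) * 5.0

# Before this commit: normalize each frame to zero mean / unit variance
# over the feature dimension, so the output scale is fixed no matter
# how large the Linear output is.
out_norm = nn.LayerNorm(odim, elementwise_affine=False)
y_before = out_norm(x)

# After this commit: a constant multiplier; cheaper, not data-dependent,
# and the output scale now tracks the input scale instead of being
# forced to 1.
y_after = x * 0.1

print(y_before.std().item())  # ~1.0 by construction
print(y_after.std().item())   # ~0.5 here (0.1 * input std of 5.0)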


@@ -110,7 +110,7 @@ def get_parser():
     parser.add_argument(
         "--exp-dir",
         type=str,
-        default="transducer_stateless/specaugmod_baseline_randcombine1_expscale3_brelu2swish2",
+        default="transducer_stateless/specaugmod_baseline_randcombine1_expscale3_brelu2swish2_0.1",
         help="""The experiment dir.
         It specifies the directory where all training related
         files, e.g., checkpoints, log, etc, are saved
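
Note the matching rename of the default --exp-dir: appending _0.1 records the new input scale in the experiment name, presumably so that checkpoints and logs from this run are kept apart from those of the earlier LayerNorm baseline.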