Mirror of https://github.com/k2-fsa/icefall.git (synced 2025-08-08 17:42:21 +00:00)
Change max_factor in DerivBalancer from 0.025 to 0.01; fix scaling code.
This commit is contained in:
parent 2940d3106f
commit bcf417fce2
@@ -48,13 +48,13 @@ class Conv2dSubsampling(nn.Module):
                 in_channels=1, out_channels=odim, kernel_size=3, stride=2
             ),
             DerivBalancer(channel_dim=1, threshold=0.05,
-                          max_factor=0.025),
+                          max_factor=0.01),
             ExpScaleRelu(odim, 1, 1, speed=20.0),
             nn.Conv2d(
                 in_channels=odim, out_channels=odim, kernel_size=3, stride=2
             ),
             DerivBalancer(channel_dim=1, threshold=0.05,
-                          max_factor=0.025),
+                          max_factor=0.01),
             ExpScaleRelu(odim, 1, 1, speed=20.0),
         )
         self.out = nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim)
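The DerivBalancer implementation itself is not part of this diff. As a rough, hypothetical sketch (not icefall's actual code) of what a max_factor-style cap on per-channel gradient rescaling could look like, using only the constructor arguments visible in the hunk above:

import torch


class _BalancerSketch(torch.autograd.Function):
    # Hypothetical: pass x through unchanged; in backward, scale the gradient
    # of "under-active" channels by at most (1 + max_factor).
    @staticmethod
    def forward(ctx, x, channel_dim, threshold, max_factor):
        ctx.channel_dim = channel_dim % x.dim()
        ctx.threshold = threshold
        ctx.max_factor = max_factor
        ctx.save_for_backward(x)
        return x

    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors
        dims = [d for d in range(x.dim()) if d != ctx.channel_dim]
        # Fraction of positive activations per channel.
        pos_frac = (x > 0).float().mean(dim=dims, keepdim=True)
        # Channels below the threshold get their gradient boosted, but never
        # by more than max_factor (0.01 after this commit, 0.025 before).
        scale = 1.0 + ctx.max_factor * (pos_frac < ctx.threshold).float()
        return grad_output * scale, None, None, None


class DerivBalancerSketch(torch.nn.Module):
    # Mirrors the constructor arguments seen above; purely illustrative.
    def __init__(self, channel_dim, threshold=0.05, max_factor=0.01):
        super().__init__()
        self.channel_dim = channel_dim
        self.threshold = threshold
        self.max_factor = max_factor

    def forward(self, x):
        return _BalancerSketch.apply(
            x, self.channel_dim, self.threshold, self.max_factor
        )

Whatever the real DerivBalancer does internally, max_factor presumably bounds how strongly it may alter gradients; this commit tightens that bound from 0.025 to 0.01.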
@@ -159,7 +159,7 @@ class ConformerEncoderLayer(nn.Module):
         self.feed_forward = nn.Sequential(
             nn.Linear(d_model, dim_feedforward),
             DerivBalancer(channel_dim=-1, threshold=0.05,
-                          max_factor=0.025),
+                          max_factor=0.01),
             SwishExpScale(dim_feedforward, speed=20.0),
             nn.Dropout(dropout),
             nn.Linear(dim_feedforward, d_model),
@@ -168,7 +168,7 @@ class ConformerEncoderLayer(nn.Module):
         self.feed_forward_macaron = nn.Sequential(
             nn.Linear(d_model, dim_feedforward),
             DerivBalancer(channel_dim=-1, threshold=0.05,
-                          max_factor=0.025),
+                          max_factor=0.01),
             SwishExpScale(dim_feedforward, speed=20.0),
             nn.Dropout(dropout),
             nn.Linear(dim_feedforward, d_model),
@@ -720,7 +720,7 @@ class RelPositionMultiheadAttention(nn.Module):
             )
             key_padding_mask = key_padding_mask.to(torch.bool)

-        q = (q.contiguous() * scaling).view(tgt_len, bsz, num_heads, head_dim)
+        q = (q * scaling).contiguous().view(tgt_len, bsz, num_heads, head_dim)
         k = k.contiguous().view(-1, bsz, num_heads, head_dim)
         v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)

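For the last hunk, the old and new orderings of the scalar multiply and contiguous() produce the same values when this line is taken in isolation; a minimal standalone check (assuming scaling = head_dim ** -0.5, which the hunk does not show):

import torch

# Shapes follow the hunk: q is (tgt_len, bsz, embed_dim) before the reshape.
tgt_len, bsz, num_heads, head_dim = 10, 2, 4, 16
embed_dim = num_heads * head_dim
q = torch.randn(tgt_len, bsz, embed_dim)
scaling = float(head_dim) ** -0.5  # assumed, as in standard scaled dot-product attention

# Old form: make q contiguous, multiply by the scale, then reshape.
q_old = (q.contiguous() * scaling).view(tgt_len, bsz, num_heads, head_dim)
# New form from the diff: multiply first, then contiguous() and view().
q_new = (q * scaling).contiguous().view(tgt_len, bsz, num_heads, head_dim)

assert torch.allclose(q_old, q_new)

So at this line the change is a reordering; any numerical effect of the "fix scaling code" part of the commit message is not visible from this hunk alone.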
|