Mirror of https://github.com/k2-fsa/icefall.git, synced 2025-08-09 01:52:41 +00:00
Fix q*scaling logic
parent 137eae0b95
commit 2940d3106f
@@ -616,7 +616,6 @@ class RelPositionMultiheadAttention(nn.Module):
         ), "embed_dim must be divisible by num_heads"
 
         scaling = float(head_dim) ** -0.5
-        q = q * scaling
 
         if torch.equal(query, key) and torch.equal(key, value):
             # self-attention
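The hunk above drops the early `q = q * scaling`, which sat before the branch where the query is actually projected into `q` (as in `torch.nn.functional.multi_head_attention_forward`-style code). The hunk below reapplies the same `head_dim ** -0.5` factor exactly once, on the projected `q` as it is reshaped; a short equivalence sketch follows the second hunk.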
@@ -721,7 +720,7 @@ class RelPositionMultiheadAttention(nn.Module):
             )
             key_padding_mask = key_padding_mask.to(torch.bool)
 
-        q = q.contiguous().view(tgt_len, bsz, num_heads, head_dim)
+        q = (q.contiguous() * scaling).view(tgt_len, bsz, num_heads, head_dim)
         k = k.contiguous().view(-1, bsz, num_heads, head_dim)
         v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
 
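The net effect is the usual scaled dot-product attention: scores are q·kᵀ scaled by 1/sqrt(head_dim), with the factor folded into `q` before the matmul. Below is a minimal sketch of that equivalence, not the icefall implementation; the tensor names and toy shapes are assumptions for illustration only.

import torch

# Toy shapes, assumed for illustration: (batch * num_heads, seq_len, head_dim).
bsz_heads, seq_len, head_dim = 8, 10, 64
q = torch.randn(bsz_heads, seq_len, head_dim)
k = torch.randn(bsz_heads, seq_len, head_dim)

scaling = float(head_dim) ** -0.5  # same factor as in the diff above

# Scaling the projected q once before the matmul ...
scores_scaled_q = torch.bmm(q * scaling, k.transpose(1, 2))
# ... gives (up to rounding) the same attention scores as dividing the raw
# dot products by sqrt(head_dim) afterwards.
scores_scaled_after = torch.bmm(q, k.transpose(1, 2)) * scaling

assert torch.allclose(scores_scaled_q, scores_scaled_after, atol=1e-5)

Applying the factor at the reshape means the scaling happens exactly once per forward pass, regardless of which of the self-/cross-attention branches produced `q`.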