Fix q*scaling logic

Daniel Povey 2022-03-11 14:43:57 +08:00
parent 137eae0b95
commit 2940d3106f


@@ -616,7 +616,6 @@ class RelPositionMultiheadAttention(nn.Module):
         ), "embed_dim must be divisible by num_heads"
         scaling = float(head_dim) ** -0.5
-        q = q * scaling
 
         if torch.equal(query, key) and torch.equal(key, value):
             # self-attention
@@ -721,7 +720,7 @@ class RelPositionMultiheadAttention(nn.Module):
             )
             key_padding_mask = key_padding_mask.to(torch.bool)
 
-        q = q.contiguous().view(tgt_len, bsz, num_heads, head_dim)
+        q = (q.contiguous() * scaling).view(tgt_len, bsz, num_heads, head_dim)
         k = k.contiguous().view(-1, bsz, num_heads, head_dim)
         v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
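
Taken together, the two hunks move the query scaling: the standalone `q = q * scaling` that ran before the self-/cross-attention branch is removed, and the `head_dim ** -0.5` factor is instead folded into the reshape of the already-projected `q`. Below is a minimal sketch of where that factor now lands, assuming hypothetical tensor shapes and names; only the `q = (q.contiguous() * scaling).view(...)` line is taken from the diff, the rest is illustration.

    import torch

    # Hypothetical sizes, for illustration only.
    tgt_len, bsz, num_heads, head_dim = 10, 2, 4, 64
    scaling = float(head_dim) ** -0.5

    # Assume q and k are the already-projected query/key,
    # shaped (seq_len, bsz, num_heads * head_dim).
    q = torch.randn(tgt_len, bsz, num_heads * head_dim)
    k = torch.randn(tgt_len, bsz, num_heads * head_dim)

    # Apply the 1/sqrt(head_dim) scale at reshape time,
    # mirroring the "+" line in the second hunk above.
    q = (q.contiguous() * scaling).view(tgt_len, bsz, num_heads, head_dim)
    k = k.contiguous().view(-1, bsz, num_heads, head_dim)

    # Content-based attention scores, shaped (bsz, num_heads, tgt_len, src_len).
    attn_weights = torch.einsum("tbhd,sbhd->bhts", q, k)
    print(attn_weights.shape)  # torch.Size([2, 4, 10, 10])

Scaling the projected q once, right where it is reshaped for the attention heads, keeps the cost of the score matmul unchanged and avoids applying the factor to a tensor that is later reassigned by the input projections.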