fix conv_emformer2 when using right_context_length=0 (#1076)

This commit is contained in:
Zengwei Yao 2023-05-21 20:31:54 +08:00 committed by GitHub
parent 30fcd16c7d
commit 8070258ec5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -1358,12 +1358,7 @@ class EmformerEncoder(nn.Module):
output_lengths = torch.clamp(lengths - self.right_context_length, min=0)
attention_mask = self._gen_attention_mask(utterance)
-M = (
-    right_context.size(0) // self.right_context_length - 1
-    if self.use_memory
-    else 0
-)
-padding_mask = make_pad_mask(M + right_context.size(0) + output_lengths)
+padding_mask = make_pad_mask(attention_mask.shape[1] - U + output_lengths)
output = utterance
for layer in self.emformer_layers: