From e5666628bd655901402517c7cd163d4b258bc60e Mon Sep 17 00:00:00 2001
From: Daniel Povey
Date: Wed, 28 Sep 2022 20:58:34 +0800
Subject: [PATCH] Bug fix

---
 egs/librispeech/ASR/pruned_transducer_stateless7/conformer.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless7/conformer.py b/egs/librispeech/ASR/pruned_transducer_stateless7/conformer.py
index 9facae5ce..bd0e625f0 100644
--- a/egs/librispeech/ASR/pruned_transducer_stateless7/conformer.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless7/conformer.py
@@ -474,7 +474,6 @@ class AttentionDownsample(torch.nn.Module):
         Require out_channels > in_channels.
         """
         super(AttentionDownsample, self).__init__()
-        assert out_channels > in_channels
         self.query = nn.Parameter(torch.randn(in_channels) * (in_channels ** -0.5))
 
         # fill in the extra dimensions with a projection of the input
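
Context for the fix (not part of the patch itself): the removed assertion rejected the case out_channels == in_channels, even though the surrounding comment ("fill in the extra dimensions with a projection of the input") suggests extra dimensions are only needed when the output is wider than the input. The sketch below is a hypothetical reconstruction, not the actual icefall code; the class name AttentionDownsampleSketch and the attribute extra_proj are illustrative names chosen for this example.

# Hypothetical sketch in the spirit of AttentionDownsample, showing why the
# strict assert was too restrictive: when out_channels == in_channels there
# are no extra dimensions to fill in, so that configuration is valid.
import torch
import torch.nn as nn


class AttentionDownsampleSketch(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, downsample: int):
        super().__init__()
        # Equality is fine; only shrinking the channel dimension is unsupported.
        assert out_channels >= in_channels
        self.downsample = downsample
        # Learned query used to compute attention weights over each group of
        # `downsample` frames.
        self.query = nn.Parameter(torch.randn(in_channels) * (in_channels ** -0.5))
        # Fill in the extra output dimensions with a projection of the input,
        # but only when there actually are extra dimensions.
        if out_channels > in_channels:
            self.extra_proj = nn.Linear(
                in_channels * downsample, out_channels - in_channels, bias=False
            )
        else:
            self.extra_proj = None


# With the old strict assert, this reasonable configuration (downsampling in
# time with no channel expansion) would have raised an AssertionError:
m = AttentionDownsampleSketch(in_channels=384, out_channels=384, downsample=2)
print(m.extra_proj)  # None: nothing extra to fill in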