From 878026864cfa004002f90f22346e17ec2bffce1d Mon Sep 17 00:00:00 2001
From: Quandwang
Date: Thu, 21 Jul 2022 20:12:19 +0800
Subject: [PATCH] check whether q k v weight is None

---
 egs/librispeech/ASR/conformer_ctc2/attention.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/egs/librispeech/ASR/conformer_ctc2/attention.py b/egs/librispeech/ASR/conformer_ctc2/attention.py
index d37d80225..b5929ecf6 100644
--- a/egs/librispeech/ASR/conformer_ctc2/attention.py
+++ b/egs/librispeech/ASR/conformer_ctc2/attention.py
@@ -183,13 +183,13 @@ class MultiheadAttention(nn.Module):
 
         if not self._qkv_same_embed_dim:
             q_proj_weight = (
-                self.q_proj_weight.get_weight() if self.q_proj_weight else None
+                self.q_proj_weight.get_weight() if self.q_proj_weight is not None else None
             )
             k_proj_weight = (
-                self.k_proj_weight.get_weight() if self.k_proj_weight else None
+                self.k_proj_weight.get_weight() if self.k_proj_weight is not None else None
             )
             v_proj_weight = (
-                self.v_proj_weight.get_weight() if self.v_proj_weight else None
+                self.v_proj_weight.get_weight() if self.v_proj_weight is not None else None
             )
             (
                 attn_output,
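
Note on the fix: replacing the truthiness test `if self.q_proj_weight` with
`if self.q_proj_weight is not None` matters because these attributes are
tensor-valued (or module wrappers around weights). If the attribute were a raw
torch Parameter, `bool()` on a tensor with more than one element raises a
RuntimeError rather than answering "is it set?"; `is not None` is well defined
in every case and states the intended None check explicitly. The standalone
sketch below (not part of the patch; `w` is a hypothetical stand-in for a
projection weight) illustrates the difference:

    # Standalone sketch: why `is not None` is the safe check for
    # tensor-valued attributes; `w` stands in for a projection weight.
    import torch

    w = torch.randn(4, 4)

    # Explicit None check: always well defined, True for any actual tensor.
    print(w is not None)  # True

    # Truthiness check: bool() on a tensor with more than one element
    # raises instead of testing whether the attribute is set.
    try:
        if w:
            pass
    except RuntimeError as err:
        print(err)  # Boolean value of Tensor with more than one element is ambiguous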