From 6c16d08b4f44057eba71d7a4ed5b29f89059a12d Mon Sep 17 00:00:00 2001
From: Daniel Povey
Date: Sat, 12 Nov 2022 14:37:23 +0800
Subject: [PATCH] Add bias in interior of SelfAttn module

---
 egs/librispeech/ASR/pruned_transducer_stateless7/zipformer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless7/zipformer.py b/egs/librispeech/ASR/pruned_transducer_stateless7/zipformer.py
index 405d85139..d6c74a360 100644
--- a/egs/librispeech/ASR/pruned_transducer_stateless7/zipformer.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless7/zipformer.py
@@ -1282,7 +1282,7 @@ class SelfAttention(nn.Module):
         super().__init__()
         self.in_proj = nn.Linear(embed_dim,
                                  num_heads * value_head_dim,
-                                 bias=False)
+                                 bias=True)
         # attempt to make the output of `in_proj` uncorrelated within each head
         # and all heads having roughly the same magnitude.  the hope is to
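
For context, below is a minimal self-contained sketch of the projection path this
patch touches. It is not the full Zipformer SelfAttention module; only the
`in_proj` construction mirrors the patched code (`embed_dim`, `num_heads`, and
`value_head_dim` come from the diff), while the forward reshape and the example
sizes are illustrative assumptions.

    import torch
    import torch.nn as nn

    class SelfAttentionSketch(nn.Module):
        """Sketch of the value-projection path changed by this patch."""

        def __init__(self, embed_dim: int, num_heads: int, value_head_dim: int):
            super().__init__()
            self.num_heads = num_heads
            self.value_head_dim = value_head_dim
            # The patched line: the interior value projection now learns a
            # bias term (bias=True instead of bias=False).
            self.in_proj = nn.Linear(embed_dim,
                                     num_heads * value_head_dim,
                                     bias=True)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            # x: (seq_len, batch, embed_dim) -- layout assumed for illustration.
            seq_len, batch, _ = x.shape
            v = self.in_proj(x)  # (seq_len, batch, num_heads * value_head_dim)
            return v.reshape(seq_len, batch, self.num_heads, self.value_head_dim)

    # Hypothetical sizes, for illustration only.
    attn = SelfAttentionSketch(embed_dim=384, num_heads=8, value_head_dim=12)
    print(attn(torch.randn(100, 2, 384)).shape)  # torch.Size([100, 2, 8, 12])

With bias=False, the per-head outputs of `in_proj` are constrained to be
zero-mean linear functions of the input; enabling the bias gives each of the
`num_heads * value_head_dim` output channels a learnable offset.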