From 17ad6c2959d3ba65a54be1adfb4562d29cbe9e9c Mon Sep 17 00:00:00 2001
From: zr_jin <60612200+JinZr@users.noreply.github.com>
Date: Sun, 23 Jul 2023 20:24:58 +0800
Subject: [PATCH] Update alignment_attention_module.py

---
 .../zipformer_label_level_algn/alignment_attention_module.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/egs/librispeech/ASR/zipformer_label_level_algn/alignment_attention_module.py b/egs/librispeech/ASR/zipformer_label_level_algn/alignment_attention_module.py
index e287c363c..51a09db5a 100644
--- a/egs/librispeech/ASR/zipformer_label_level_algn/alignment_attention_module.py
+++ b/egs/librispeech/ASR/zipformer_label_level_algn/alignment_attention_module.py
@@ -179,14 +179,11 @@ class RelPositionMultiheadAttentionWeights(nn.Module):
 
         if use_pos_scores:
             pos_emb = self.linear_pos(pos_emb)
-            print("pos_emb before proj", pos_emb.shape)
             seq_len2 = 2 * seq_len - 1
             pos_emb = pos_emb.reshape(-1, seq_len2, num_heads, pos_head_dim).permute(
                 2, 0, 3, 1
             )
             # pos shape now: (head, {1 or batch_size}, pos_dim, seq_len2)
-            print("p", p.shape)
-            print("pos_emb after proj", pos_emb.shape)
 
             # (head, batch, time1, pos_dim) x (head, 1, pos_dim, seq_len2) -> (head, batch, time1, seq_len2)
             #  [where seq_len2 represents relative position.]
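
Note: the hunk's comments describe a reshape/permute of the projected positional embedding followed by a broadcasted matmul with the projected query-position tensor. The following is a minimal standalone sketch of that shape algebra, assuming illustrative tensor sizes and the name `p` for the query-position tensor (both are placeholders, not the actual icefall configuration); it only verifies that the stated shapes compose to (head, batch, time1, seq_len2).

import torch

# Illustrative sizes only; not the real model's dimensions.
batch_size, seq_len, num_heads, pos_head_dim = 2, 5, 4, 3
seq_len2 = 2 * seq_len - 1  # number of relative-position offsets

# `p` stands in for the projected query-position tensor:
# (head, batch, time1, pos_dim)
p = torch.randn(num_heads, batch_size, seq_len, pos_head_dim)

# `pos_emb` after the linear projection, with a leading batch dim of 1
pos_emb = torch.randn(1, seq_len2, num_heads * pos_head_dim)
pos_emb = pos_emb.reshape(-1, seq_len2, num_heads, pos_head_dim).permute(2, 0, 3, 1)
# pos_emb is now (head, {1 or batch_size}, pos_dim, seq_len2)

# (head, batch, time1, pos_dim) x (head, 1, pos_dim, seq_len2)
#   -> (head, batch, time1, seq_len2), broadcasting over the batch dim
pos_scores = torch.matmul(p, pos_emb)
print(pos_scores.shape)  # torch.Size([4, 2, 5, 9])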