From 9cf447646d6a0b2739ef2c81e1aa75c2af74c77a Mon Sep 17 00:00:00 2001
From: yaozengwei
Date: Tue, 26 Jul 2022 22:31:06 +0800
Subject: [PATCH] modify the doc of RelPositionMultiheadAttention

---
 egs/librispeech/ASR/pruned_transducer_stateless7/conformer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless7/conformer.py b/egs/librispeech/ASR/pruned_transducer_stateless7/conformer.py
index 1401b623a..3ae7e8d61 100644
--- a/egs/librispeech/ASR/pruned_transducer_stateless7/conformer.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless7/conformer.py
@@ -405,7 +405,7 @@ class RelPositionalEncoding(torch.nn.Module):
 
 
 class RelPositionMultiheadAttention(nn.Module):
-    r"""Multi-Head Attention layer with relative position encoding
+    r"""Multi-Head Attention layer with simplified relative position encoding
 
     See reference: "Transformer-XL: Attentive Language Models Beyond a
     Fixed-Length Context"
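
Note for reviewers: the edited docstring points to the Transformer-XL relative position encoding. As a rough illustration only, the attention score in that scheme combines a content term with a position term. The sketch below uses assumed names (rel_attention_scores, pos_bias_u, pos_bias_v) and skips the relative-shift step, so it is not the code in conformer.py.

# Illustrative sketch (not the icefall implementation): Transformer-XL-style
# attention scores = content term (q + u)·k  +  position term (q + v)·p.
import torch


def rel_attention_scores(q, k, pos_emb, pos_bias_u, pos_bias_v):
    # q, k:      (batch, heads, time, d_head)
    # pos_emb:   (heads, time, d_head) relative position embeddings
    #            (indexed directly by key position here; a full implementation
    #            applies a "relative shift" over query-key offsets)
    # pos_bias_u, pos_bias_v: (heads, d_head) learned biases
    content = torch.einsum("bhtd,bhsd->bhts", q + pos_bias_u[None, :, None, :], k)
    position = torch.einsum("bhtd,hsd->bhts", q + pos_bias_v[None, :, None, :], pos_emb)
    return (content + position) / q.size(-1) ** 0.5


# Example shapes: 2 utterances, 4 heads, 10 frames, 16-dim heads.
q = k = torch.randn(2, 4, 10, 16)
pos_emb = torch.randn(4, 10, 16)
u = v = torch.randn(4, 16)
scores = rel_attention_scores(q, k, pos_emb, u, v)  # -> (2, 4, 10, 10)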