Modify the docstring of RelPositionMultiheadAttention

This commit is contained in:
yaozengwei 2022-07-26 22:31:06 +08:00
parent 6fa0ef1e8d
commit 9cf447646d

View File

@@ -405,7 +405,7 @@ class RelPositionalEncoding(torch.nn.Module):
class RelPositionMultiheadAttention(nn.Module):
-    r"""Multi-Head Attention layer with relative position encoding
+    r"""Multi-Head Attention layer with simplified relative position encoding
See reference: "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"