diff --git a/egs/aishell/ASR/conformer_ctc/transformer.py b/egs/aishell/ASR/conformer_ctc/transformer.py
index a3e50e385..dfd888414 100644
--- a/egs/aishell/ASR/conformer_ctc/transformer.py
+++ b/egs/aishell/ASR/conformer_ctc/transformer.py
@@ -545,6 +545,7 @@ class TransformerDecoderLayer(nn.Module):
         memory_mask: Optional[torch.Tensor] = None,
         tgt_key_padding_mask: Optional[torch.Tensor] = None,
         memory_key_padding_mask: Optional[torch.Tensor] = None,
+        **kwargs,
     ) -> torch.Tensor:
         """Pass the inputs (and mask) through the decoder layer.
 
diff --git a/egs/aishell/ASR/conformer_mmi/transformer.py b/egs/aishell/ASR/conformer_mmi/transformer.py
index a3e50e385..dfd888414 100644
--- a/egs/aishell/ASR/conformer_mmi/transformer.py
+++ b/egs/aishell/ASR/conformer_mmi/transformer.py
@@ -545,6 +545,7 @@ class TransformerDecoderLayer(nn.Module):
         memory_mask: Optional[torch.Tensor] = None,
         tgt_key_padding_mask: Optional[torch.Tensor] = None,
         memory_key_padding_mask: Optional[torch.Tensor] = None,
+        **kwargs,
     ) -> torch.Tensor:
         """Pass the inputs (and mask) through the decoder layer.
 
diff --git a/egs/gigaspeech/ASR/conformer_ctc/transformer.py b/egs/gigaspeech/ASR/conformer_ctc/transformer.py
index 0566cfc81..2d797cc67 100644
--- a/egs/gigaspeech/ASR/conformer_ctc/transformer.py
+++ b/egs/gigaspeech/ASR/conformer_ctc/transformer.py
@@ -549,6 +549,7 @@ class TransformerDecoderLayer(nn.Module):
         memory_mask: Optional[torch.Tensor] = None,
         tgt_key_padding_mask: Optional[torch.Tensor] = None,
         memory_key_padding_mask: Optional[torch.Tensor] = None,
+        **kwargs,
     ) -> torch.Tensor:
         """Pass the inputs (and mask) through the decoder layer.
 
diff --git a/egs/librispeech/ASR/conformer_ctc/transformer.py b/egs/librispeech/ASR/conformer_ctc/transformer.py
index 0566cfc81..2d797cc67 100644
--- a/egs/librispeech/ASR/conformer_ctc/transformer.py
+++ b/egs/librispeech/ASR/conformer_ctc/transformer.py
@@ -549,6 +549,7 @@ class TransformerDecoderLayer(nn.Module):
         memory_mask: Optional[torch.Tensor] = None,
         tgt_key_padding_mask: Optional[torch.Tensor] = None,
         memory_key_padding_mask: Optional[torch.Tensor] = None,
+        **kwargs,
     ) -> torch.Tensor:
         """Pass the inputs (and mask) through the decoder layer.
 
diff --git a/egs/librispeech/ASR/conformer_ctc2/transformer.py b/egs/librispeech/ASR/conformer_ctc2/transformer.py
index d3443dc94..6b62a5993 100644
--- a/egs/librispeech/ASR/conformer_ctc2/transformer.py
+++ b/egs/librispeech/ASR/conformer_ctc2/transformer.py
@@ -550,6 +550,7 @@ class TransformerDecoderLayer(nn.Module):
         tgt_key_padding_mask: Optional[torch.Tensor] = None,
         memory_key_padding_mask: Optional[torch.Tensor] = None,
         warmup: float = 1.0,
+        **kwargs,
     ) -> torch.Tensor:
         """Pass the inputs (and mask) through the decoder layer.
 
diff --git a/egs/librispeech/ASR/conformer_mmi/transformer.py b/egs/librispeech/ASR/conformer_mmi/transformer.py
index 2542d9abe..3bc6b88ec 100644
--- a/egs/librispeech/ASR/conformer_mmi/transformer.py
+++ b/egs/librispeech/ASR/conformer_mmi/transformer.py
@@ -537,6 +537,7 @@ class TransformerDecoderLayer(nn.Module):
         memory_mask: Optional[torch.Tensor] = None,
         tgt_key_padding_mask: Optional[torch.Tensor] = None,
         memory_key_padding_mask: Optional[torch.Tensor] = None,
+        **kwargs,
     ) -> torch.Tensor:
         """Pass the inputs (and mask) through the decoder layer.
 
diff --git a/egs/librispeech/ASR/streaming_conformer_ctc/transformer.py b/egs/librispeech/ASR/streaming_conformer_ctc/transformer.py
index 0c87fdf1b..987a45b1f 100644
--- a/egs/librispeech/ASR/streaming_conformer_ctc/transformer.py
+++ b/egs/librispeech/ASR/streaming_conformer_ctc/transformer.py
@@ -567,6 +567,7 @@ class TransformerDecoderLayer(nn.Module):
         memory_mask: Optional[torch.Tensor] = None,
         tgt_key_padding_mask: Optional[torch.Tensor] = None,
         memory_key_padding_mask: Optional[torch.Tensor] = None,
+        **kwargs,
     ) -> torch.Tensor:
         """Pass the inputs (and mask) through the decoder layer.
 
diff --git a/egs/tedlium3/ASR/conformer_ctc2/transformer.py b/egs/tedlium3/ASR/conformer_ctc2/transformer.py
index 9dbf32e48..804c92957 100644
--- a/egs/tedlium3/ASR/conformer_ctc2/transformer.py
+++ b/egs/tedlium3/ASR/conformer_ctc2/transformer.py
@@ -612,6 +612,7 @@ class TransformerDecoderLayer(nn.Module):
         tgt_key_padding_mask: Optional[torch.Tensor] = None,
         memory_key_padding_mask: Optional[torch.Tensor] = None,
         warmup: float = 1.0,
+        **kwargs,
     ) -> torch.Tensor:
         """Pass the inputs (and mask) through the decoder layer.
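
Every hunk above makes the same edit: a catch-all **kwargs parameter is added to TransformerDecoderLayer.forward in each recipe. A plausible motivation, offered here as an assumption rather than something the diff states, is that newer torch.nn.TransformerDecoder releases (PyTorch 2.x) forward extra keyword arguments such as tgt_is_causal and memory_is_causal to each layer, and a custom layer whose forward lacks those parameters would fail with a TypeError. The sketch below uses a hypothetical ToyDecoderLayer, not the icefall classes, to illustrate the pattern.

# Minimal sketch (hypothetical ToyDecoderLayer, not the icefall code): a custom
# decoder layer wrapped in torch.nn.TransformerDecoder keeps working across
# PyTorch versions by accepting and ignoring extra keyword arguments via **kwargs.
import torch
import torch.nn as nn


class ToyDecoderLayer(nn.Module):
    def __init__(self, d_model: int = 16, nhead: int = 4) -> None:
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead)
        self.src_attn = nn.MultiheadAttention(d_model, nhead)
        self.linear = nn.Linear(d_model, d_model)

    def forward(
        self,
        tgt: torch.Tensor,
        memory: torch.Tensor,
        tgt_mask=None,
        memory_mask=None,
        tgt_key_padding_mask=None,
        memory_key_padding_mask=None,
        **kwargs,  # silently absorbs e.g. tgt_is_causal / memory_is_causal
    ) -> torch.Tensor:
        # Simple residual attention blocks; the details are irrelevant here,
        # only the forward() signature matters for the compatibility point.
        tgt = tgt + self.self_attn(
            tgt, tgt, tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
        )[0]
        tgt = tgt + self.src_attn(
            tgt, memory, memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask
        )[0]
        return self.linear(tgt)


decoder = nn.TransformerDecoder(ToyDecoderLayer(), num_layers=2)
tgt = torch.randn(5, 3, 16)     # (T, N, C), batch_first=False
memory = torch.randn(7, 3, 16)  # (S, N, C)
out = decoder(tgt, memory)      # no TypeError even if the wrapper passes extra kwargs
print(out.shape)                # torch.Size([5, 3, 16])

Accepting **kwargs rather than spelling out the new parameters keeps a single forward() signature working on both older and newer PyTorch releases; the trade-off is that unknown keyword arguments are dropped silently instead of raising an error.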