From f47fe8337aec12d8d7a005855e763b695f37e9d1 Mon Sep 17 00:00:00 2001
From: Daniel Povey
Date: Thu, 31 Mar 2022 12:16:08 +0800
Subject: [PATCH] Remove some un-used code

---
 .../ASR/pruned_transducer_stateless2/conformer.py | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/conformer.py b/egs/librispeech/ASR/pruned_transducer_stateless2/conformer.py
index 528cc48f4..abe30633c 100644
--- a/egs/librispeech/ASR/pruned_transducer_stateless2/conformer.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless2/conformer.py
@@ -54,7 +54,6 @@ class Conformer(EncoderInterface):
         num_encoder_layers: int = 12,
         dropout: float = 0.1,
         cnn_module_kernel: int = 31,
-        aux_layer_period: int = 3
     ) -> None:
         super(Conformer, self).__init__()
 
@@ -79,8 +78,7 @@ class Conformer(EncoderInterface):
             dropout,
             cnn_module_kernel,
         )
-        self.encoder = ConformerEncoder(encoder_layer, num_encoder_layers,
-                                        aux_layers=list(range(0, num_encoder_layers-1, aux_layer_period)))
+        self.encoder = ConformerEncoder(encoder_layer, num_encoder_layers)
 
         if output_dim == d_model:
             self.encoder_output_layer = nn.Identity()
@@ -277,16 +275,13 @@ class ConformerEncoder(nn.Module):
     >>> out = conformer_encoder(src, pos_emb)
     """
 
-    def __init__(self, encoder_layer: nn.Module, num_layers: int,
-                 aux_layers: Sequence[int]) -> None:
+    def __init__(self, encoder_layer: nn.Module, num_layers: int) -> None:
         super().__init__()
         self.layers = nn.ModuleList(
             [copy.deepcopy(encoder_layer) for i in range(num_layers)]
         )
-        self.aux_layers = set(aux_layers + [num_layers - 1])
-        assert num_layers - 1 not in aux_layers
         self.num_layers = num_layers
-        num_channels = encoder_layer.d_model
+
     def forward(
         self,