diff --git a/egs/aishell/ASR/conformer_ctc/transformer.py b/egs/aishell/ASR/conformer_ctc/transformer.py
index 982abc6c0..90aca750d 100644
--- a/egs/aishell/ASR/conformer_ctc/transformer.py
+++ b/egs/aishell/ASR/conformer_ctc/transformer.py
@@ -113,6 +113,7 @@ class Transformer(nn.Module):
         # num_layers=num_encoder_layers,
         # norm=encoder_norm,
         #)
+        print(encoder_norm)
         self.encoder = TransfEncoder(
             encoder_layer=encoder_layer,
             num_layers=num_encoder_layers,
@@ -491,10 +492,10 @@ class TransfEncoder(nn.TransformerEncoder):
             output = torch._nested_tensor_from_mask(output, src_key_padding_mask.logical_not(), mask_check=False)
             src_key_padding_mask_for_layers = None

-        outputs = []
+        layer_outputs = []
         for mod in self.layers:
             output = mod(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask_for_layers)
-            outputs.append(output)
+            layer_outputs.append(output)

         if convert_to_nested:
             output = output.to_padded_tensor(0.)
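
For reference, a minimal, self-contained sketch of what the patched TransfEncoder amounts to: an nn.TransformerEncoder subclass whose forward collects each layer's output. The diff does not show how layer_outputs is consumed, so returning it alongside the final output is an assumption here, and the nested-tensor fast path (convert_to_nested / torch._nested_tensor_from_mask) is omitted for simplicity.

import torch
import torch.nn as nn


class TransfEncoder(nn.TransformerEncoder):
    """A TransformerEncoder whose forward also keeps every layer's output."""

    def forward(self, src, mask=None, src_key_padding_mask=None):
        output = src

        # Run the stack layer by layer, saving each intermediate result.
        layer_outputs = []
        for mod in self.layers:
            output = mod(
                output,
                src_mask=mask,
                src_key_padding_mask=src_key_padding_mask,
            )
            layer_outputs.append(output)

        if self.norm is not None:
            output = self.norm(output)

        # Assumed return signature; the diff only shows the collection step.
        return output, layer_outputs


# Usage sketch (shapes assume the default batch_first=False layout,
# i.e. (seq_len, batch, d_model); all sizes here are illustrative):
encoder_layer = nn.TransformerEncoderLayer(d_model=256, nhead=4)
encoder = TransfEncoder(encoder_layer=encoder_layer, num_layers=6)
src = torch.randn(100, 8, 256)
final, per_layer = encoder(src)
assert len(per_layer) == 6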