diff --git a/egs/librispeech/ASR/conformer_ctc2/.conformer.py.swp b/egs/librispeech/ASR/conformer_ctc2/.conformer.py.swp
index 19088f777..f0eecb305 100644
Binary files a/egs/librispeech/ASR/conformer_ctc2/.conformer.py.swp and b/egs/librispeech/ASR/conformer_ctc2/.conformer.py.swp differ
diff --git a/egs/librispeech/ASR/conformer_ctc2/.decode.py.swp b/egs/librispeech/ASR/conformer_ctc2/.decode.py.swp
index 7b734f910..67d6febf7 100644
Binary files a/egs/librispeech/ASR/conformer_ctc2/.decode.py.swp and b/egs/librispeech/ASR/conformer_ctc2/.decode.py.swp differ
diff --git a/egs/librispeech/ASR/conformer_ctc2/decode.py b/egs/librispeech/ASR/conformer_ctc2/decode.py
index 48836920b..0ab55ef43 100755
--- a/egs/librispeech/ASR/conformer_ctc2/decode.py
+++ b/egs/librispeech/ASR/conformer_ctc2/decode.py
@@ -319,6 +319,8 @@ def decode_one_batch(
     supervisions = batch["supervisions"]
 
     nnet_output, memory, memory_key_padding_mask = model(feature, supervisions)
+    print(type(nnet_output))
+    exit()
     # nnet_output is (N, T, C)
 
     supervision_segments = torch.stack(