diff --git a/egs/librispeech/ASR/conformer_ctc2/.decode.py.swp b/egs/librispeech/ASR/conformer_ctc2/.decode.py.swp
index 120bac8a0..be51e9266 100644
Binary files a/egs/librispeech/ASR/conformer_ctc2/.decode.py.swp and b/egs/librispeech/ASR/conformer_ctc2/.decode.py.swp differ
diff --git a/egs/librispeech/ASR/conformer_ctc2/.train.py.swp b/egs/librispeech/ASR/conformer_ctc2/.train.py.swp
index 9e88101b7..bbe7892af 100644
Binary files a/egs/librispeech/ASR/conformer_ctc2/.train.py.swp and b/egs/librispeech/ASR/conformer_ctc2/.train.py.swp differ
diff --git a/egs/librispeech/ASR/conformer_ctc2/decode.py b/egs/librispeech/ASR/conformer_ctc2/decode.py
index f1a56fb05..72ae1fb10 100755
--- a/egs/librispeech/ASR/conformer_ctc2/decode.py
+++ b/egs/librispeech/ASR/conformer_ctc2/decode.py
@@ -321,6 +321,8 @@ def decode_one_batch(
     nnet_output, memory, memory_key_padding_mask = model(feature, supervisions)
     if type(nnet_output) == tuple:
         nnet_output = nnet_output[0]
+    print(type(nnet_output))
+    exit()
     # nnet_output is (N, T, C)

     supervision_segments = torch.stack(
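
Note on the decode.py hunk above (not part of the patch itself): the context lines keep only the first element when the model returns a tuple, so what flows on is the (N, T, C) log-prob tensor; the two added lines then print that type and stop the run. Below is a minimal standalone sketch of that unwrapping under the same assumption. unwrap_nnet_output is a hypothetical helper name, and isinstance stands in for the inline type(...) == tuple check.

# Standalone sketch (assumption, not part of the patch): how the tuple check in
# decode_one_batch behaves. "unwrap_nnet_output" is a hypothetical helper name;
# in the patch the check is written inline with type(nnet_output) == tuple.
import torch


def unwrap_nnet_output(nnet_output):
    """Return the (N, T, C) log-prob tensor, unwrapping a tuple if needed."""
    if isinstance(nnet_output, tuple):
        nnet_output = nnet_output[0]
    return nnet_output


# Both call patterns yield the same (N, T, C) tensor.
log_probs = torch.randn(2, 10, 500)
assert unwrap_nnet_output(log_probs).shape == (2, 10, 500)
assert unwrap_nnet_output((log_probs, None)).shape == (2, 10, 500)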