diff --git a/egs/librispeech/ASR/pruned_transducer_stateless9/beam_search.py b/egs/librispeech/ASR/pruned_transducer_stateless9/beam_search.py
index 2bc4c1788..5b1352b63 100644
--- a/egs/librispeech/ASR/pruned_transducer_stateless9/beam_search.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless9/beam_search.py
@@ -609,7 +609,7 @@ def greedy_search(
         else:
             sym_per_frame = 0
             t += 1
-    hyp = hyp[context_size :]  # remove blanks
+    hyp = hyp[context_size:]  # remove blanks
 
     if not return_timestamps:
         return hyp
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless9/decoder.py b/egs/librispeech/ASR/pruned_transducer_stateless9/decoder.py
index aead79de5..89b21b0f9 100644
--- a/egs/librispeech/ASR/pruned_transducer_stateless9/decoder.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless9/decoder.py
@@ -126,7 +126,7 @@ class Decoder(nn.Module):
         embedding_out = self.embedding(y.clamp(min=0)) * (y >= 0).unsqueeze(-1)
         if self.context_size > 1:
             embedding_out = embedding_out.permute(0, 2, 1)
-            if need_pad is True: 
+            if need_pad is True:
                 embedding_out = F.pad(embedding_out, pad=(self.context_size - 1, 0))
             else:
                 # During inference time, there is no need to do extra padding
diff --git a/egs/librispeech/ASR/streaming_conformer_ctc/train.py b/egs/librispeech/ASR/streaming_conformer_ctc/train.py
index d265de45b..74565dac3 100755
--- a/egs/librispeech/ASR/streaming_conformer_ctc/train.py
+++ b/egs/librispeech/ASR/streaming_conformer_ctc/train.py
@@ -52,6 +52,7 @@ from icefall.utils import (
 )
 from lhotse.cut import Cut
 
+
 def get_parser():
     parser = argparse.ArgumentParser(
         formatter_class=argparse.ArgumentDefaultsHelpFormatter