diff --git a/.github/workflows/run-pretrained.yml b/.github/workflows/run-pretrained.yml
index 5c80c0548..97d3c32d2 100644
--- a/.github/workflows/run-pretrained.yml
+++ b/.github/workflows/run-pretrained.yml
@@ -85,7 +85,7 @@ jobs:
           ./conformer_ctc/pretrained.py \
             --num-classes 500 \
             --checkpoint ./tmp/icefall-asr-conformer-ctc-bpe-500/exp/pretrained.pt \
-            --lang-dir ./tmp/icefall-asr-conformer-ctc-bpe-500/data/lang_bpe_500 \
+            --bpe-model ./tmp/icefall-asr-conformer-ctc-bpe-500/data/lang_bpe_500/bpe.model \
             --method ctc-decoding \
             ./tmp/icefall-asr-conformer-ctc-bpe-500/test_wavs/1089-134686-0001.flac \
             ./tmp/icefall-asr-conformer-ctc-bpe-500/test_wavs/1221-135766-0001.flac \
@@ -99,7 +99,8 @@ jobs:
           ./conformer_ctc/pretrained.py \
             --num-classes 500 \
             --checkpoint ./tmp/icefall-asr-conformer-ctc-bpe-500/exp/pretrained.pt \
-            --lang-dir ./tmp/icefall-asr-conformer-ctc-bpe-500/data/lang_bpe_500 \
+            --words-file ./tmp/icefall-asr-conformer-ctc-bpe-500/data/lang_bpe_500/words.txt \
+            --HLG ./tmp/icefall-asr-conformer-ctc-bpe-500/data/lang_bpe_500/HLG.pt \
             ./tmp/icefall-asr-conformer-ctc-bpe-500/test_wavs/1089-134686-0001.flac \
             ./tmp/icefall-asr-conformer-ctc-bpe-500/test_wavs/1221-135766-0001.flac \
             ./tmp/icefall-asr-conformer-ctc-bpe-500/test_wavs/1221-135766-0002.flac
diff --git a/egs/librispeech/ASR/conformer_ctc/pretrained.py b/egs/librispeech/ASR/conformer_ctc/pretrained.py
index 20461cf63..99bd9c017 100755
--- a/egs/librispeech/ASR/conformer_ctc/pretrained.py
+++ b/egs/librispeech/ASR/conformer_ctc/pretrained.py
@@ -319,7 +319,7 @@ def main():
         logging.info("Use CTC decoding")
         bpe_model = spm.SentencePieceProcessor()
         bpe_model.load(params.bpe_model)
-        max_token_id = bpe_model.get_piece_size() - 1
+        max_token_id = params.num_classes - 1
 
         H = k2.ctc_topo(
             max_token=max_token_id,
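
Note (not part of the patch): a minimal sketch of what the CTC-decoding branch computes after this change. The num_classes value and the CPU device are assumptions mirroring the workflow flags above; the real script takes them from its command-line arguments.

import k2
import torch

# Assumed to mirror `--num-classes 500` from the workflow job above.
num_classes = 500

# After the patch, the CTC topology is sized from --num-classes instead of
# bpe_model.get_piece_size(), so H always matches the acoustic model's output
# dimension even if the on-disk bpe.model has a different vocabulary size.
max_token_id = num_classes - 1
H = k2.ctc_topo(
    max_token=max_token_id,
    modified=False,
    device=torch.device("cpu"),
)
# H covers token IDs 0..max_token_id, with 0 being the blank symbol.
print(H.num_arcs)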