modify doc

commit 86b9210d7b
parent 8c03d6079a
Author: yaozengwei
Date:   2022-06-11 22:22:46 +08:00
2 changed files with 48 additions and 39 deletions


@@ -19,41 +19,53 @@
 """
 Usage:
 (1) greedy search
-./pruned_transducer_stateless4/decode.py \
-    --epoch 30 \
-    --avg 15 \
-    --exp-dir ./pruned_transducer_stateless2/exp \
-    --max-duration 100 \
-    --decoding-method greedy_search
-(2) beam search
-./pruned_transducer_stateless4/decode.py \
-    --epoch 30 \
-    --avg 15 \
-    --exp-dir ./pruned_transducer_stateless2/exp \
-    --max-duration 100 \
-    --decoding-method beam_search \
-    --beam-size 4
-(3) modified beam search
-./pruned_transducer_stateless4/decode.py \
-    --epoch 30 \
-    --avg 15 \
-    --exp-dir ./pruned_transducer_stateless2/exp \
-    --max-duration 100 \
-    --decoding-method modified_beam_search \
-    --beam-size 4
-(4) fast beam search
-./pruned_transducer_stateless4/decode.py \
-    --epoch 30 \
-    --avg 15 \
-    --exp-dir ./pruned_transducer_stateless2/exp \
-    --max-duration 1500 \
-    --decoding-method fast_beam_search \
-    --beam 4 \
-    --max-contexts 4 \
-    --max-states 8
+./conv_emformer_transducer_stateless/decode.py \
+    --epoch 30 \
+    --avg 10 \
+    --exp-dir conv_emformer_transducer_stateless/exp \
+    --max-duration 300 \
+    --num-encoder-layers 12 \
+    --chunk-length 32 \
+    --cnn-module-kernel 31 \
+    --left-context-length 32 \
+    --right-context-length 8 \
+    --memory-size 32 \
+    --decoding-method greedy_search \
+    --use-averaged-model True
+(2) modified beam search
+./conv_emformer_transducer_stateless/decode.py \
+    --epoch 30 \
+    --avg 10 \
+    --exp-dir conv_emformer_transducer_stateless/exp \
+    --max-duration 300 \
+    --num-encoder-layers 12 \
+    --chunk-length 32 \
+    --cnn-module-kernel 31 \
+    --left-context-length 32 \
+    --right-context-length 8 \
+    --memory-size 32 \
+    --decoding-method modified_beam_search \
+    --use-averaged-model True \
+    --beam-size 4
+(3) fast beam search
+./conv_emformer_transducer_stateless/decode.py \
+    --epoch 30 \
+    --avg 10 \
+    --exp-dir conv_emformer_transducer_stateless/exp \
+    --max-duration 300 \
+    --num-encoder-layers 12 \
+    --chunk-length 32 \
+    --cnn-module-kernel 31 \
+    --left-context-length 32 \
+    --right-context-length 8 \
+    --memory-size 32 \
+    --decoding-method fast_beam_search \
+    --use-averaged-model True \
+    --beam 4 \
+    --max-contexts 4 \
+    --max-states 8
 """
@@ -122,7 +134,7 @@ def get_parser():
     parser.add_argument(
         "--avg",
         type=int,
-        default=15,
+        default=10,
         help="Number of checkpoints to average. Automatically select "
         "consecutive checkpoints before the checkpoint specified by "
         "'--epoch' and '--iter'",
@@ -131,7 +143,7 @@ def get_parser():
     parser.add_argument(
         "--use-averaged-model",
         type=str2bool,
-        default=False,
+        default=True,
         help="Whether to load averaged model. Currently it only supports "
         "using --epoch. If True, it would decode with the averaged model "
         "over the epoch range from `epoch-avg` (excluded) to `epoch`."
@@ -159,7 +171,6 @@ def get_parser():
         default="greedy_search",
         help="""Possible values are:
           - greedy_search
-          - beam_search
           - modified_beam_search
           - fast_beam_search
         """,
@@ -207,6 +218,7 @@ def get_parser():
         help="The context size in the decoder. 1 means bigram; "
         "2 means tri-gram",
     )
+
     parser.add_argument(
         "--max-sym-per-frame",
         type=int,
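
The --context-size help above ("1 means bigram; 2 means tri-gram") refers to how many previous output tokens the stateless decoder conditions on. A toy illustration of that idea follows; it is not the repo's decoder, and the layer sizes and the use of a 1-D convolution here are assumptions.

# Toy stateless decoder: predicts from only the last `context_size` tokens.
import torch
import torch.nn as nn

class ToyStatelessDecoder(nn.Module):
    def __init__(self, vocab_size: int, embed_dim: int, context_size: int):
        super().__init__()
        self.context_size = context_size
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        # Mix the embeddings of the last `context_size` tokens with a 1-D conv.
        self.conv = nn.Conv1d(embed_dim, embed_dim, kernel_size=context_size)

    def forward(self, ys: torch.Tensor) -> torch.Tensor:
        # ys: (batch, context_size) -- the most recent output tokens.
        emb = self.embedding(ys).permute(0, 2, 1)  # (batch, embed_dim, context_size)
        return self.conv(emb).squeeze(-1)          # (batch, embed_dim)

dec = ToyStatelessDecoder(vocab_size=500, embed_dim=512, context_size=2)
out = dec(torch.tensor([[3, 7]]))  # uses only the last two tokens (tri-gram-like)
print(out.shape)  # torch.Size([1, 512])

Because the decoder sees only this fixed, short context and keeps no recurrent state, the recipe is called a "stateless" transducer.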


@@ -451,9 +451,6 @@ def fast_beam_search_one_best(
     decoding_streams.terminate_and_flush_to_streams()
-    # import pdb
-    # pdb.set_trace()
     lattice = decoding_streams.format_output(processed_lens.tolist())
     best_path = one_best_decoding(lattice)
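
For context on the surrounding code: after terminate_and_flush_to_streams(), format_output(processed_lens.tolist()) builds a lattice and one_best_decoding(lattice) extracts the best path. A minimal sketch of that one-best step, assuming k2-style FSA lattices (this may not match the project's helper exactly):

import k2

def one_best_decoding_sketch(lattice: k2.Fsa) -> k2.Fsa:
    # The best hypothesis per utterance is the best-scoring path through
    # each FSA in the lattice batch.
    return k2.shortest_path(lattice, use_double_scores=True)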