Add attention rescoring

pkufool 2024-09-24 10:41:19 +08:00
parent ca7dbb085e
commit 83c36ecc18

zipformer/ctc_decode.py

@@ -21,7 +21,16 @@
 """
 Usage:
-(1) ctc-decoding
+(1) ctc-greedy-search
+./zipformer/ctc_decode.py \
+    --epoch 30 \
+    --avg 15 \
+    --exp-dir ./zipformer/exp \
+    --use-ctc 1 \
+    --max-duration 600 \
+    --decoding-method ctc-greedy-search
+(2) ctc-decoding
 ./zipformer/ctc_decode.py \
     --epoch 30 \
     --avg 15 \
@@ -30,7 +39,7 @@ Usage:
     --max-duration 600 \
     --decoding-method ctc-decoding
-(2) 1best
+(3) 1best
 ./zipformer/ctc_decode.py \
     --epoch 30 \
     --avg 15 \
@@ -40,7 +49,7 @@ Usage:
     --hlg-scale 0.6 \
     --decoding-method 1best
-(3) nbest
+(4) nbest
 ./zipformer/ctc_decode.py \
     --epoch 30 \
     --avg 15 \
@@ -50,7 +59,7 @@ Usage:
     --hlg-scale 0.6 \
     --decoding-method nbest
-(4) nbest-rescoring
+(5) nbest-rescoring
 ./zipformer/ctc_decode.py \
     --epoch 30 \
     --avg 15 \
@@ -62,7 +71,7 @@ Usage:
     --lm-dir data/lm \
     --decoding-method nbest-rescoring
-(5) whole-lattice-rescoring
+(6) whole-lattice-rescoring
 ./zipformer/ctc_decode.py \
     --epoch 30 \
     --avg 15 \
@@ -73,6 +82,29 @@ Usage:
     --nbest-scale 1.0 \
     --lm-dir data/lm \
     --decoding-method whole-lattice-rescoring
+(7) attention-decoder-rescoring-no-ngram
+./zipformer/ctc_decode.py \
+    --epoch 30 \
+    --avg 15 \
+    --exp-dir ./zipformer/exp \
+    --use-ctc 1 \
+    --use-attention-decoder 1 \
+    --max-duration 100 \
+    --decoding-method attention-decoder-rescoring-no-ngram
+(8) attention-decoder-rescoring-with-ngram
+./zipformer/ctc_decode.py \
+    --epoch 30 \
+    --avg 15 \
+    --exp-dir ./zipformer/exp \
+    --use-ctc 1 \
+    --use-attention-decoder 1 \
+    --max-duration 100 \
+    --hlg-scale 0.6 \
+    --nbest-scale 1.0 \
+    --lm-dir data/lm \
+    --decoding-method attention-decoder-rescoring-with-ngram
 """
@@ -87,9 +119,11 @@ import k2
 import sentencepiece as spm
 import torch
 import torch.nn as nn
 from asr_datamodule import GigaSpeechAsrDataModule
 from gigaspeech_scoring import asr_text_post_processing
+from lhotse import set_caching_enabled
 from train import add_model_arguments, get_model, get_params

 from icefall.checkpoint import (
@@ -99,10 +133,13 @@ from icefall.checkpoint import (
     load_checkpoint,
 )
 from icefall.decode import (
+    ctc_greedy_search,
     get_lattice,
     nbest_decoding,
     nbest_oracle,
     one_best_decoding,
+    rescore_with_attention_decoder_no_ngram,
+    rescore_with_attention_decoder_with_ngram,
     rescore_with_n_best_list,
     rescore_with_whole_lattice,
 )
@@ -197,23 +234,30 @@ def get_parser():
         default="ctc-decoding",
         help="""Decoding method.
         Supported values are:
-        - (1) ctc-decoding. Use CTC decoding. It uses a sentence piece
+        - (1) ctc-greedy-search. Use CTC greedy search. It uses a sentence piece
           model, i.e., lang_dir/bpe.model, to convert word pieces to words.
           It needs neither a lexicon nor an n-gram LM.
-        - (2) 1best. Extract the best path from the decoding lattice as the
+        - (2) ctc-decoding. Use CTC decoding. It uses a sentence piece
+          model, i.e., lang_dir/bpe.model, to convert word pieces to words.
+          It needs neither a lexicon nor an n-gram LM.
+        - (3) 1best. Extract the best path from the decoding lattice as the
           decoding result.
-        - (3) nbest. Extract n paths from the decoding lattice; the path
+        - (4) nbest. Extract n paths from the decoding lattice; the path
           with the highest score is the decoding result.
-        - (4) nbest-rescoring. Extract n paths from the decoding lattice,
+        - (5) nbest-rescoring. Extract n paths from the decoding lattice,
           rescore them with an n-gram LM (e.g., a 4-gram LM), the path with
           the highest score is the decoding result.
-        - (5) whole-lattice-rescoring. Rescore the decoding lattice with an
+        - (6) whole-lattice-rescoring. Rescore the decoding lattice with an
           n-gram LM (e.g., a 4-gram LM), the best path of rescored lattice
           is the decoding result.
           you have trained an RNN LM using ./rnn_lm/train.py
-        - (6) nbest-oracle. Its WER is the lower bound of any n-best
+        - (7) nbest-oracle. Its WER is the lower bound of any n-best
           rescoring method can achieve. Useful for debugging n-best
           rescoring method.
+        - (8) attention-decoder-rescoring-no-ngram. Extract n paths from the decoding
+          lattice, rescore them with the attention decoder.
+        - (9) attention-decoder-rescoring-with-ngram. Extract n paths from the LM
+          rescored lattice, rescore them with the attention decoder.
         """,
     )
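
Of the new options, ctc-greedy-search is the simplest: take the argmax token
per frame, collapse consecutive repeats, and drop blanks. A minimal
self-contained sketch of the idea (icefall's ctc_greedy_search also honors
encoder_out_lens; this toy ignores padding):

    import torch

    def greedy_search_sketch(ctc_output: torch.Tensor, blank_id: int = 0):
        """ctc_output: (N, T, C) log-probs; returns one token-ID list per utterance."""
        hyps = []
        for logp in ctc_output:                  # (T, C)
            ids = logp.argmax(dim=-1)            # best token per frame
            ids = torch.unique_consecutive(ids)  # collapse repeated frames
            hyps.append(ids[ids != blank_id].tolist())  # drop blanks
        return hyps

    # "tok1 tok1 <blk> tok2" collapses to [1, 2]
    logp = torch.full((1, 4, 3), -10.0)
    logp[0, :2, 1] = 0.0
    logp[0, 2, 0] = 0.0
    logp[0, 3, 2] = 0.0
    assert greedy_search_sketch(logp) == [[1, 2]]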
@@ -256,6 +300,13 @@ def get_parser():
         """,
     )

+    parser.add_argument(
+        "--skip-scoring",
+        type=str2bool,
+        default=False,
+        help="""Skip scoring, but still save the ASR output (for eval sets).""",
+    )
+
     add_model_arguments(parser)

     return parser
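
The new --skip-scoring flag is parsed with str2bool (icefall provides a helper
of that name in icefall.utils; the sketch below only shows the usual shape of
such a parser and is not the project's implementation):

    import argparse

    def str2bool(v: str) -> bool:
        if v.lower() in ("yes", "true", "t", "y", "1"):
            return True
        if v.lower() in ("no", "false", "f", "n", "0"):
            return False
        raise argparse.ArgumentTypeError(f"boolean value expected, got {v!r}")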
@@ -276,17 +327,6 @@ def get_decoding_params() -> AttributeDict:
     return params


-def post_processing(
-    results: List[Tuple[str, List[str], List[str]]],
-) -> List[Tuple[str, List[str], List[str]]]:
-    new_results = []
-    for key, ref, hyp in results:
-        new_ref = asr_text_post_processing(" ".join(ref)).split()
-        new_hyp = asr_text_post_processing(" ".join(hyp)).split()
-        new_results.append((key, new_ref, new_hyp))
-    return new_results
-
-
 def decode_one_batch(
     params: AttributeDict,
     model: nn.Module,
@@ -365,6 +405,15 @@ def decode_one_batch(
     encoder_out, encoder_out_lens = model.forward_encoder(feature, feature_lens)
     ctc_output = model.ctc_output(encoder_out)  # (N, T, C)

+    if params.decoding_method == "ctc-greedy-search":
+        hyps = ctc_greedy_search(ctc_output, encoder_out_lens)
+        # hyps is a list of str, e.g., ['xxx yyy zzz', ...]
+        hyps = bpe_model.decode(hyps)
+        # hyps is a list of list of str, e.g., [['xxx', 'yyy', 'zzz'], ... ]
+        hyps = [s.split() for s in hyps]
+        key = "ctc-greedy-search"
+        return {key: hyps}
+
     supervision_segments = torch.stack(
         (
             supervisions["sequence_idx"],
@@ -417,7 +466,27 @@ def decode_one_batch(
         # hyps is a list of list of str, e.g., [['xxx', 'yyy', 'zzz'], ... ]
         hyps = [s.split() for s in hyps]
         key = "ctc-decoding"
-        return {key: hyps}
+        return {key: hyps}  # note: returns words
+
+    if params.decoding_method == "attention-decoder-rescoring-no-ngram":
+        best_path_dict = rescore_with_attention_decoder_no_ngram(
+            lattice=lattice,
+            num_paths=params.num_paths,
+            attention_decoder=model.attention_decoder,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+            nbest_scale=params.nbest_scale,
+        )
+        ans = dict()
+        for a_scale_str, best_path in best_path_dict.items():
+            # token_ids is a list-of-list of IDs
+            token_ids = get_texts(best_path)
+            # hyps is a list of str, e.g., ['xxx yyy zzz', ...]
+            hyps = bpe_model.decode(token_ids)
+            # hyps is a list of list of str, e.g., [['xxx', 'yyy', 'zzz'], ... ]
+            hyps = [s.split() for s in hyps]
+            ans[a_scale_str] = hyps
+        return ans

     if params.decoding_method == "nbest-oracle":
         # Note: You can also pass rescored lattices to it.
@@ -434,7 +503,7 @@ def decode_one_batch(
         )
         hyps = get_texts(best_path)
         hyps = [[word_table[i] for i in ids] for ids in hyps]
-        key = f"oracle_{params.num_paths}_nbest_scale_{params.nbest_scale}"  # noqa
+        key = f"oracle_{params.num_paths}_nbest-scale-{params.nbest_scale}"  # noqa
         return {key: hyps}

     if params.decoding_method in ["1best", "nbest"]:
@@ -442,7 +511,7 @@ def decode_one_batch(
         if params.decoding_method == "1best":
             best_path = one_best_decoding(
                 lattice=lattice, use_double_scores=params.use_double_scores
             )
-            key = "no_rescore"
+            key = "no-rescore"
         else:
             best_path = nbest_decoding(
                 lattice=lattice,
@@ -450,15 +519,16 @@ def decode_one_batch(
                 use_double_scores=params.use_double_scores,
                 nbest_scale=params.nbest_scale,
             )
-            key = f"no_rescore-nbest-scale-{params.nbest_scale}-{params.num_paths}"  # noqa
+            key = f"no-rescore_nbest-scale-{params.nbest_scale}-{params.num_paths}"  # noqa

         hyps = get_texts(best_path)
         hyps = [[word_table[i] for i in ids] for ids in hyps]
-        return {key: hyps}
+        return {key: hyps}  # note: returns words

     assert params.decoding_method in [
         "nbest-rescoring",
         "whole-lattice-rescoring",
+        "attention-decoder-rescoring-with-ngram",
     ]

     lm_scale_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
@@ -479,6 +549,21 @@ def decode_one_batch(
             G_with_epsilon_loops=G,
             lm_scale_list=lm_scale_list,
         )
+    elif params.decoding_method == "attention-decoder-rescoring-with-ngram":
+        # lattice uses a 3-gram LM. We rescore it with a 4-gram LM.
+        rescored_lattice = rescore_with_whole_lattice(
+            lattice=lattice,
+            G_with_epsilon_loops=G,
+            lm_scale_list=None,
+        )
+        best_path_dict = rescore_with_attention_decoder_with_ngram(
+            lattice=rescored_lattice,
+            num_paths=params.num_paths,
+            attention_decoder=model.attention_decoder,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+            nbest_scale=params.nbest_scale,
+        )
     else:
         assert False, f"Unsupported decoding method: {params.decoding_method}"
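
Note the design of this branch: in icefall, passing lm_scale_list=None makes
rescore_with_whole_lattice return the LM-rescored lattice itself rather than
decoded paths; the attention decoder then re-ranks paths drawn from that
lattice, so each result is keyed by the scale combination used. A toy of the
resulting result-dict keys (key format illustrative):

    lm_scales = [0.1, 0.2, 0.3]
    attn_scales = [1.0, 1.5]
    keys = [f"lm_scale_{l}_attention_scale_{a}" for l in lm_scales for a in attn_scales]
    print(keys[:2])  # ['lm_scale_0.1_attention_scale_1.0', 'lm_scale_0.1_attention_scale_1.5']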
@@ -572,39 +657,64 @@ def decode_dataset(
     return results


-def save_results(
+def save_asr_output(
     params: AttributeDict,
     test_set_name: str,
     results_dict: Dict[str, List[Tuple[str, List[str], List[str]]]],
 ):
-    test_set_wers = dict()
+    """
+    Save text produced by ASR.
+    """
     for key, results in results_dict.items():
-        recog_path = params.res_dir / f"recogs-{test_set_name}-{params.suffix}.txt"
+        recogs_filename = params.res_dir / f"recogs-{test_set_name}-{params.suffix}.txt"

         results = post_processing(results)
         results = sorted(results)
-        store_transcripts(filename=recog_path, texts=results)
-        logging.info(f"The transcripts are stored in {recog_path}")
+        store_transcripts(filename=recogs_filename, texts=results)
+
+        logging.info(f"The transcripts are stored in {recogs_filename}")
+
+
+def save_wer_results(
+    params: AttributeDict,
+    test_set_name: str,
+    results_dict: Dict[str, List[Tuple[str, List[str], List[str]]]],
+):
+    if params.decoding_method in (
+        "attention-decoder-rescoring-with-ngram",
+        "whole-lattice-rescoring",
+    ):
+        # Set it to False since there are too many logs.
+        enable_log = False
+    else:
+        enable_log = True
+
+    test_set_wers = dict()
+    for key, results in results_dict.items():
+        results = post_processing(results)
+
         # The following prints out WERs, per-word error statistics and aligned
         # ref/hyp pairs.
         errs_filename = params.res_dir / f"errs-{test_set_name}-{params.suffix}.txt"
-        with open(errs_filename, "w") as f:
-            wer = write_error_stats(f, f"{test_set_name}-{key}", results)
+        with open(errs_filename, "w", encoding="utf8") as fd:
+            wer = write_error_stats(
+                fd, f"{test_set_name}_{key}", results, enable_log=enable_log
+            )
             test_set_wers[key] = wer
-        logging.info("Wrote detailed error stats to {}".format(errs_filename))
+        logging.info(f"Wrote detailed error stats to {errs_filename}")

     test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1])

-    errs_info = params.res_dir / f"wer-summary-{test_set_name}-{params.suffix}.txt"
-    with open(errs_info, "w") as f:
-        print("settings\tWER", file=f)
-        for key, val in test_set_wers:
-            print("{}\t{}".format(key, val), file=f)
-
-    s = "\nFor {}, WER of different settings are:\n".format(test_set_name)
-    note = "\tbest for {}".format(test_set_name)
+    wer_filename = params.res_dir / f"wer-summary-{test_set_name}-{params.suffix}.txt"
+    with open(wer_filename, "w", encoding="utf8") as fd:
+        print("settings\tWER", file=fd)
+        for key, val in test_set_wers:
+            print(f"{key}\t{val}", file=fd)
+
+    s = f"\nFor {test_set_name}, WER of different settings are:\n"
+    note = f"\tbest for {test_set_name}"
     for key, val in test_set_wers:
-        s += "{}\t{}{}\n".format(key, val, note)
+        s += f"{key}\t{val}{note}\n"
         note = ""
     logging.info(s)
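
The refactored save_wer_results writes one row per decoding configuration,
sorted best-first. What the wer-summary file ends up containing (numbers
invented):

    test_set_wers = sorted(
        {"ctc-decoding": 11.2, "attention_scale_1.2": 10.4}.items(),
        key=lambda x: x[1],
    )
    with open("/tmp/wer-summary-test.txt", "w", encoding="utf8") as fd:
        print("settings\tWER", file=fd)
        for key, val in test_set_wers:
            print(f"{key}\t{val}", file=fd)
    # settings             WER
    # attention_scale_1.2  10.4
    # ctc-decoding         11.2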
@@ -623,20 +733,26 @@ def main():
     params.update(get_decoding_params())
     params.update(vars(args))

+    # enable AudioCache
+    set_caching_enabled(True)  # lhotse
+
     assert params.decoding_method in (
+        "ctc-greedy-search",
         "ctc-decoding",
         "1best",
         "nbest",
         "nbest-rescoring",
         "whole-lattice-rescoring",
         "nbest-oracle",
+        "attention-decoder-rescoring-no-ngram",
+        "attention-decoder-rescoring-with-ngram",
     )
     params.res_dir = params.exp_dir / params.decoding_method

     if params.iter > 0:
-        params.suffix = f"iter-{params.iter}-avg-{params.avg}"
+        params.suffix = f"iter-{params.iter}_avg-{params.avg}"
     else:
-        params.suffix = f"epoch-{params.epoch}-avg-{params.avg}"
+        params.suffix = f"epoch-{params.epoch}_avg-{params.avg}"

     if params.causal:
         assert (
@@ -645,11 +761,11 @@ def main():
         assert (
             "," not in params.left_context_frames
         ), "left_context_frames should be one value in decoding."
-        params.suffix += f"-chunk-{params.chunk_size}"
-        params.suffix += f"-left-context-{params.left_context_frames}"
+        params.suffix += f"_chunk-{params.chunk_size}"
+        params.suffix += f"_left-context-{params.left_context_frames}"

     if params.use_averaged_model:
-        params.suffix += "-use-averaged-model"
+        params.suffix += "_use-averaged-model"

     setup_logger(f"{params.res_dir}/log-decode-{params.suffix}")
     logging.info("Decoding started")
@@ -668,8 +784,14 @@ def main():
     params.vocab_size = num_classes
     # <blk> and <unk> are defined in local/train_bpe_model.py
     params.blank_id = 0
+    params.eos_id = 1
+    params.sos_id = 1

-    if params.decoding_method == "ctc-decoding":
+    if params.decoding_method in [
+        "ctc-greedy-search",
+        "ctc-decoding",
+        "attention-decoder-rescoring-no-ngram",
+    ]:
         HLG = None
         H = k2.ctc_topo(
             max_token=max_token_id,
@@ -693,6 +815,7 @@ def main():
     if params.decoding_method in (
         "nbest-rescoring",
         "whole-lattice-rescoring",
+        "attention-decoder-rescoring-with-ngram",
     ):
         if not (params.lm_dir / "G_4_gram.pt").is_file():
             logging.info("Loading G_4_gram.fst.txt")
@@ -724,7 +847,10 @@ def main():
         d = torch.load(params.lm_dir / "G_4_gram.pt", map_location=device)
         G = k2.Fsa.from_dict(d)

-        if params.decoding_method == "whole-lattice-rescoring":
+        if params.decoding_method in [
+            "whole-lattice-rescoring",
+            "attention-decoder-rescoring-with-ngram",
+        ]:
             # Add epsilon self-loops to G as we will compose
             # it with the whole lattice later
             G = k2.add_epsilon_self_loops(G)
@@ -825,6 +951,7 @@ def main():
     # we need cut ids to display recognition results.
     args.return_cuts = True
     gigaspeech = GigaSpeechAsrDataModule(args)
+
     test_cuts = gigaspeech.test_cuts()
@@ -832,9 +959,9 @@ def main():
     test_dl = gigaspeech.test_dataloaders(test_cuts)

     test_sets = ["test"]
-    test_dl = [test_dl]
+    test_dls = [test_dl]

-    for test_set, test_dl in zip(test_sets, test_dl):
+    for test_set, test_dl in zip(test_sets, test_dls):
         results_dict = decode_dataset(
             dl=test_dl,
             params=params,
@@ -846,7 +973,14 @@ def main():
             G=G,
         )

-        save_results(
-            params=params,
-            test_set_name=test_set,
-            results_dict=results_dict,
+        save_asr_output(
+            params=params,
+            test_set_name=test_set,
+            results_dict=results_dict,
+        )
+
+        if not params.skip_scoring:
+            save_wer_results(
+                params=params,
+                test_set_name=test_set,
+                results_dict=results_dict,