Update AIShell recipe result (#140)

* add MMI to AIShell

* fix MMI decode graph

* export model

* typo

* fix code style

* typo

* fix data preparation to use only the training transcripts, selected by uid

* use a faster way to get the intersection of the train uids and aishell_transcript_v0.8.txt

* update AIShell result

* update

* typo
pingfengluo 2021-12-04 14:43:04 +08:00 committed by GitHub
parent 89b84208aa
commit d1adc25338
4 changed files with 41 additions and 36 deletions


@@ -1,22 +1,22 @@
## Results
### Aishell training results (Conformer-MMI)
-#### 2021-12-01
-(Pingfeng Luo): Result of <https://github.com/k2-fsa/icefall/pull/137>
+#### 2021-12-04
+(Pingfeng Luo): Result of <https://github.com/k2-fsa/icefall/pull/140>
-The tensorboard log for training is available at <https://tensorboard.dev/experiment/dyp3vWE9RE6SkqBAgLJjUw/>
+The tensorboard log for training is available at <https://tensorboard.dev/experiment/PSRYVbptRGynqpPRSykp1g>
-And pretrained model is available at <https://huggingface.co/pfluo/icefall_aishell_model>
+And the pretrained model is available at <https://huggingface.co/pfluo/icefall_aishell_mmi_model>
-The best decoding results (CER) are listed below, we got this results by averaging models from epoch 20 to 49, and using `attention-decoder` decoder with num_paths equals to 100.
+The best decoding results (CER) are listed below. We obtained them by averaging the models from epochs 61 to 85 and decoding with the `attention-decoder` method with `num_paths` equal to 100.
||test|
|--|--|
-|CER| 5.12% |
+|CER| 4.94% |
||lm_scale|attention_scale|
|--|--|--|
-|test|1.5|0.5|
+|test|1.1|0.3|
You can use the following commands to reproduce our results:
@@ -31,12 +31,12 @@ export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7,8"
python conformer_mmi/train.py --bucketing-sampler True \
  --max-duration 200 \
  --start-epoch 0 \
-  --num-epochs 50 \
+  --num-epochs 90 \
  --world-size 8
python conformer_mmi/decode.py --nbest-scale 0.5 \
-  --epoch 49 \
-  --avg 20 \
+  --epoch 85 \
+  --avg 25 \
  --method attention-decoder \
  --max-duration 20 \
  --num-paths 100
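
For reference, passing `--epoch 85 --avg 25` to `conformer_mmi/decode.py` averages the parameters of the checkpoints from epochs 61 through 85 before decoding; averaging late-epoch checkpoints acts as a cheap ensemble, and the CER table above reports the averaged model. Below is a minimal sketch of that averaging step, not the recipe's actual code (icefall provides an `average_checkpoints` helper); it assumes each `epoch-N.pt` stores the weights under a `"model"` key and that the experiment directory is `conformer_mmi/exp`:

```python
from typing import List

import torch


def average_checkpoints(filenames: List[str]) -> dict:
    """Average model parameters across several checkpoint files."""
    n = len(filenames)
    # Use the first checkpoint as the accumulator.
    avg = torch.load(filenames[0], map_location="cpu")["model"]
    for filename in filenames[1:]:
        state = torch.load(filename, map_location="cpu")["model"]
        for k in avg:
            avg[k] += state[k]
    for k in avg:
        # Skip integer buffers (e.g. batch counters), which cannot be
        # divided in place.
        if avg[k].is_floating_point():
            avg[k] /= n
    return avg


# --epoch 85 --avg 25 corresponds to epochs 61..85 (25 checkpoints).
filenames = [f"conformer_mmi/exp/epoch-{i}.pt" for i in range(61, 86)]
```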


@@ -1,5 +1,6 @@
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corporation (Author: Liyong Guo, Fangjun Kuang)
+# Copyright 2021 Pingfeng Luo
#
# See ../../../../LICENSE for clarification regarding multiple authors
#


@@ -1,6 +1,7 @@
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang,
# Wei Kang)
+# Copyright 2021 Pingfeng Luo
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
@@ -80,7 +81,7 @@ def get_parser():
    parser.add_argument(
        "--num-epochs",
        type=int,
-        default=50,
+        default=90,
        help="Number of epochs to train.",
    )


@@ -111,63 +111,66 @@ if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
  ./local/compute_fbank_musan.py
fi

+lang_phone_dir=data/lang_phone
+lang_char_dir=data/lang_char
if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
  log "Stage 5: Prepare phone based lang"
-  lang_dir=data/lang_phone
-  mkdir -p $lang_dir
+  mkdir -p $lang_phone_dir
  (echo '!SIL SIL'; echo '<SPOKEN_NOISE> SPN'; echo '<UNK> SPN'; ) |
    cat - $dl_dir/aishell/resource_aishell/lexicon.txt |
-    sort | uniq > $lang_dir/lexicon.txt
+    sort | uniq > $lang_phone_dir/lexicon.txt
-  ./local/generate_unique_lexicon.py --lang-dir $lang_dir
+  ./local/generate_unique_lexicon.py --lang-dir $lang_phone_dir
-  if [ ! -f data/lang_phone/L_disambig.pt ]; then
+  if [ ! -f $lang_phone_dir/L_disambig.pt ]; then
    ./local/prepare_lang.py
  fi
  # Train a bigram P for MMI training
-  if [ ! -f $lang_dir/transcript_words.txt ]; then
+  if [ ! -f $lang_phone_dir/transcript_words.txt ]; then
    log "Generate data to train phone based bigram P"
-    aishell_text=aishell/data_aishell/transcript/aishell_transcript_v0.8.txt
-    cat ${dl_dir}/${aishell_text} | cut -d " " -f 2- > $lang_dir/transcript_words.txt
+    aishell_text=$dl_dir/aishell/data_aishell/transcript/aishell_transcript_v0.8.txt
+    aishell_train_uid=$dl_dir/aishell/data_aishell/transcript/aishell_train_uid
+    find data/aishell/data_aishell/wav/train -name "*.wav" | sed 's/\.wav//g' | awk -F '/' '{print $NF}' > $aishell_train_uid
+    awk 'NR==FNR{uid[$1]=$1} NR!=FNR{if($1 in uid) print $0}' $aishell_train_uid $aishell_text | cut -d " " -f 2- > $lang_phone_dir/transcript_words.txt
  fi
-  if [ ! -f $lang_dir/transcript_tokens.txt ]; then
+  if [ ! -f $lang_phone_dir/transcript_tokens.txt ]; then
    ./local/convert_transcript_words_to_tokens.py \
-      --lexicon $lang_dir/uniq_lexicon.txt \
-      --transcript $lang_dir/transcript_words.txt \
+      --lexicon $lang_phone_dir/uniq_lexicon.txt \
+      --transcript $lang_phone_dir/transcript_words.txt \
      --oov "<UNK>" \
-      > $lang_dir/transcript_tokens.txt
+      > $lang_phone_dir/transcript_tokens.txt
  fi
-  if [ ! -f $lang_dir/P.arpa ]; then
+  if [ ! -f $lang_phone_dir/P.arpa ]; then
    ./shared/make_kn_lm.py \
      -ngram-order 2 \
-      -text $lang_dir/transcript_tokens.txt \
-      -lm $lang_dir/P.arpa
+      -text $lang_phone_dir/transcript_tokens.txt \
+      -lm $lang_phone_dir/P.arpa
  fi
-  if [ ! -f $lang_dir/P.fst.txt ]; then
+  if [ ! -f $lang_phone_dir/P.fst.txt ]; then
    python3 -m kaldilm \
-      --read-symbol-table="$lang_dir/tokens.txt" \
+      --read-symbol-table="$lang_phone_dir/tokens.txt" \
      --disambig-symbol='#0' \
      --max-order=2 \
-      $lang_dir/P.arpa > $lang_dir/P.fst.txt
+      $lang_phone_dir/P.arpa > $lang_phone_dir/P.fst.txt
  fi
fi

if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
  log "Stage 6: Prepare char based lang"
-  mkdir -p data/lang_char
+  mkdir -p $lang_char_dir
  # We reuse words.txt from phone based lexicon
  # so that the two can share G.pt later.
-  cp data/lang_phone/words.txt data/lang_char
+  cp $lang_phone_dir/words.txt $lang_char_dir
  cat $dl_dir/aishell/data_aishell/transcript/aishell_transcript_v0.8.txt |
-    cut -d " " -f 2- | sed -e 's/[ \t\r\n]*//g' > data/lang_char/text
+    cut -d " " -f 2- | sed -e 's/[ \t\r\n]*//g' > $lang_char_dir/text
-  if [ ! -f data/lang_char/L_disambig.pt ]; then
+  if [ ! -f $lang_char_dir/L_disambig.pt ]; then
    ./local/prepare_char.py
  fi
fi
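
The core of the stage 5 change is the `awk` two-file idiom: `NR==FNR` holds only while the first file (the uid list) is being read, so the first block builds a hash of train uids, and the second pass prints only transcript lines whose first field is a known uid. Here is a Python sketch of the same filtering, assuming the default `dl_dir=download` used by icefall recipes:

```python
# Keep only transcript lines whose utterance id appears in the train set,
# then drop the uid column (the awk | cut pipeline above).
uid_file = "download/aishell/data_aishell/transcript/aishell_train_uid"
text_file = "download/aishell/data_aishell/transcript/aishell_transcript_v0.8.txt"
out_file = "data/lang_phone/transcript_words.txt"

with open(uid_file) as f:
    train_uids = {line.strip() for line in f if line.strip()}

with open(text_file) as fin, open(out_file, "w") as fout:
    for line in fin:
        fields = line.split()
        if fields and fields[0] in train_uids:
            fout.write(" ".join(fields[1:]) + "\n")
```

Because the uid lookup is a hash table, the whole intersection is a single O(N + M) pass over both files, which is the "faster way" mentioned in the commit message.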
@@ -181,7 +184,7 @@ if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then
  if [ ! -f data/lm/G_3_gram.fst.txt ]; then
    # It is used in building HLG
    python3 -m kaldilm \
-      --read-symbol-table="data/lang_phone/words.txt" \
+      --read-symbol-table="$lang_phone_dir/words.txt" \
      --disambig-symbol='#0' \
      --max-order=3 \
      $dl_dir/lm/3-gram.unpruned.arpa > data/lm/G_3_gram.fst.txt
@@ -190,6 +193,6 @@ fi
if [ $stage -le 8 ] && [ $stop_stage -ge 8 ]; then
  log "Stage 8: Compile HLG"
-  ./local/compile_hlg.py --lang-dir data/lang_phone
-  ./local/compile_hlg.py --lang-dir data/lang_char
+  ./local/compile_hlg.py --lang-dir $lang_phone_dir
+  ./local/compile_hlg.py --lang-dir $lang_char_dir
fi
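
Stage 5 above also produces the bigram phone LM `P` that the conformer_mmi recipe uses when building the MMI denominator graph. As a toy illustration only, the raw maximum-likelihood bigram estimate behind `P` looks like the sketch below; `./shared/make_kn_lm.py` additionally applies Kneser-Ney smoothing, which is omitted here:

```python
from collections import Counter

unigram = Counter()  # counts of bigram left-contexts
bigram = Counter()   # counts of adjacent token pairs

with open("data/lang_phone/transcript_tokens.txt") as f:
    for line in f:
        # Pad each utterance with sentence-boundary markers.
        tokens = ["<s>"] + line.split() + ["</s>"]
        unigram.update(tokens[:-1])
        bigram.update(zip(tokens[:-1], tokens[1:]))


def prob(w1: str, w2: str) -> float:
    """Maximum-likelihood P(w2 | w1) = count(w1, w2) / count(w1)."""
    return bigram[(w1, w2)] / unigram[w1] if unigram[w1] else 0.0
```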