Mirror of https://github.com/k2-fsa/icefall.git (synced 2025-08-10 02:22:17 +00:00)

Commit c5115fc460 ("clean prepare.sh"), parent 3e4179bebb.
@@ -6,36 +6,15 @@ export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python
set -eou pipefail

nj=60
-stage=-1
+stage=6
stop_stage=9

# We assume dl_dir (download dir) contains the following
# directories and files. If not, they will be downloaded
# by this script automatically.
#
# - $dl_dir/LibriSpeech
#   You can find BOOKS.TXT, test-clean, train-clean-360, etc, inside it.
#   You can download them from https://www.openslr.org/12
#
# - $dl_dir/lm
#   This directory contains the following files downloaded from
#   http://www.openslr.org/resources/11
#
#   - 3-gram.pruned.1e-7.arpa.gz
#   - 3-gram.pruned.1e-7.arpa
#   - 4-gram.arpa.gz
#   - 4-gram.arpa
#   - librispeech-vocab.txt
#   - librispeech-lexicon.txt
#   - librispeech-lm-norm.txt.gz
#
# - $dl_dir/musan
#   This directory contains the following directories downloaded from
#   http://www.openslr.org/17/
#
#   - music
#   - noise
#   - speech
# - $dl_dir/hi-en

dl_dir=$PWD/download
espnet_path=/home/wtc7/espnet/egs2/MUCS/asr1/data/hi-en/
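
A note on the stage change above: every step of prepare.sh is wrapped in the same guard, so stage/stop_stage select a contiguous run of steps (here, stages 6 through 9). A minimal sketch of the idiom; the echo lines are illustrative only:

    stage=6
    stop_stage=9
    for n in 1 6 9 10; do
      if [ $stage -le $n ] && [ $stop_stage -ge $n ]; then
        echo "stage $n would run"       # 6 and 9 fall inside [stage, stop_stage]
      else
        echo "stage $n would be skipped"
      fi
    done
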
@@ -43,13 +22,8 @@ espnet_path=/home/wtc7/espnet/egs2/MUCS/asr1/data/hi-en/

# vocab size for sentence piece models.
# It will generate data/lang_bpe_xxx,
# data/lang_bpe_yyy if the array contains xxx, yyy
vocab_sizes=(
  # 5000
  # 2000
  # 1000
  200
)
-# data/lang_bpe_yyy
-vocab_size=400

# All files generated by this script are saved in "data".
# You can safely remove "data" and rerun this script to regenerate it.
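
Each entry of vocab_sizes later becomes its own lang directory, so this array is the single knob that fans out through stages 6 to 14. A small sketch of that expansion, with the echo standing in for the real work:

    vocab_sizes=(200)
    for vocab_size in "${vocab_sizes[@]}"; do
      lang_dir=data/lang_bpe_${vocab_size}
      echo "would populate $lang_dir and data/lm_training_bpe_${vocab_size}"
    done
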
@@ -68,7 +42,7 @@ if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then
  mkdir -p $dl_dir/lm
  if [ ! -e $dl_dir/lm/.done ]; then
    ./local/prepare_lm_files.py --out-dir=$dl_dir/lm --data-path=$espnet_path --mode="train"
-    # touch $dl_dir/lm/.done
+    touch $dl_dir/lm/.done
  fi
fi
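
Un-commenting the touch restores the stamp-file idiom this script uses for idempotence: a stage does its work once, drops a hidden marker, and reruns skip it. A generic sketch of the pattern, with prepare_step as a hypothetical placeholder for the real command:

    outdir=$dl_dir/lm
    mkdir -p $outdir
    if [ ! -e $outdir/.done ]; then
      prepare_step            # hypothetical placeholder for the expensive work
      touch $outdir/.done     # stamp the directory so reruns skip this branch
    fi
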
@@ -78,11 +52,11 @@ fi

if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
  log "Stage 1: Prepare MUCS manifest"
-  # We assume that you have downloaded the LibriSpeech corpus
-  # to $dl_dir/LibriSpeech
+  # We assume that you have downloaded the MUCS corpus
+  # to $dl_dir/
  mkdir -p data/manifests
  if [ ! -e data/manifests/.mucs.done ]; then
    # lhotse prepare mucs -j $nj $dl_dir/hi-en data/manifests
    # generate lhotse manifests from kaldi style files
    ./local/prepare_manifest.py "$espnet_path" $nj data/manifests

    touch data/manifests/.mucs.done
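
To sanity-check Stage 1, one can peek at a generated manifest; lhotse manifests are gzipped JSON-lines files. The file name below is an assumption about what prepare_manifest.py writes:

    # File name is a guess at prepare_manifest.py's output naming.
    gunzip -c data/manifests/mucs_supervisions_train.jsonl.gz | head -n 2
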
@@ -94,7 +68,7 @@ if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
  mkdir -p data/fbank
  if [ ! -e data/fbank/.mucs.done ]; then
    ./local/compute_fbank_mucs.py
-    # touch data/fbank/.mucs.done
+    touch data/fbank/.mucs.done
  fi

  # exit
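
Likewise, since each cut occupies one line of the gzipped cuts manifest, a line count is a cheap check that Stage 3 produced features; the train split name is assumed:

    # One JSON object per line, so wc -l counts cuts (split name assumed).
    gunzip -c data/fbank/mucs_cuts_train.jsonl.gz | wc -l
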
@@ -110,7 +84,7 @@ if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
      python3 ./local/validate_manifest.py \
        data/fbank/mucs_cuts_${part}.jsonl.gz
    done
-    # touch data/fbank/.mucs-validated.done
+    touch data/fbank/.mucs-validated.done
  fi
fi
@@ -150,7 +124,6 @@ fi
if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
  log "Stage 6: Prepare BPE based lang"

  for vocab_size in ${vocab_sizes[@]}; do
    lang_dir=data/lang_bpe_${vocab_size}
    mkdir -p $lang_dir
    # We reuse words.txt from phone based lexicon
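
Stage 6 presumably trains a sentencepiece BPE model per vocab size (the hunk truncates before that call). Once bpe.model exists, it can be exercised directly; a sketch assuming data/lang_bpe_200/bpe.model and uppercase transcripts as in the LibriSpeech-style recipes:

    # Hedged check: print the BPE pieces for one transcript line.
    python3 -c 'import sentencepiece as spm; sp = spm.SentencePieceProcessor(model_file="data/lang_bpe_200/bpe.model"); print(sp.encode("HELLO WORLD", out_type=str))'
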
@@ -193,23 +166,14 @@ if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
      $lang_dir/L_disambig.pt \
      $lang_dir/L_disambig.fst
    fi
  done

fi

if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then
-  log "Stage 7: Prepare bigram token-level P for MMI training"
+  log "Stage 7: Train LM from training data"

  for vocab_size in ${vocab_sizes[@]}; do
    lang_dir=data/lang_bpe_${vocab_size}

-    # if [ ! -f $lang_dir/transcript_tokens.txt ]; then
-    #   ./local/convert_transcript_words_to_tokens.py \
-    #     --lexicon $lang_dir/lexicon.txt \
-    #     --transcript $lang_dir/transcript_words.txt \
-    #     --oov "<UNK>" \
-    #     > $lang_dir/transcript_tokens.txt
-    # fi

    if [ ! -f $lang_dir/lm_3.arpa ]; then
      ./shared/make_kn_lm.py \
        -ngram-order 3 \
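
The hunk breaks off mid-command here; the full trigram call presumably mirrors the 4-gram call whose tail opens the next hunk. A hedged reconstruction, where the -text argument is a guess based on the transcript files these recipes produce:

    # Assumed shape of the truncated trigram call; the -text input is a guess.
    ./shared/make_kn_lm.py \
      -ngram-order 3 \
      -text $lang_dir/transcript_words.txt \
      -lm $lang_dir/lm_3.arpa
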
@@ -224,14 +188,6 @@ if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then
        -lm $lang_dir/lm_4.arpa
    fi

-    # if [ ! -f $lang_dir/P.fst.txt ]; then
-    #   python3 -m kaldilm \
-    #     --read-symbol-table="$lang_dir/tokens.txt" \
-    #     --disambig-symbol='#0' \
-    #     --max-order=2 \
-    #     $lang_dir/P.arpa > $lang_dir/P.fst.txt
-    # fi
  done
fi

if [ $stage -le 8 ] && [ $stop_stage -ge 8 ]; then
@@ -246,7 +202,7 @@ if [ $stage -le 8 ] && [ $stop_stage -ge 8 ]; then
      --read-symbol-table="data/lang_phone/words.txt" \
      --disambig-symbol='#0' \
      --max-order=3 \
-      data/lang_bpe_200/lm_3.arpa > data/lm/G_3_gram.fst.txt
+      data/lang_bpe_${vocab_size}/lm_3.arpa > data/lm/G_3_gram.fst.txt
  fi
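
The G_3_gram.fst.txt produced here is an OpenFST text-format FST, and icefall decoding scripts typically read such files back through k2. A sketch of that round trip (where and how this recipe loads it is an assumption):

    # Hedged check: load the text-format G back into a k2 FSA and count arcs.
    python3 -c 'import k2; G = k2.Fsa.from_openfst(open("data/lm/G_3_gram.fst.txt").read(), acceptor=False); print(G.num_arcs)'
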
  if [ ! -f data/lm/G_4_gram.fst.txt ]; then
@@ -255,17 +211,9 @@ if [ $stage -le 8 ] && [ $stop_stage -ge 8 ]; then
      --read-symbol-table="data/lang_phone/words.txt" \
      --disambig-symbol='#0' \
      --max-order=3 \
-      data/lang_bpe_200/lm_4.arpa > data/lm/G_4_gram.fst.txt
+      data/lang_bpe_${vocab_size}/lm_4.arpa > data/lm/G_4_gram.fst.txt
  fi

-  # if [ ! -f data/lm/G_4_gram.fst.txt ]; then
-  #   # It is used for LM rescoring
-  #   python3 -m kaldilm \
-  #     --read-symbol-table="data/lang_phone/words.txt" \
-  #     --disambig-symbol='#0' \
-  #     --max-order=4 \
-  #     $dl_dir/lm/4-gram.arpa > data/lm/G_4_gram.fst.txt
-  # fi
fi

if [ $stage -le 9 ] && [ $stop_stage -ge 9 ]; then
@@ -277,120 +225,8 @@ if [ $stage -le 9 ] && [ $stop_stage -ge 9 ]; then
  #
  # ./local/compile_hlg_using_openfst.py --lang-dir data/lang_phone

  for vocab_size in ${vocab_sizes[@]}; do
    lang_dir=data/lang_bpe_${vocab_size}
    ./local/compile_hlg.py --lang-dir $lang_dir

    # Note If ./local/compile_hlg.py throws OOM,
    # please switch to the following command
    #
    # ./local/compile_hlg_using_openfst.py --lang-dir $lang_dir
  done
fi

# Compile LG for RNN-T fast_beam_search decoding
if [ $stage -le 10 ] && [ $stop_stage -ge 10 ]; then
  log "Stage 10: Compile LG"
  ./local/compile_lg.py --lang-dir data/lang_phone

  for vocab_size in ${vocab_sizes[@]}; do
    lang_dir=data/lang_bpe_${vocab_size}
    ./local/compile_lg.py --lang-dir $lang_dir
  done
fi

if [ $stage -le 11 ] && [ $stop_stage -ge 11 ]; then
  log "Stage 11: Generate LM training data"

  for vocab_size in ${vocab_sizes[@]}; do
    log "Processing vocab_size == ${vocab_size}"
    lang_dir=data/lang_bpe_${vocab_size}
    out_dir=data/lm_training_bpe_${vocab_size}
    mkdir -p $out_dir

    ./local/prepare_lm_training_data.py \
      --bpe-model $lang_dir/bpe.model \
      --lm-data $dl_dir/lm/librispeech-lm-norm.txt \
      --lm-archive $out_dir/lm_data.pt
  done
fi
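
The lm_data.pt written by Stage 11 is a torch archive; a quick load confirms it is readable (nothing about its internal layout is assumed here):

    # Hedged check: the archive should at least load as a torch object.
    python3 -c 'import torch; d = torch.load("data/lm_training_bpe_200/lm_data.pt", map_location="cpu"); print(type(d))'
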
if [ $stage -le 12 ] && [ $stop_stage -ge 12 ]; then
  log "Stage 12: Generate LM validation data"

  for vocab_size in ${vocab_sizes[@]}; do
    log "Processing vocab_size == ${vocab_size}"
    out_dir=data/lm_training_bpe_${vocab_size}
    mkdir -p $out_dir

    if [ ! -f $out_dir/valid.txt ]; then
      files=$(
        find "$dl_dir/LibriSpeech/dev-clean" -name "*.trans.txt"
        find "$dl_dir/LibriSpeech/dev-other" -name "*.trans.txt"
      )
      for f in ${files[@]}; do
        cat $f | cut -d " " -f 2-
      done > $out_dir/valid.txt
    fi

    lang_dir=data/lang_bpe_${vocab_size}
    ./local/prepare_lm_training_data.py \
      --bpe-model $lang_dir/bpe.model \
      --lm-data $out_dir/valid.txt \
      --lm-archive $out_dir/lm_data-valid.pt
  done
fi
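
One note on the loop above: files holds a newline-separated string, not an array, so ${files[@]} only works through unquoted word splitting and would break on paths containing spaces. An equivalent single pass, offered as a sketch rather than a change to the recipe:

    # Same output as the loop, without relying on word splitting.
    find "$dl_dir/LibriSpeech/dev-clean" "$dl_dir/LibriSpeech/dev-other" \
      -name "*.trans.txt" -exec cut -d " " -f 2- {} + > $out_dir/valid.txt
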
if [ $stage -le 13 ] && [ $stop_stage -ge 13 ]; then
  log "Stage 13: Generate LM test data"

  for vocab_size in ${vocab_sizes[@]}; do
    log "Processing vocab_size == ${vocab_size}"
    out_dir=data/lm_training_bpe_${vocab_size}
    mkdir -p $out_dir

    if [ ! -f $out_dir/test.txt ]; then
      files=$(
        find "$dl_dir/LibriSpeech/test-clean" -name "*.trans.txt"
        find "$dl_dir/LibriSpeech/test-other" -name "*.trans.txt"
      )
      for f in ${files[@]}; do
        cat $f | cut -d " " -f 2-
      done > $out_dir/test.txt
    fi

    lang_dir=data/lang_bpe_${vocab_size}
    ./local/prepare_lm_training_data.py \
      --bpe-model $lang_dir/bpe.model \
      --lm-data $out_dir/test.txt \
      --lm-archive $out_dir/lm_data-test.pt
  done
fi

if [ $stage -le 14 ] && [ $stop_stage -ge 14 ]; then
  log "Stage 14: Sort LM training data"
  # Sort LM training data by sentence length in descending order
  # for ease of training.
  #
  # Sentence length equals to the number of BPE tokens
  # in a sentence.

  for vocab_size in ${vocab_sizes[@]}; do
    out_dir=data/lm_training_bpe_${vocab_size}
    mkdir -p $out_dir
    ./local/sort_lm_training_data.py \
      --in-lm-data $out_dir/lm_data.pt \
      --out-lm-data $out_dir/sorted_lm_data.pt \
      --out-statistics $out_dir/statistics.txt

    ./local/sort_lm_training_data.py \
      --in-lm-data $out_dir/lm_data-valid.pt \
      --out-lm-data $out_dir/sorted_lm_data-valid.pt \
      --out-statistics $out_dir/statistics-valid.txt

    ./local/sort_lm_training_data.py \
      --in-lm-data $out_dir/lm_data-test.pt \
      --out-lm-data $out_dir/sorted_lm_data-test.pt \
      --out-statistics $out_dir/statistics-test.txt
  done
fi