icefall/egs/spgispeech/ASR/prepare.sh

#!/usr/bin/env bash

set -eou pipefail

nj=20
stage=-1
stop_stage=100

# We assume dl_dir (download dir) contains the following
# directories and files. If not, they will be downloaded
# by this script automatically.
#
# - $dl_dir/spgispeech
#   You can find train.csv, val.csv, and the train and val directories of
#   the SPGISpeech dataset in this directory.
#
# - $dl_dir/musan
# This directory contains the following directories downloaded from
# http://www.openslr.org/17/
#
# - music
# - noise
# - speech
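#
# For reference, the expected layout is roughly the following (a sketch;
# the audio file names inside train/ and val/ depend on the corpus release):
#
#   download/
#   ├── spgispeech
#   │   ├── train.csv
#   │   ├── val.csv
#   │   ├── train/
#   │   └── val/
#   └── musan
#       ├── music/
#       ├── noise/
#       └── speech/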
dl_dir=$PWD/download

. shared/parse_options.sh || exit 1

# vocab size for sentence piece models.
# It will generate data/lang_bpe_xxx,
# data/lang_bpe_yyy if the array contains xxx, yyy
vocab_sizes=(
  500
)

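# For example, to additionally build a 1000-token model (an illustrative
# value, not part of this recipe), you could set:
#
#   vocab_sizes=(
#     500
#     1000
#   )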
# All files generated by this script are saved in "data".
# You can safely remove "data" and rerun this script to regenerate it.
mkdir -p data
log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

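# Example: `log "Stage 0: Download data"` prints a line like
#   2022-05-16 20:52:14 (prepare.sh:74:main) Stage 0: Download data
# (the timestamp, line number, and caller name depend on the call site).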
log "dl_dir: $dl_dir"
if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
  log "Stage 0: Download data"

  # If you have pre-downloaded it to /path/to/spgispeech,
  # you can create a symlink
  #
  #   ln -sfv /path/to/spgispeech $dl_dir/spgispeech
  #
  # Note that train.csv is a file, so we test for it with -f.
  if [ ! -f $dl_dir/spgispeech/train.csv ]; then
    lhotse download spgispeech $dl_dir
  fi

  # If you have pre-downloaded it to /path/to/musan,
  # you can create a symlink
  #
  #   ln -sfv /path/to/musan $dl_dir/
  #
  if [ ! -d $dl_dir/musan ]; then
    lhotse download musan $dl_dir
  fi
fi

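# A quick way to verify the downloads (an optional check, not part of the
# original recipe):
#
#   ls $dl_dir/spgispeech/train.csv $dl_dir/spgispeech/val.csv
#   ls $dl_dir/musan/{music,noise,speech} > /dev/null && echo "musan OK"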
if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
  log "Stage 1: Prepare SPGISpeech manifest (may take ~1h)"
  # We assume that you have downloaded the SPGISpeech corpus
  # to $dl_dir/spgispeech. We perform text normalization for the transcripts.
  mkdir -p data/manifests
  lhotse prepare spgispeech -j $nj --normalize-text $dl_dir/spgispeech data/manifests
fi

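# To spot-check the resulting manifests (manifest file names may differ
# slightly across lhotse versions):
#
#   gunzip -c data/manifests/*.jsonl.gz | head -n 2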
if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
  log "Stage 2: Prepare musan manifest"
  # We assume that you have downloaded the musan corpus
  # to $dl_dir/musan
  mkdir -p data/manifests
  lhotse prepare musan $dl_dir/musan data/manifests
  lhotse combine data/manifests/recordings_{music,speech,noise}.json data/manifests/recordings_musan.jsonl.gz
  lhotse cut simple -r data/manifests/recordings_musan.jsonl.gz data/manifests/cuts_musan_raw.jsonl.gz
fi

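# `lhotse cut simple` creates one cut per recording, so the number of lines
# in the combined cut set should match the number of musan recordings:
#
#   gunzip -c data/manifests/cuts_musan_raw.jsonl.gz | wc -l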
if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
  log "Stage 3: Split train into train and dev and create cut sets."
  python local/prepare_splits.py
fi

if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
  log "Stage 4: Compute fbank features for spgispeech dev and val"
  mkdir -p data/fbank
  python local/compute_fbank_spgispeech.py --test
fi

if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
  log "Stage 5: Compute fbank features for train"
  mkdir -p data/fbank
  python local/compute_fbank_spgispeech.py --train --num-splits 20

  log "Combine cut manifests from train splits (may take ~1h)"
  if [ ! -f data/manifests/cuts_train.jsonl.gz ]; then
    pieces=$(find data/manifests -name "cuts_train_[0-9]*.jsonl.gz")
    lhotse combine $pieces data/manifests/cuts_train.jsonl.gz
  fi
  gunzip -c data/manifests/cuts_train.jsonl.gz | shuf | gzip -c > data/manifests/cuts_train_shuf.jsonl.gz
fi

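# `shuf` draws a fresh random seed on every run. For a reproducible shuffle
# (an optional tweak, not part of the original recipe), you can pin the
# random source, e.g.:
#
#   gunzip -c data/manifests/cuts_train.jsonl.gz \
#     | shuf --random-source=<(yes 42) \
#     | gzip -c > data/manifests/cuts_train_shuf.jsonl.gz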
if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
  log "Stage 6: Compute fbank features for musan"
  mkdir -p data/fbank
  python local/compute_fbank_musan.py
fi

if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then
  log "Stage 7: Dump transcripts for LM training"
  mkdir -p data/lm
  # jq prints the first supervision's text as a quoted JSON string;
  # the sed strips the quotes.
  gunzip -c data/manifests/cuts_train_raw.jsonl.gz \
    | jq '.supervisions[0].text' \
    | sed 's:"::g' \
    > data/lm/transcript_words.txt
fi

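# transcript_words.txt ends up with one normalized transcript per line;
# a quick spot check:
#
#   head -n 3 data/lm/transcript_words.txt
#   wc -l data/lm/transcript_words.txt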
if [ $stage -le 8 ] && [ $stop_stage -ge 8 ]; then
  log "Stage 8: Prepare BPE based lang"

  for vocab_size in ${vocab_sizes[@]}; do
    lang_dir=data/lang_bpe_${vocab_size}
    mkdir -p $lang_dir

    # Add special words to words.txt
    echo "<eps> 0" > $lang_dir/words.txt
    echo "!SIL 1" >> $lang_dir/words.txt
    echo "[UNK] 2" >> $lang_dir/words.txt

    # Add regular words to words.txt. Regular word ids start at 3,
    # right after the three special symbols above, hence NR+2.
    gunzip -c data/manifests/cuts_train_raw.jsonl.gz \
      | jq '.supervisions[0].text' \
      | sed 's:"::g' \
      | sed 's: :\n:g' \
      | sort -u \
      | sed '/^$/d' \
      | awk '{print $0,NR+2}' \
      >> $lang_dir/words.txt

    # Add remaining special word symbols expected by LM scripts.
    num_words=$(wc -l < $lang_dir/words.txt)
    echo "<s> ${num_words}" >> $lang_dir/words.txt
    num_words=$(wc -l < $lang_dir/words.txt)
    echo "</s> ${num_words}" >> $lang_dir/words.txt
    num_words=$(wc -l < $lang_dir/words.txt)
    echo "#0 ${num_words}" >> $lang_dir/words.txt

    ./local/train_bpe_model.py \
      --lang-dir $lang_dir \
      --vocab-size $vocab_size \
      --transcript data/lm/transcript_words.txt

    if [ ! -f $lang_dir/L_disambig.pt ]; then
      ./local/prepare_lang_bpe.py --lang-dir $lang_dir
    fi
  done
fi

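# The resulting words.txt looks roughly like this (the ids after the three
# special symbols are illustrative):
#
#   <eps> 0
#   !SIL 1
#   [UNK] 2
#   A 3
#   ...
#   <s> N
#   </s> N+1
#   #0 N+2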
if [ $stage -le 9 ] && [ $stop_stage -ge 9 ]; then
  log "Stage 9: Train LM"
  lm_dir=data/lm

  if [ ! -f $lm_dir/G.arpa ]; then
    ./shared/make_kn_lm.py \
      -ngram-order 3 \
      -text $lm_dir/transcript_words.txt \
      -lm $lm_dir/G.arpa
  fi

  if [ ! -f $lm_dir/G_3_gram.fst.txt ]; then
    # This recipe has no phone-based lang dir, so we read the word symbol
    # table from the BPE lang dir prepared in stage 8.
    python3 -m kaldilm \
      --read-symbol-table="data/lang_bpe_500/words.txt" \
      --disambig-symbol='#0' \
      --max-order=3 \
      $lm_dir/G.arpa > $lm_dir/G_3_gram.fst.txt
  fi
fi

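# Sanity check (optional): a valid ARPA file starts with a "\data\" header
# listing the n-gram counts.
#
#   head -n 5 data/lm/G.arpa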
if [ $stage -le 10 ] && [ $stop_stage -ge 10 ]; then
  log "Stage 10: Compile HLG"
  # This recipe is BPE-only (the phone-based setup was removed along with
  # the conformer CTC model), so we compile HLG only for the BPE lang dirs.
  for vocab_size in ${vocab_sizes[@]}; do
    lang_dir=data/lang_bpe_${vocab_size}
    ./local/compile_hlg.py --lang-dir $lang_dir
  done
fi
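# The compiled graph is typically written to $lang_dir/HLG.pt (the usual
# icefall convention; check local/compile_hlg.py for the exact output name)
# and is consumed by the decoding scripts, e.g.:
#
#   ls data/lang_bpe_500/HLG.pt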