#!/usr/bin/env bash

# fix segmentation fault reported in https://github.com/k2-fsa/icefall/issues/674
export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python

set -eou pipefail

stage=0
stop_stage=100
sampling_rate=16000
nj=32
perturb_speed=true

vocab_sizes=(
  # 5000
  # 2000
  # 1000
  500
)

dl_dir=$PWD/download

. shared/parse_options.sh || exit 1
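
# The variables above can be overridden from the command line via
# parse_options.sh (dashes map to underscores, so --stop-stage sets
# stop_stage), e.g.:
#   ./prepare.sh --stage 0 --stop-stage 3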

# All files generated by this script are saved in "data".
# You can safely remove "data" and rerun this script to regenerate it.
mkdir -p data

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

log "dl_dir: $dl_dir"

if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then
  log "Stage -1: Download LM" # we directly use the librispeech lm here
  mkdir -p $dl_dir/lm
  if [ ! -e $dl_dir/lm/.done ]; then
    ./local/download_lm.py --out-dir=$dl_dir/lm
    touch $dl_dir/lm/.done
  fi
fi
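
# Stage 6 below requires $dl_dir/lm/librispeech-lexicon.txt from this stage,
# so run "--stage -1 --stop-stage -1" once before preparing the phone lang.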

if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
  log "Stage 0: Download data"

  # If you have pre-downloaded it to /path/to/LibriTTS,
  # you can create a symlink
  #
  #   ln -sfv /path/to/LibriTTS $dl_dir/LibriTTS
  #
  if [ ! -d $dl_dir/LibriTTS ]; then
    lhotse download libritts $dl_dir
  fi

  # If you have pre-downloaded it to /path/to/musan,
  # you can create a symlink
  #
  #   ln -sfv /path/to/musan $dl_dir/musan
  #
  if [ ! -d $dl_dir/musan ]; then
    lhotse download musan $dl_dir
  fi
fi
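
# After this stage, $dl_dir should contain roughly the following (the exact
# layout depends on the lhotse version):
#   $dl_dir/LibriTTS/{train-clean-100,train-clean-360,train-other-500,dev-clean,...}
#   $dl_dir/musan/{music,noise,speech}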

if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
  log "Stage 1: Prepare LibriTTS manifest"
  # We assume that you have downloaded the LibriTTS corpus
  # to $dl_dir/LibriTTS
  mkdir -p data/manifests
  if [ ! -e data/manifests/.libritts.done ]; then
    lhotse prepare libritts --num-jobs $nj $dl_dir/LibriTTS data/manifests
    touch data/manifests/.libritts.done
  fi
fi
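
# This writes per-subset recording and supervision manifests, e.g.
# data/manifests/libritts_supervisions_train-clean-100.jsonl.gz,
# which Stage 5 reads to extract the training transcripts.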

if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
  log "Stage 2: Prepare musan manifest"
  # We assume that you have downloaded the musan corpus
  # to $dl_dir/musan
  if [ ! -f data/manifests/.musan_manifests.done ]; then
    log "It may take about 6 minutes"
    mkdir -p data/manifests
    lhotse prepare musan $dl_dir/musan data/manifests
    touch data/manifests/.musan_manifests.done
  fi
fi
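
# The musan manifests are typically used for noise augmentation when
# training the model.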

if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
  log "Stage 3: Compute Fbank for LibriTTS"
  mkdir -p data/fbank
  if [ ! -e data/fbank/.libritts.done ]; then
    ./local/compute_fbank_libritts.py \
      --sampling-rate $sampling_rate \
      --perturb-speed $perturb_speed
    touch data/fbank/.libritts.done
  fi

  # Here we shuffle and combine train-clean-100, train-clean-360 and
  # train-other-500 to form the training set.
  if [ ! -f data/fbank/libritts_cuts_train-all-shuf.jsonl.gz ]; then
    cat <(gunzip -c data/fbank/libritts_cuts_train-clean-100.jsonl.gz) \
      <(gunzip -c data/fbank/libritts_cuts_train-clean-360.jsonl.gz) \
      <(gunzip -c data/fbank/libritts_cuts_train-other-500.jsonl.gz) | \
      shuf | gzip -c > data/fbank/libritts_cuts_train-all-shuf.jsonl.gz
  fi

  if [ ! -e data/fbank/.libritts-validated.done ]; then
    log "Validating data/fbank for LibriTTS"
    ./local/validate_manifest.py \
      data/fbank/libritts_cuts_train-all-shuf.jsonl.gz
    touch data/fbank/.libritts-validated.done
  fi
fi
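
# To spot-check the result, inspect the first combined cut, e.g.:
#   gunzip -c data/fbank/libritts_cuts_train-all-shuf.jsonl.gz | head -n 1 | jq .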

if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
  log "Stage 4: Compute fbank for musan"
  if [ ! -f data/fbank/.musan.done ]; then
    mkdir -p data/fbank
    ./local/compute_fbank_musan.py
    touch data/fbank/.musan.done
  fi
fi

if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
  log "Stage 5: Train BPE model for normalized text"

  if [ ! -f data/text ]; then
    gunzip -c data/manifests/libritts_supervisions_train-clean-100.jsonl.gz \
      | jq ".text" | sed 's/"//g' \
      | ./local/norm_text.py > data/text

    gunzip -c data/manifests/libritts_supervisions_train-clean-360.jsonl.gz \
      | jq ".text" | sed 's/"//g' \
      | ./local/norm_text.py >> data/text

    gunzip -c data/manifests/libritts_supervisions_train-other-500.jsonl.gz \
      | jq ".text" | sed 's/"//g' \
      | ./local/norm_text.py >> data/text
  fi
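
  # The three subsets above share one pipeline; a loop would work equally
  # well (a sketch, assuming data/text does not already exist):
  #   for part in train-clean-100 train-clean-360 train-other-500; do
  #     gunzip -c data/manifests/libritts_supervisions_${part}.jsonl.gz \
  #       | jq ".text" | sed 's/"//g' | ./local/norm_text.py >> data/text
  #   done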

  for vocab_size in ${vocab_sizes[@]}; do
    lang_dir=data/lang_bpe_${vocab_size}
    mkdir -p $lang_dir

    cp data/text $lang_dir/text

    if [ ! -f $lang_dir/bpe.model ]; then
      ./local/train_bpe_model.py \
        --lang-dir $lang_dir \
        --vocab-size $vocab_size \
        --transcript $lang_dir/text
    fi
  done
fi
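
# Each data/lang_bpe_<vocab_size> directory now contains a BPE model
# (bpe.model) trained on the normalized transcripts; uncomment additional
# entries in vocab_sizes above to train more vocabulary sizes.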

if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
  log "Stage 6: Prepare phone based lang"
  lang_dir=data/lang_phone
  mkdir -p $lang_dir

  if [ ! -f $dl_dir/lm/librispeech-lexicon.txt ]; then
    log "No lexicon file in $dl_dir/lm, please run:"
    log "  prepare.sh --stage -1 --stop-stage -1"
    exit 1
  fi

  if [ ! -f $lang_dir/lexicon.txt ]; then
    (echo '!SIL SIL'; echo '<SPOKEN_NOISE> SPN'; echo '<UNK> SPN'; ) |
      cat - $dl_dir/lm/librispeech-lexicon.txt |
      sort | uniq > $lang_dir/lexicon.txt
  fi
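
  # The three entries prepended above map silence (!SIL -> SIL) and spoken
  # noise / unknown words (<SPOKEN_NOISE>, <UNK> -> SPN) before the combined
  # lexicon is sorted and deduplicated.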

  if [ ! -f $lang_dir/L_disambig.pt ]; then
    ./local/prepare_lang.py --lang-dir $lang_dir
  fi

  if [ ! -f $lang_dir/L.fst ]; then
    log "Converting L.pt to L.fst"
    ./shared/convert-k2-to-openfst.py \
      --olabels aux_labels \
      $lang_dir/L.pt \
      $lang_dir/L.fst
  fi

  if [ ! -f $lang_dir/L_disambig.fst ]; then
    log "Converting L_disambig.pt to L_disambig.fst"
    ./shared/convert-k2-to-openfst.py \
      --olabels aux_labels \
      $lang_dir/L_disambig.pt \
      $lang_dir/L_disambig.fst
  fi
fi