#!/usr/bin/env bash

set -eou pipefail

stage=-1
stop_stage=100

# We assume dl_dir (download dir) contains the following
# directories and files. If not, they will be downloaded
# by this script automatically.
#
#  - $dl_dir/amicorpus
#      You can find audio and transcripts for AMI in this path.
#
#  - $dl_dir/icsi
#      You can find audio and transcripts for ICSI in this path.
#
#  - $dl_dir/rirs_noises
#      This directory contains the RIRS_NOISES corpus downloaded from https://openslr.org/28/.
#
dl_dir=$PWD/download
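
# Example invocation, assuming this file is saved as prepare.sh
# (parse_options.sh below maps --stage/--stop-stage flags onto the
# variables above):
#
#   ./prepare.sh --stage 0 --stop-stage 4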

. shared/parse_options.sh || exit 1

# All files generated by this script are saved in "data".
# You can safely remove "data" and rerun this script to regenerate it.
mkdir -p data

vocab_size=500

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

log "dl_dir: $dl_dir"

if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
  log "Stage 0: Download data"

  # If you have pre-downloaded it to /path/to/amicorpus,
  # you can create a symlink
  #
  #   ln -sfv /path/to/amicorpus $dl_dir/amicorpus
  #
  if [ ! -d $dl_dir/amicorpus ]; then
    for mic in ihm ihm-mix sdm mdm8-bf; do
      lhotse download ami --mic $mic $dl_dir/amicorpus
    done
  fi

  # If you have pre-downloaded it to /path/to/icsi,
  # you can create a symlink
  #
  #   ln -sfv /path/to/icsi $dl_dir/icsi
  #
  if [ ! -d $dl_dir/icsi ]; then
    lhotse download icsi $dl_dir/icsi
  fi

  # If you have pre-downloaded it to /path/to/rirs_noises,
  # you can create a symlink
  #
  #   ln -sfv /path/to/rirs_noises $dl_dir/
  #
  if [ ! -d $dl_dir/rirs_noises ]; then
    lhotse download rirs_noises $dl_dir
  fi
fi

if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
  log "Stage 1: Prepare AMI manifests"
  # We assume that you have downloaded the AMI corpus
  # to $dl_dir/amicorpus. We perform text normalization for the transcripts.
  mkdir -p data/manifests
  for mic in ihm ihm-mix sdm mdm8-bf; do
    log "Preparing AMI manifest for $mic"
    lhotse prepare ami --mic $mic --max-words-per-segment 30 --merge-consecutive $dl_dir/amicorpus data/manifests/
  done
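
  # Each call above writes per-split recording and supervision manifests into
  # data/manifests/ (e.g. ami-sdm_supervisions_train.jsonl.gz), which stage 8
  # reads when dumping transcripts for BPE training.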
fi

if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
  log "Stage 2: Prepare ICSI manifests"
  # We assume that you have downloaded the ICSI corpus
  # to $dl_dir/icsi. We perform text normalization for the transcripts.
  mkdir -p data/manifests
  log "Preparing ICSI manifest"
  for mic in ihm ihm-mix sdm; do
    lhotse prepare icsi --mic $mic $dl_dir/icsi data/manifests/
  done
fi

if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
  log "Stage 3: Prepare RIRs"
  # We assume that you have downloaded the RIRS_NOISES corpus
  # to $dl_dir/rirs_noises
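  # The -p options select which parts of the corpus to prepare; presumably
  # only the real RIRs and isotropic noises are needed here (RIRS_NOISES also
  # ships simulated RIRs and point-source noises).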
  lhotse prepare rir-noise -p real_rir -p iso_noise $dl_dir/rirs_noises data/manifests
fi

if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
  log "Stage 4: Extract features for AMI and ICSI recordings"
  python local/compute_fbank_ami.py
  python local/compute_fbank_icsi.py
fi

if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
  log "Stage 5: Create sources for simulating mixtures"
  # In the following script, we speed-perturb the IHM recordings and extract features.
  python local/compute_fbank_ihm.py
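
  # Combine the AMI and ICSI IHM cuts, trim each cut to its word-level
  # alignments (splitting at pauses longer than 0.5s), keep only cuts of at
  # most 12s, then shuffle and write a gzipped JSONL manifest.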
  lhotse combine data/manifests/ami-ihm_cuts_train.jsonl.gz \
    data/manifests/icsi-ihm_cuts_train.jsonl.gz - |\
    lhotse cut trim-to-alignments --type word --max-pause 0.5 - - |\
    lhotse filter 'duration<=12.0' - - |\
    shuf | gzip -c > data/manifests/ihm_cuts_train_trimmed.jsonl.gz
fi

if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
  log "Stage 6: Create training mixtures"
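
  # Simulate conversational meeting-style mixtures from the trimmed
  # single-speaker cuts: 200k meetings with 2 or 3 speakers each, at most 15s
  # and 3 utterances per speaker. The "ai-mix" output name presumably stands
  # for the AMI+ICSI mixture.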
  lhotse workflows simulate-meetings \
    --method conversational \
    --same-spk-pause 0.5 \
    --diff-spk-pause 0.5 \
    --diff-spk-overlap 1.0 \
    --prob-diff-spk-overlap 0.8 \
    --num-meetings 200000 \
    --num-speakers-per-meeting 2,3 \
    --max-duration-per-speaker 15.0 \
    --max-utterances-per-speaker 3 \
    --seed 1234 \
    --num-jobs 2 \
    data/manifests/ihm_cuts_train_trimmed.jsonl.gz \
    data/manifests/ai-mix_cuts_clean.jsonl.gz

  python local/compute_fbank_aimix.py

  # Add source features to the manifest (will be used for masking loss).
  # This may take ~2 hours.
  python local/add_source_feats.py
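
  # The clean and reverberant source manifests below are presumably written by
  # the two scripts above, with reverberation drawn from the RIRs prepared in
  # stage 3.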
  # Combine clean and reverb
  cat <(gunzip -c data/manifests/cuts_train_clean_sources.jsonl.gz) \
    <(gunzip -c data/manifests/cuts_train_reverb_sources.jsonl.gz) |\
    shuf | gzip -c > data/manifests/cuts_train_comb_sources.jsonl.gz
fi

if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then
  log "Stage 7: Create training mixtures from real sessions"
  python local/prepare_ami_train_cuts.py
  python local/prepare_icsi_train_cuts.py

  # Combine AMI and ICSI
  cat <(gunzip -c data/manifests/cuts_train_ami.jsonl.gz) \
    <(gunzip -c data/manifests/cuts_train_icsi.jsonl.gz) |\
    shuf | gzip -c > data/manifests/cuts_train_ami_icsi.jsonl.gz
fi

if [ $stage -le 8 ] && [ $stop_stage -ge 8 ]; then
  log "Stage 8: Dump transcripts for BPE model training (using AMI and ICSI)."
  mkdir -p data/lm
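
  # jq '.text' extracts the transcript field from each supervision (one JSON
  # object per line); sed strips the surrounding double quotes that jq prints.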
  cat <(gunzip -c data/manifests/ami-sdm_supervisions_train.jsonl.gz | jq '.text' | sed 's:"::g') \
    <(gunzip -c data/manifests/icsi-sdm_supervisions_train.jsonl.gz | jq '.text' | sed 's:"::g') \
    > data/lm/transcript_words.txt
fi

if [ $stage -le 9 ] && [ $stop_stage -ge 9 ]; then
  log "Stage 9: Prepare BPE based lang (combining AMI and ICSI)"

  lang_dir=data/lang_bpe_${vocab_size}
  mkdir -p $lang_dir

  # Add special words to words.txt
  echo "<eps> 0" > $lang_dir/words.txt
  echo "!SIL 1" >> $lang_dir/words.txt
  echo "<UNK> 2" >> $lang_dir/words.txt
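
  # The special symbols above occupy IDs 0-2, so regular words start at ID 3;
  # the awk expression NR+2 below assigns consecutive IDs accordingly.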
  # Add regular words to words.txt
  cat data/lm/transcript_words.txt | grep -o -E '\w+' | sort -u | awk '{print $0,NR+2}' >> $lang_dir/words.txt

  # Add remaining special word symbols expected by LM scripts.
  num_words=$(cat $lang_dir/words.txt | wc -l)
  echo "<s> ${num_words}" >> $lang_dir/words.txt
  num_words=$(cat $lang_dir/words.txt | wc -l)
  echo "</s> ${num_words}" >> $lang_dir/words.txt
  num_words=$(cat $lang_dir/words.txt | wc -l)
  echo "#0 ${num_words}" >> $lang_dir/words.txt

  ./local/train_bpe_model.py \
    --lang-dir $lang_dir \
    --vocab-size $vocab_size \
    --transcript data/lm/transcript_words.txt
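
  # train_bpe_model.py is expected to write the sentencepiece model to
  # $lang_dir/bpe.model (the usual icefall convention), which
  # prepare_lang_bpe.py below uses to build the BPE lexicon.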

  if [ ! -f $lang_dir/L_disambig.pt ]; then
    ./local/prepare_lang_bpe.py --lang-dir $lang_dir
  fi
fi