#!/usr/bin/env bash

set -eou pipefail

nj=32
stage=-1
stop_stage=100

# Split the train set into pieces, each with about num_per_split cuts.
# This is to avoid OOM during feature extraction.
num_per_split=4000
# We assume dl_dir (download dir) contains the following
# directories and files. If not, they will be downloaded
# by this script automatically.
#
#  - $dl_dir/peoples_speech
#      This directory contains the following files downloaded from
#      https://huggingface.co/datasets/MLCommons/peoples_speech
#
#    - test
#    - train
#    - validation
#
#  - $dl_dir/musan
#      This directory contains the following directories downloaded from
#      http://www.openslr.org/17/
#
#    - music
#    - noise
#    - speech

dl_dir=$PWD/download
. shared/parse_options.sh || exit 1
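
# The variables defined above (nj, stage, stop_stage, num_per_split, dl_dir)
# can be overridden on the command line, since shared/parse_options.sh is the
# standard Kaldi-style option parser used throughout icefall, e.g.:
#
#   ./prepare.sh --stage 4 --stop-stage 6 --nj 16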
# vocab size for sentence piece models.
# It will generate data/lang_bpe_xxx,
# data/lang_bpe_yyy if the array contains xxx, yyy
vocab_sizes=(
  # 5000
  # 2000
  # 1000
  500
)

# All files generated by this script are saved in "data".
# You can safely remove "data" and rerun this script to regenerate it.
mkdir -p data

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

log "dl_dir: $dl_dir"

if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
  log "Stage 0: Download data"

  # If you have pre-downloaded it to /path/to/peoples_speech,
  # you can create a symlink
  #
  #   ln -sfv /path/to/peoples_speech $dl_dir/peoples_speech
  #
  if [ ! -d $dl_dir/peoples_speech/train ]; then
    git lfs install
    git clone https://huggingface.co/datasets/MLCommons/peoples_speech $dl_dir/peoples_speech
  fi

  # If you have pre-downloaded it to /path/to/musan,
  # you can create a symlink
  #
  #   ln -sfv /path/to/musan $dl_dir/
  #
  if [ ! -d $dl_dir/musan ]; then
    lhotse download musan $dl_dir
  fi
fi

if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
  log "Stage 1: Prepare People's Speech manifest"
  # We assume that you have downloaded the People's Speech corpus
  # to $dl_dir/peoples_speech
  mkdir -p data/manifests
  if [ ! -e data/manifests/.peoples_speech.done ]; then
    lhotse prepare peoples-speech -j $nj $dl_dir/peoples_speech data/manifests
    touch data/manifests/.peoples_speech.done
  fi
fi
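
# The manifests prepared above are written to data/manifests as jsonl.gz
# files; stage 3 below is expected to read them when preprocessing the corpus.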

if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
  log "Stage 2: Prepare musan manifest"
  # We assume that you have downloaded the musan corpus
  # to $dl_dir/musan
  mkdir -p data/manifests
  if [ ! -e data/manifests/.musan.done ]; then
    lhotse prepare musan $dl_dir/musan data/manifests
    touch data/manifests/.musan.done
  fi
fi

if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
  log "Stage 3: Preprocess People's Speech manifest"
  mkdir -p data/fbank
  if [ ! -e data/fbank/.preprocess_complete ]; then
    ./local/preprocess_peoples_speech.py
    touch data/fbank/.preprocess_complete
  fi
fi
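
# The preprocessing step is expected to write raw (feature-less) cut manifests
# to data/fbank, i.e. the files
# data/fbank/peoples_speech_cuts_{clean,clean_sa,dirty,dirty_sa}_raw.jsonl.gz
# that stages 5 and 6 consume below.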

if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
  log "Stage 4: Compute fbank for valid and test subsets of People's Speech"
  if [ ! -e data/fbank/.peoples_speech_valid_test.done ]; then
    ./local/compute_fbank_peoples_speech_valid_test.py
    touch data/fbank/.peoples_speech_valid_test.done
  fi
fi

if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
  log "Stage 5: Split train subset into pieces"
  split_dir=data/fbank/peoples_speech_train_split
  if [ ! -e $split_dir/.peoples_speech_dirty_split.done ]; then
    lhotse split-lazy ./data/fbank/peoples_speech_cuts_dirty_raw.jsonl.gz $split_dir $num_per_split
    touch $split_dir/.peoples_speech_dirty_split.done
  fi

  if [ ! -e $split_dir/.peoples_speech_dirty_sa_split.done ]; then
    lhotse split-lazy ./data/fbank/peoples_speech_cuts_dirty_sa_raw.jsonl.gz $split_dir $num_per_split
    touch $split_dir/.peoples_speech_dirty_sa_split.done
  fi

  if [ ! -e $split_dir/.peoples_speech_clean_split.done ]; then
    lhotse split-lazy ./data/fbank/peoples_speech_cuts_clean_raw.jsonl.gz $split_dir $num_per_split
    touch $split_dir/.peoples_speech_clean_split.done
  fi

  if [ ! -e $split_dir/.peoples_speech_clean_sa_split.done ]; then
    lhotse split-lazy ./data/fbank/peoples_speech_cuts_clean_sa_raw.jsonl.gz $split_dir $num_per_split
    touch $split_dir/.peoples_speech_clean_sa_split.done
  fi
fi
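
# Each subset above is split into chunks of $num_per_split cuts by
# "lhotse split-lazy"; a quick sanity check of how many pieces were produced:
#
#   ls data/fbank/peoples_speech_train_split/*.jsonl.gz | wc -l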

if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
  log "Stage 6: Compute features for train subset of People's Speech"
  if [ ! -e data/fbank/.peoples_speech_train.done ]; then
    ./local/compute_fbank_peoples_speech_splits.py \
      --num-workers $nj \
      --batch-duration 600 \
      --start 0 \
      --num-splits 2000
    touch data/fbank/.peoples_speech_train.done
  fi
fi
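
# Computing features for all split pieces can take a long time. If the run
# above is interrupted, it can be resumed by re-running the same command with
# a larger --start value (in similar icefall recipes --start is the index of
# the first split piece to process, while --num-splits stays the same), e.g.:
#
#   ./local/compute_fbank_peoples_speech_splits.py \
#     --num-workers 32 \
#     --batch-duration 600 \
#     --start 1000 \
#     --num-splits 2000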

if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then
  log "Stage 7: Compute fbank for musan"
  mkdir -p data/fbank
  if [ ! -e data/fbank/.musan.done ]; then
    ./local/compute_fbank_musan.py
    touch data/fbank/.musan.done
  fi
fi

if [ $stage -le 8 ] && [ $stop_stage -ge 8 ]; then
  log "Stage 8: Prepare BPE based lang"

  for vocab_size in ${vocab_sizes[@]}; do
    lang_dir=data/lang_bpe_${vocab_size}
    mkdir -p $lang_dir

    if [ ! -f $lang_dir/transcript_words.txt ]; then
      log "Generate data for BPE training"
      file=$(
        find "data/fbank/peoples_speech_cuts_dirty_raw.jsonl.gz"
        find "data/fbank/peoples_speech_cuts_dirty_sa_raw.jsonl.gz"
        find "data/fbank/peoples_speech_cuts_clean_raw.jsonl.gz"
        find "data/fbank/peoples_speech_cuts_clean_sa_raw.jsonl.gz"
      )
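      # The pipeline below extracts the transcript text from each gzipped cut
      # manifest: with '"' as the awk field separator, the transcript is
      # assumed to be the 30th quoted field of every JSON line.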
      gunzip -c ${file} | awk -F '"' '{print $30}' > $lang_dir/transcript_words.txt

      # Ensure space only appears once
      sed -i 's/\t/ /g' $lang_dir/transcript_words.txt
      sed -i 's/  */ /g' $lang_dir/transcript_words.txt
    fi
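
    # The block below builds words.txt: it splits the transcripts into one
    # word per line and dedupes them, then prepends the special symbols !SIL,
    # <SPOKEN_NOISE> and <UNK>, assigns integer word IDs starting from
    # "<eps> 0", and appends #0, <s> and </s> at the end. It aborts if <s>
    # or </s> already occurs as a regular word.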
    if [ ! -f $lang_dir/words.txt ]; then
      cat $lang_dir/transcript_words.txt | sed 's/ /\n/g' \
        | sort -u | sed '/^$/d' > $lang_dir/words.txt
      (echo '!SIL'; echo '<SPOKEN_NOISE>'; echo '<UNK>'; ) |
        cat - $lang_dir/words.txt | sort | uniq | awk '
        BEGIN {
          print "<eps> 0";
        }
        {
          if ($1 == "<s>") {
            print "<s> is in the vocabulary!" | "cat 1>&2"
            exit 1;
          }
          if ($1 == "</s>") {
            print "</s> is in the vocabulary!" | "cat 1>&2"
            exit 1;
          }
          printf("%s %d\n", $1, NR);
        }
        END {
          printf("#0 %d\n", NR+1);
          printf("<s> %d\n", NR+2);
          printf("</s> %d\n", NR+3);
        }' > $lang_dir/words || exit 1;
      mv $lang_dir/words $lang_dir/words.txt
    fi

    if [ ! -f $lang_dir/bpe.model ]; then
      ./local/train_bpe_model.py \
        --lang-dir $lang_dir \
        --vocab-size $vocab_size \
        --transcript $lang_dir/transcript_words.txt
    fi
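
    # Optional sanity check of the trained BPE model; a minimal sketch that
    # assumes the sentencepiece Python package is installed and that
    # vocab_size is 500:
    #
    #   python3 -c "import sentencepiece as spm; sp = spm.SentencePieceProcessor(model_file='data/lang_bpe_500/bpe.model'); print(sp.encode('HELLO WORLD', out_type=str))"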

    if [ ! -f $lang_dir/L_disambig.pt ]; then
      ./local/prepare_lang_bpe.py --lang-dir $lang_dir

      log "Validating $lang_dir/lexicon.txt"
      ./local/validate_bpe_lexicon.py \
        --lexicon $lang_dir/lexicon.txt \
        --bpe-model $lang_dir/bpe.model
    fi

    if [ ! -f $lang_dir/L.fst ]; then
      log "Converting L.pt to L.fst"
      ./shared/convert-k2-to-openfst.py \
        --olabels aux_labels \
        $lang_dir/L.pt \
        $lang_dir/L.fst
    fi

    if [ ! -f $lang_dir/L_disambig.fst ]; then
      log "Converting L_disambig.pt to L_disambig.fst"
      ./shared/convert-k2-to-openfst.py \
        --olabels aux_labels \
        $lang_dir/L_disambig.pt \
        $lang_dir/L_disambig.fst
    fi
  done
fi