# icefall/egs/mls_english/ASR/prepare.sh
# (repository-view metadata: 2025-04-16 08:05:05 +09:00, 73 lines, 2.0 KiB, Bash)
#!/usr/bin/env bash
# Prepare script for MLS English ASR recipe in icefall
# This recipe uses on-the-fly feature extraction, so it skips manifest
# and feature generation steps used in other recipes.
# fix segmentation fault reported in https://github.com/k2-fsa/icefall/issues/674
export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python
# Strict mode: -e (exit on error), -u (error on unset variables),
# -o pipefail (pipeline fails if any stage fails). The bundled
# "-eou pipefail" spelling is equivalent to "set -euo pipefail".
set -eou pipefail
# Number of parallel jobs. NOTE(review): not referenced directly in this
# script; presumably consumed via the sourced option parser or kept for
# consistency with other icefall recipes — confirm before removing.
nj=15
# Run only stages in the inclusive range [stage, stop_stage].
# Defaults (-1, 100) run everything.
stage=-1
stop_stage=100
# Configuration for BPE tokenizer
vocab_sizes=(2000) # You can add more sizes like (500 1000 2000) for comparison
# Directory where dataset will be downloaded
dl_dir=$PWD/download
# Apply command-line overrides (e.g. --stage, --stop-stage) to the
# variables defined above via icefall's shared option parser.
. shared/parse_options.sh || exit 1
# All files generated by this script are saved in "data".
mkdir -p data
# Timestamped logger: prints "<date> (<caller file>:<line>:<function>) <msg>"
# to stdout. Caller location is read one frame up the bash call stack, so
# messages point at the line that invoked log, not at log itself.
log() {
  local caller_file
  caller_file=${BASH_SOURCE[1]##*/}
  # %b interprets backslash escapes in the message, matching `echo -e`.
  printf '%b\n' "$(date '+%Y-%m-%d %H:%M:%S') (${caller_file}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}
log "Starting MLS English data preparation"

# Stage 0: fetch the MLS English dataset from Hugging Face.
# All expansions are quoted: dl_dir derives from $PWD, which may
# contain spaces (SC2086).
if [ "$stage" -le 0 ] && [ "$stop_stage" -ge 0 ]; then
  log "Stage 0: Download MLS English dataset"
  # Clone only if the target directory is not already present,
  # so re-runs of the script are cheap and idempotent.
  if [ ! -d "$dl_dir/mls_english" ]; then
    if ! git clone https://huggingface.co/datasets/parler-tts/mls_eng "$dl_dir/mls_english"; then
      log "Failed to download MLS English dataset"
      exit 1
    fi
  fi
fi
# Directory holding language resources (transcript, BPE models).
# Assign the variable first and reuse it so the path literal
# appears exactly once.
lang_dir=data/lang
mkdir -p "$lang_dir"

# Stage 1: produce $lang_dir/transcript.txt, the text corpus used to
# train the BPE tokenizer in stage 2. Skipped if it already exists.
if [ "$stage" -le 1 ] && [ "$stop_stage" -ge 1 ]; then
  log "Stage 1: Prepare transcript for BPE training"
  if [ ! -f "$lang_dir/transcript.txt" ]; then
    log "Generating transcripts for BPE training"
    ./local/utils/generate_transcript.py --lang-dir "$lang_dir"
  fi
fi
# Stage 2: train one BPE tokenizer per requested vocabulary size.
if [ "$stage" -le 2 ] && [ "$stop_stage" -ge 2 ]; then
  log "Stage 2: Prepare BPE tokenizer"
  # Quote the array expansion ("${vocab_sizes[@]}", SC2068) so each
  # size is passed as a single word even if values ever contain spaces.
  for vocab_size in "${vocab_sizes[@]}"; do
    log "Training BPE model with vocab_size=${vocab_size}"
    bpe_dir=data/lang/bpe_${vocab_size}
    mkdir -p "$bpe_dir"
    # Skip training if a model already exists (idempotent re-runs).
    if [ ! -f "$bpe_dir/bpe.model" ]; then
      ./local/train_bpe_model.py \
        --lang-dir "$bpe_dir" \
        --vocab-size "$vocab_size" \
        --transcript "$lang_dir/transcript.txt"
    fi
  done
fi

log "MLS English data preparation completed successfully"