mirror of https://github.com/k2-fsa/icefall.git
synced 2025-08-09 18:12:19 +00:00
109 lines | 3.6 KiB | Bash | Executable File
#!/usr/bin/env bash

# Prepare script for MLS English ASR recipe in icefall

# fix segmentation fault reported in https://github.com/k2-fsa/icefall/issues/674
export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python

# Abort on errors, unset variables, and failures anywhere in a pipeline.
set -euo pipefail
|
|
|
|
# Stage control: only stages in [stage, stop_stage] run; defaults run all.
stage=-1
stop_stage=100

# Configuration for BPE tokenizer
vocab_sizes=(2000) # You can add more sizes like (500 1000 2000) for comparison

# Directory where dataset will be downloaded
dl_dir="$PWD/download"
|
|
|
|
# Kaldi-style option parser: lets callers override the variables above from
# the command line, e.g. ./prepare.sh --stage 2 --stop-stage 3.
. shared/parse_options.sh || exit 1
|
|
|
|
# All files generated by this script are saved in "data".
mkdir -p data data/audio data/manifests data/lang
|
|
|
|
# Print a timestamped log line: "<date> (caller_file:line:function) message".
# %b keeps backslash-escape expansion identical to the previous `echo -e`.
log() {
  local src=${BASH_SOURCE[1]##*/}
  printf '%b\n' "$(date '+%Y-%m-%d %H:%M:%S') (${src}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}
|
|
|
|
# Entry marker emitted before any stage runs.
log "Starting MLS English data preparation"
|
|
|
|
if [[ $stage -le 0 && $stop_stage -ge 0 ]]; then
  log "Stage 0: Download MLS English dataset"

  # Make sure the huggingface_hub Python library is importable first.
  if ! python -c "import huggingface_hub" &> /dev/null; then
    log "huggingface_hub Python library not found. Installing it now..."
    # Using --break-system-packages for Debian/Ubuntu environments where pip install might fail without it
    python -m pip install huggingface_hub \
      || python -m pip install huggingface_hub --break-system-packages \
      || {
        log "Failed to install huggingface_hub. Please install it manually: pip install huggingface_hub"
        exit 1
      }
    log "huggingface_hub installed successfully."
  fi

  # Skip the (large) download when the dataset directory already exists.
  if [[ -d "$dl_dir/mls_english" ]]; then
    log "Dataset already exists at $dl_dir/mls_english. Skipping download."
  else
    log "Dataset not found at $dl_dir/mls_english. Starting download..."
    python ./local/utils/download_mls_english.py --dl-dir "$dl_dir" || {
      log "Failed to download MLS English dataset via download_mls_english.py"
      exit 1
    }
  fi
fi
|
|
|
|
if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
  log "Stage 1: Compute MLS English fbank"
  # The .done marker lets re-runs skip feature extraction and validation.
  if [ ! -e data/manifests/.mls_english-validated.done ]; then
    # Quote $dl_dir: it derives from $PWD and may contain spaces.
    python local/compute_fbank_mls_english.py \
      --manifest-dir data/manifests \
      --audio-dir data/audio \
      --dl-dir "$dl_dir/mls_english"

    # Validate every split before marking this stage complete.
    for subset in train dev test; do
      python local/validate_manifest.py --manifest "data/manifests/mls_eng_cuts_${subset}.jsonl.gz"
    done
    touch data/manifests/.mls_english-validated.done
  fi
fi
|
|
|
|
if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
  log "Stage 2: Prepare transcript for BPE training"
  # Only regenerate when the transcript is missing.
  if [ ! -f data/lang/transcript.txt ]; then
    log "Generating transcripts for BPE training"
    # Quote $dl_dir: it derives from $PWD and may contain spaces.
    python local/utils/generate_transcript.py \
      --dataset-path "$dl_dir/mls_english" \
      --lang-dir data/lang \
      --split train
  fi
fi
|
|
|
|
if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
  log "Stage 3: Prepare BPE tokenizer"
  # Train one BPE model per requested vocabulary size.
  # "${vocab_sizes[@]}" must be quoted to keep each size a separate word
  # without globbing/word-splitting (SC2068).
  for vocab_size in "${vocab_sizes[@]}"; do
    log "Training BPE model with vocab_size=${vocab_size}"
    bpe_dir=data/lang/bpe_${vocab_size}
    mkdir -p "$bpe_dir"

    # Skip training if a model for this vocab size already exists.
    if [ ! -f "$bpe_dir/bpe.model" ]; then
      python local/train_bpe_model.py \
        --lang-dir "$bpe_dir" \
        --vocab-size "$vocab_size" \
        --transcript data/lang/transcript.txt
    fi
  done
fi
|
|
|
|
if [[ $stage -le 4 && $stop_stage -ge 4 ]]; then
  log "Stage 4: Show manifest statistics"
  # Persist the statistics alongside the manifests, then echo them.
  stats_file=data/manifests/manifest_statistics.txt
  python local/display_manifest_statistics.py --manifest-dir data/manifests > "$stats_file"
  cat "$stats_file"
fi
|
|
|
|
# Reached only when every requested stage succeeded (script runs under set -e).
log "MLS English data preparation completed successfully"
|