icefall/egs/emilia/TTS/prepare.sh

#!/usr/bin/env bash
set -eou pipefail
# fix segmentation fault reported in https://github.com/k2-fsa/icefall/issues/674
export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python
stage=3
stop_stage=4
# Please download the Emilia dataset in the OpenDataLab format from HuggingFace.
# You can pin the revision argument to fc71e07e8572f5f3be1dbd02ed3172a4d298f152,
# which is the old (OpenDataLab) format:
# https://huggingface.co/datasets/amphion/Emilia-Dataset/tree/fc71e07e8572f5f3be1dbd02ed3172a4d298f152
dl_dir=$PWD/download
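# One way to fetch the dataset into $dl_dir (a sketch, assuming the
# huggingface_hub CLI is installed and you have accepted the dataset's terms on
# the Hub; make sure the language folders end up under $dl_dir/raw/<LANG>):
#   huggingface-cli download --repo-type dataset amphion/Emilia-Dataset \
#     --revision fc71e07e8572f5f3be1dbd02ed3172a4d298f152 \
#     --local-dir $dl_dir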
prefix="emilia"
# Languages to process; choose from: zh, en, ja, ko, de, fr
lang_set=("de" "en" "zh" "ja" "ko" "fr")
. shared/parse_options.sh || exit 1
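# The variables above can be overridden on the command line via parse_options.sh,
# e.g. to run only the download/extraction and manifest stages:
#   ./prepare.sh --stage 0 --stop-stage 1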
# All files generated by this script are saved in "data".
# You can safely remove "data" and rerun this script to regenerate it.
mkdir -p data
log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}
if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
  log "dl_dir: $dl_dir"
  log "Stage 0: Download data"
  # Extract the downloaded data:
  # EN_B00008 is shipped as split archives (EN_B00008.tar.gz.*), so reassemble it first.
  cat $dl_dir/raw/EN/EN_B00008.tar.gz.* > $dl_dir/raw/EN/EN_B00008.tar.gz
  for lang in "${lang_set[@]}"; do
    lang_upper=$(echo "${lang}" | tr '[:lower:]' '[:upper:]')
    folder=$dl_dir/raw/${lang_upper}
    for file in "$folder"/*.tar.gz; do
      echo "Processing ${file}"
      tar -xzvf "$file" -C "$folder"
    done
  done
fi
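
# Optional sanity check after Stage 0 (a sketch; the exact layout depends on the
# downloaded revision): each $dl_dir/raw/<LANG> folder should now contain the
# extracted audio alongside the *.jsonl metadata that Stage 3 reads, e.g.
#   ls $dl_dir/raw/EN/*.jsonl | head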
if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
  log "Stage 1: Prepare emilia manifest (used by ./f5-tts)"
  # We assume that you have downloaded and extracted the Emilia corpus
  # under $dl_dir (see Stage 0).
  mkdir -p data/manifests
  for lang in "${lang_set[@]}"; do
    echo "Processing ${lang}"
    if [ ! -e data/manifests/.emilia.${lang}.done ]; then
      lhotse prepare emilia $dl_dir data/manifests --num-jobs 30 --lang "${lang}"
      touch data/manifests/.emilia.${lang}.done
    fi
  done
fi
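
# The manifests land in data/manifests/; the exact file names are determined by
# lhotse's Emilia recipe, so inspect the folder if unsure, e.g.
#   ls data/manifests/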
if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
  log "Stage 2: Generate fbank (used by ./f5-tts)"
  mkdir -p data/fbank
  for lang in "${lang_set[@]}"; do
    echo "Processing ${lang}"
    if [ ! -e data/fbank/.emilia.${lang}.done ]; then
      ./local/compute_mel_feat.py --dataset-parts $lang --split 100 --prefix ${prefix}
      touch data/fbank/.emilia.${lang}.done
    fi
  done
fi
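
# Stages 1 and 2 are guarded by .done marker files; to force a re-run for a
# language, remove its marker first, e.g.
#   rm data/fbank/.emilia.zh.done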
if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
  log "Stage 3: Extract cosyvoice2 FSQ token (used by ./llaasa_cosyvoice2_token)"
  for lang in "${lang_set[@]}"; do
    lang_upper=$(echo "${lang}" | tr '[:lower:]' '[:upper:]')
    data_dir=$dl_dir/raw/${lang_upper}
    # Tokenize every jsonl metadata file in data_dir.
    for jsonl_file in "$data_dir"/*.jsonl; do
      # Get the file basename.
      jsonl_file_basename=$(basename "$jsonl_file")
      echo "Processing $jsonl_file"
      output_dir="./cosy_v2_tokens_${lang_upper}/${jsonl_file_basename%.jsonl}"
      echo "output_dir: $output_dir"
      # Skip if the output_dir already exists.
      if [ -e "$output_dir" ]; then
        echo "Output directory $output_dir already exists, skipping"
        continue
      fi
      mkdir -p "$output_dir"
      # One process per GPU; adjust --nproc_per_node to match the number of
      # available GPUs on this node.
      torchrun --nproc_per_node=8 --nnodes=1 \
        --rdzv_id=2024 --rdzv_backend="c10d" --rdzv_endpoint="localhost:0" \
        local/extract_cosyvoice2_token.py --data_dir "$data_dir" \
        --jsonl_file "$jsonl_file_basename" \
        --device "cuda" \
        --output_dir "$output_dir" \
        --batch_size 32 \
        --num_workers 2 \
        --model "speech_tokenizer_v2_25hz" # or "speech_tokenizer_v1_25hz"
    done
  done
fi
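
# Optional sanity check (a sketch, assuming the part* shards under each
# output_dir that Stage 4 merges below): compare the number of tokenized rows
# against the number of input rows, e.g.
#   wc -l $dl_dir/raw/EN/EN_B00008.jsonl
#   cat ./cosy_v2_tokens_EN/EN_B00008/part* | wc -l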
if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
  log "Stage 4: Merge cosyvoice2 FSQ token (used by ./llaasa_cosyvoice2_token)"
  for lang in "${lang_set[@]}"; do
    lang_upper=$(echo "${lang}" | tr '[:lower:]' '[:upper:]')
    cosy_token_dir="./cosy_v2_tokens_${lang_upper}"
    # Merge the part* shards written by Stage 3 into one jsonl per split ...
    for dir in "$cosy_token_dir"/*; do
      echo "Processing $dir"
      dir_basename=$(basename "$dir")
      echo "dir_basename: $dir_basename"
      cat "$dir"/part* > "$dir/${dir_basename}.jsonl"
    done
    # ... then concatenate all splits into a single file per language.
    cat "$cosy_token_dir"/${lang_upper}*/*.jsonl > "$cosy_token_dir/cosy_v2_tokens_${lang_upper}.jsonl"
  done
fi
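
# Optional: report the size of each merged per-language token file, e.g.
#   for f in ./cosy_v2_tokens_*/cosy_v2_tokens_*.jsonl; do wc -l "$f"; done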