#!/usr/bin/env bash

set -eou pipefail

# Fix the segmentation fault reported in https://github.com/k2-fsa/icefall/issues/674
export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python

# Dependencies: pip install lhotse s3tokenizer

stage=6
stop_stage=6

# dl_dir=$PWD/download
dl_dir=/workspace_data/Emilia-Dataset/

prefix="emilia"

# Supported languages: zh, en, ja, ko, de, fr
# lang_set=("de" "en" "zh" "ja" "ko" "fr")
lang_set=("de" "en" "zh" "ja" "fr")

# Language subset for the combine/split stage (assumed default: zh; any value
# from lang_set works; override with --subset).
subset="zh"

. shared/parse_options.sh || exit 1
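
# Example invocation (parse_options.sh maps --stage/--stop-stage/--dl-dir etc.
# onto the variables above, as in the standard Kaldi/icefall helper):
#   ./prepare.sh --stage 0 --stop-stage 1 --dl-dir /path/to/Emilia-Dataset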

# All files generated by this script are saved in "data".
# You can safely remove "data" and rerun this script to regenerate it.
mkdir -p data

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}
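
# e.g. `log "hello"` prints a timestamped line such as:
#   2025-01-01 12:00:00 (prepare.sh:42:main) hello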

if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
  log "dl_dir: $dl_dir"
  log "Stage 0: Download data"
  # huggingface-cli login
  # huggingface-cli download --repo-type dataset --local-dir $dl_dir amphion/Emilia-Dataset

  # Extract the downloaded data:
  for lang in "${lang_set[@]}"; do
    lang_upper=$(echo "${lang}" | tr '[:lower:]' '[:upper:]')
    folder=$dl_dir/raw/${lang_upper}
    for file in $folder/*.tar.gz; do
      echo "Processing ${file}"
      # e.g. $dl_dir/raw/DE/*.tar.gz, where DE is the upper-case language code
      tar -xzvf "$file" -C "$folder"
    done
  done
fi
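
# To fetch only one language's tarballs, flags like the following should work
# (the Emilia/<LANG>/ layout inside the repo is an assumption; check the
# dataset card first):
#   huggingface-cli download --repo-type dataset amphion/Emilia-Dataset \
#     --include "Emilia/DE/*.tar.gz" --local-dir $dl_dir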

if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
  log "Stage 1: Prepare emilia manifest"
  # We assume that you have downloaded and extracted the Emilia corpus
  # under $dl_dir (see Stage 0).
  mkdir -p data/manifests
  for lang in "${lang_set[@]}"; do
    echo "Processing ${lang}"
    if [ ! -e data/manifests/.emilia.${lang}.done ]; then
      lhotse prepare emilia $dl_dir data/manifests --num-jobs 30 --lang "${lang}"
      touch data/manifests/.emilia.${lang}.done
    fi
  done
fi
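
# Quick sanity check (the manifest file naming below is an assumption; adjust
# to whatever `lhotse prepare emilia` actually writes):
#   python -c "from lhotse import load_manifest; \
#     print(len(load_manifest('data/manifests/emilia_supervisions_de.jsonl.gz')))"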

if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
  log "Stage 2: Generate fbank (used by ./f5-tts)"
  mkdir -p data/fbank
  for lang in "${lang_set[@]}"; do
    echo "Processing ${lang}"
    if [ ! -e data/fbank/.emilia.${lang}.done ]; then
      ./local/compute_mel_feat.py --dataset-parts $lang --split 100 --prefix ${prefix}
      touch data/fbank/.emilia.${lang}.done
    fi
  done
fi
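
# Each language is written as 100 pieces (--split 100 above), matching the
# ${prefix}_cuts_<subset>.<piece>.jsonl.gz pattern combined in the split stage:
#   ls data/fbank/emilia_cuts_zh.*.jsonl.gz | wc -l   # expect 100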

if [ $stage -le 16 ] && [ $stop_stage -ge 16 ]; then
  log "Stage 16: Split the ${prefix} cuts into train, valid and test sets (used by ./f5-tts)"
  if [ ! -f data/fbank/${prefix}_cuts_${subset}.jsonl.gz ]; then
    echo "Combining ${prefix} cuts"
    pieces=$(find data/fbank/ -name "${prefix}_cuts_${subset}.*.jsonl.gz")
    lhotse combine $pieces data/fbank/${prefix}_cuts_${subset}.jsonl.gz
  fi
  if [ ! -e data/fbank/.${prefix}_split.done ]; then
    echo "Splitting ${prefix} cuts into train, valid and test sets"

    # Hold out the last 800 cuts, then take the first/last 400 of those
    # as the valid and test sets respectively.
    lhotse subset --last 800 \
      data/fbank/${prefix}_cuts_${subset}.jsonl.gz \
      data/fbank/${prefix}_cuts_validtest.jsonl.gz
    lhotse subset --first 400 \
      data/fbank/${prefix}_cuts_validtest.jsonl.gz \
      data/fbank/${prefix}_cuts_valid.jsonl.gz
    lhotse subset --last 400 \
      data/fbank/${prefix}_cuts_validtest.jsonl.gz \
      data/fbank/${prefix}_cuts_test.jsonl.gz

    rm data/fbank/${prefix}_cuts_validtest.jsonl.gz

    # Everything except the held-out 800 cuts becomes the train set.
    n=$(( $(gunzip -c data/fbank/${prefix}_cuts_${subset}.jsonl.gz | wc -l) - 800 ))
    lhotse subset --first $n \
      data/fbank/${prefix}_cuts_${subset}.jsonl.gz \
      data/fbank/${prefix}_cuts_train.jsonl.gz
    touch data/fbank/.${prefix}_split.done
  fi
fi
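
# Sanity check after the split: valid and test should each hold 400 cuts.
#   gunzip -c data/fbank/${prefix}_cuts_valid.jsonl.gz | wc -l   # expect 400
#   gunzip -c data/fbank/${prefix}_cuts_test.jsonl.gz | wc -l    # expect 400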

# To dump a Kaldi-style wav.scp from a cut manifest:
#   zcat test.jsonl.gz | jq -r '.recording.id + " " + .recording.sources[0].source' > wav.scp

if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
  log "Stage 4: Extract cosyvoice2 FSQ token for ZH (used by ./f5-tts semantic token experiment)"
  data_dir=$dl_dir/raw/ZH
  # For all jsonl files in data_dir
  for jsonl_file in $data_dir/*.jsonl; do
    jsonl_file_basename=$(basename $jsonl_file)
    echo "Processing $jsonl_file"
    output_dir="./cosy_v2_tokens_ZH/${jsonl_file_basename%.jsonl}"
    echo "output_dir: $output_dir"
    # Skip if the output_dir exists
    if [ -e $output_dir ]; then
      echo "Output directory $output_dir already exists, skipping"
      continue
    fi
    mkdir -p $output_dir
    torchrun --nproc_per_node=8 --nnodes=1 \
      --rdzv_id=2024 --rdzv_backend="c10d" --rdzv_endpoint="localhost:0" \
      local/extract_cosyvoice2_token.py --data_dir $data_dir \
        --jsonl_file $jsonl_file_basename \
        --device "cuda" \
        --output_dir $output_dir \
        --batch_size 32 \
        --num_workers 2 \
        --model "speech_tokenizer_v2_25hz" # or "speech_tokenizer_v1_25hz"
  done
fi
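
# On a single-GPU machine the same extraction should work by lowering the
# process count (assumption: the script shards work only via torchrun ranks):
#   torchrun --nproc_per_node=1 --nnodes=1 ... local/extract_cosyvoice2_token.py ...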

if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
  log "Stage 5: Extract cosyvoice2 FSQ token for all languages in lang_set (used by ./f5-tts semantic token experiment)"
  for lang in "${lang_set[@]}"; do
    lang_upper=$(echo "${lang}" | tr '[:lower:]' '[:upper:]')
    data_dir=$dl_dir/raw/${lang_upper}
    # For all jsonl files in data_dir
    for jsonl_file in $data_dir/*.jsonl; do
      jsonl_file_basename=$(basename $jsonl_file)
      echo "Processing $jsonl_file"
      output_dir="./cosy_v2_tokens_${lang_upper}/${jsonl_file_basename%.jsonl}"
      echo "output_dir: $output_dir"
      # Skip if the output_dir exists
      if [ -e $output_dir ]; then
        echo "Output directory $output_dir already exists, skipping"
        continue
      fi
      mkdir -p $output_dir
      torchrun --nproc_per_node=8 --nnodes=1 \
        --rdzv_id=2024 --rdzv_backend="c10d" --rdzv_endpoint="localhost:0" \
        local/extract_cosyvoice2_token.py --data_dir $data_dir \
          --jsonl_file $jsonl_file_basename \
          --device "cuda" \
          --output_dir $output_dir \
          --batch_size 32 \
          --num_workers 2 \
          --model "speech_tokenizer_v2_25hz" # or "speech_tokenizer_v1_25hz"
    done
  done
fi

if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
  log "Stage 6: Merge the extracted cosyvoice2 token pieces per language"
  # Note: split raw tarballs can be re-assembled before extraction, e.g.
  #   cat EN_B00008.tar.gz.* > EN_B00008.tar.gz
  for lang in "${lang_set[@]}"; do
    lang_upper=$(echo "${lang}" | tr '[:lower:]' '[:upper:]')
    cosy_token_dir="./cosy_v2_tokens_${lang_upper}"
    for dir in $cosy_token_dir/*; do
      echo "Processing $dir"
      dir_basename=$(basename $dir)
      echo "dir_basename: $dir_basename"
      # Concatenate the part files (presumably one per torchrun rank).
      cat $dir/part* > $dir/${dir_basename}.jsonl
    done
    cat $cosy_token_dir/${lang_upper}*/*.jsonl > $cosy_token_dir/cosy_v2_tokens_${lang_upper}.jsonl
  done
fi
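
# Quick check of the merged output (assumption: one JSON line per utterance):
#   wc -l cosy_v2_tokens_ZH/cosy_v2_tokens_ZH.jsonl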