#!/usr/bin/env bash
#
# Data-preparation pipeline for the LibriTTS TTS recipes (VITS / VALL-E).
#
# Stages:
#   -1  build the monotonic_align extension used by VITS
#    0  download LibriTTS and the pre-trained x-vector nnet
#    1  prepare lhotse manifests
#    2  compute spectrograms and build the shuffled training mixes
#    3  prepare phoneme tokens
#    4  generate the token file
#    5  tokenize / extract Encodec codes for VALL-E
#
# All files generated by this script are saved in "data".
# You can safely remove "data" and rerun this script to regenerate it.
#
# Usage: ./prepare.sh [--stage N] [--stop-stage M] (via shared/parse_options.sh)

# fix segmentation fault reported in https://github.com/k2-fsa/icefall/issues/674
export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python

set -euo pipefail

stage=0
stop_stage=100
sampling_rate=24000
nj=32

dl_dir=$PWD/download

. shared/parse_options.sh || exit 1

mkdir -p data

log() {
  # This function is from espnet: prefix each message with timestamp and
  # the file:line:function of the caller.
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

log "dl_dir: $dl_dir"

if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then
  log "Stage -1: build monotonic_align lib"
  if [ ! -d vits/monotonic_align/build ]; then
    cd vits/monotonic_align
    python setup.py build_ext --inplace
    cd ../../
  else
    log "monotonic_align lib already built"
  fi
fi

if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
  log "Stage 0: Download data"

  # If you have pre-downloaded it to /path/to/LibriTTS,
  # you can create a symlink
  #
  #   ln -sfv /path/to/LibriTTS $dl_dir/LibriTTS
  #
  if [ ! -d "$dl_dir/LibriTTS" ]; then
    lhotse download libritts "$dl_dir"
  fi

  if [ ! -d "$dl_dir/xvector_nnet_1a_libritts_clean_460" ]; then
    log "Downloading x-vector"

    git clone https://huggingface.co/datasets/zrjin/xvector_nnet_1a_libritts_clean_460 "$dl_dir/xvector_nnet_1a_libritts_clean_460"

    mkdir -p exp/xvector_nnet_1a/
    cp -r "$dl_dir"/xvector_nnet_1a_libritts_clean_460/* exp/xvector_nnet_1a/
  fi
fi

if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
  log "Stage 1: Prepare LibriTTS manifest"
  # We assume that you have downloaded the LibriTTS corpus
  # to $dl_dir/LibriTTS
  mkdir -p data/manifests
  if [ ! -e data/manifests/.libritts.done ]; then
    lhotse prepare libritts --num-jobs "${nj}" "$dl_dir/LibriTTS" data/manifests
    touch data/manifests/.libritts.done
  fi
fi

if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
  log "Stage 2: Compute Spectrogram for LibriTTS"
  mkdir -p data/spectrogram
  if [ ! -e data/spectrogram/.libritts.done ]; then
    ./local/compute_spectrogram_libritts.py --sampling-rate "$sampling_rate"
    touch data/spectrogram/.libritts.done
  fi

  # Here we shuffle and combine the train-clean-100, train-clean-360 and
  # train-other-500 together to form the training set.
  if [ ! -f data/spectrogram/libritts_cuts_train-all-shuf.jsonl.gz ]; then
    cat <(gunzip -c data/spectrogram/libritts_cuts_train-clean-100.jsonl.gz) \
      <(gunzip -c data/spectrogram/libritts_cuts_train-clean-360.jsonl.gz) \
      <(gunzip -c data/spectrogram/libritts_cuts_train-other-500.jsonl.gz) | \
      shuf | gzip -c > data/spectrogram/libritts_cuts_train-all-shuf.jsonl.gz
  fi

  # Here we shuffle and combine the train-clean-100, train-clean-360
  # together to form the training set.
  if [ ! -f data/spectrogram/libritts_cuts_train-clean-460.jsonl.gz ]; then
    cat <(gunzip -c data/spectrogram/libritts_cuts_train-clean-100.jsonl.gz) \
      <(gunzip -c data/spectrogram/libritts_cuts_train-clean-360.jsonl.gz) | \
      shuf | gzip -c > data/spectrogram/libritts_cuts_train-clean-460.jsonl.gz
  fi

  if [ ! -e data/spectrogram/.libritts-validated.done ]; then
    log "Validating data/spectrogram for LibriTTS"
    ./local/validate_manifest.py \
      data/spectrogram/libritts_cuts_train-all-shuf.jsonl.gz
    touch data/spectrogram/.libritts-validated.done
  fi
fi

if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
  log "Stage 3: Prepare phoneme tokens for LibriTTS"
  # We assume you have installed piper_phonemize and espnet_tts_frontend.
  # If not, please install them with:
  # - piper_phonemize:
  #     refer to https://github.com/rhasspy/piper-phonemize,
  #     could install the pre-built wheels from https://github.com/csukuangfj/piper-phonemize/releases/tag/2023.12.5
  # - espnet_tts_frontend:
  #     `pip install espnet_tts_frontend`, refer to https://github.com/espnet/espnet_tts_frontend/
  if [ ! -e data/spectrogram/.libritts_with_token.done ]; then
    ./local/prepare_tokens_libritts.py
    touch data/spectrogram/.libritts_with_token.done
  fi
fi

if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
  log "Stage 4: Generate token file"
  # We assume you have installed piper_phonemize and espnet_tts_frontend.
  # If not, please install them with:
  # - piper_phonemize:
  #     refer to https://github.com/rhasspy/piper-phonemize,
  #     could install the pre-built wheels from https://github.com/csukuangfj/piper-phonemize/releases/tag/2023.12.5
  # - espnet_tts_frontend:
  #     `pip install espnet_tts_frontend`, refer to https://github.com/espnet/espnet_tts_frontend/
  if [ ! -e data/tokens.txt ]; then
    ./local/prepare_token_file.py --tokens data/tokens.txt
  fi
fi

audio_feats_dir=data/tokenized
dataset_parts="--dataset-parts all"  # debug "-p dev-clean -p test-clean"

if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
  log "Stage 5: Tokenize/Fbank LibriTTS for valle"
  mkdir -p "${audio_feats_dir}"
  if [ ! -e "${audio_feats_dir}/.libritts.tokenize.done" ]; then
    # NOTE(review): ${dataset_parts} already contains "--dataset-parts all",
    # so this passes it as the *value* of --dataset-parts; confirm the
    # Python script's argument parsing expects this doubled form.
    python3 ./local/compute_neural_codec_and_prepare_text_tokens.py --dataset-parts "${dataset_parts}" \
      --audio-extractor "Encodec" \
      --batch-duration 400 \
      --src-dir "data/manifests" \
      --output-dir "${audio_feats_dir}"

    # BUG FIX: the done-marker used to be created *outside* this guard,
    # i.e. unconditionally on every run of stage 5.  Create it only after
    # the tokenization actually succeeded, matching the other stages.
    touch "${audio_feats_dir}/.libritts.tokenize.done"
  fi

  lhotse combine \
    "${audio_feats_dir}/libritts_cuts_train-clean-100.jsonl.gz" \
    "${audio_feats_dir}/libritts_cuts_train-clean-360.jsonl.gz" \
    "${audio_feats_dir}/libritts_cuts_train-other-500.jsonl.gz" \
    "${audio_feats_dir}/cuts_train.jsonl.gz"
  lhotse copy \
    "${audio_feats_dir}/libritts_cuts_dev-clean.jsonl.gz" \
    "${audio_feats_dir}/cuts_dev.jsonl.gz"
  lhotse copy \
    "${audio_feats_dir}/libritts_cuts_test-clean.jsonl.gz" \
    "${audio_feats_dir}/cuts_test.jsonl.gz"
fi