diff --git a/egs/ami/ASR/local/compute_fbank_ami.py b/egs/ami/ASR/local/compute_fbank_ami.py
new file mode 100755
index 000000000..4892b40e3
--- /dev/null
+++ b/egs/ami/ASR/local/compute_fbank_ami.py
@@ -0,0 +1,194 @@
+#!/usr/bin/env python3
+# Copyright 2022 Johns Hopkins University (authors: Desh Raj)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+This file computes fbank features of the AMI dataset.
+For the training data, we pool together IHM, reverberated IHM, SDM, and
+GSS-enhanced audio. For the dev and test data, we separately prepare IHM,
+SDM, and GSS-enhanced parts (which are the 3 evaluation settings).
+It looks for manifests in the directory data/manifests.
+
+The generated fbank features are saved in data/fbank.
+"""
+import logging
+from pathlib import Path
+
+import torch
+import torch.multiprocessing
+from lhotse import CutSet, LilcomChunkyWriter
+from lhotse.features.kaldifeat import (
+    KaldifeatFbank,
+    KaldifeatFbankConfig,
+    KaldifeatFrameOptions,
+    KaldifeatMelOptions,
+)
+from lhotse.recipes.utils import read_manifests_if_cached
+
+# Torch's multithreaded behavior needs to be disabled, or
+# it wastes a lot of CPU and slows things down.
+# Do this outside of main() in case it needs to take effect
+# even when we are not invoking main (e.g. when spawning subprocesses).
+torch.set_num_threads(1)
+torch.set_num_interop_threads(1)
+torch.multiprocessing.set_sharing_strategy("file_system")
+
+
+def compute_fbank_ami():
+    src_dir = Path("data/manifests")
+    output_dir = Path("data/fbank")
+
+    sampling_rate = 16000
+    num_mel_bins = 80
+
+    extractor = KaldifeatFbank(
+        KaldifeatFbankConfig(
+            frame_opts=KaldifeatFrameOptions(sampling_rate=sampling_rate),
+            mel_opts=KaldifeatMelOptions(num_bins=num_mel_bins),
+            device="cuda",
+        )
+    )
+
+    logging.info("Reading manifests")
+    manifests_ihm = read_manifests_if_cached(
+        dataset_parts=["train", "dev", "test"],
+        output_dir=src_dir,
+        prefix="ami-ihm",
+        suffix="jsonl.gz",
+    )
+    manifests_sdm = read_manifests_if_cached(
+        dataset_parts=["train", "dev", "test"],
+        output_dir=src_dir,
+        prefix="ami-sdm",
+        suffix="jsonl.gz",
+    )
+    # For GSS, the enhancement stage has already produced cuts, so we read
+    # them directly.
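+    # (The GSS manifests are assumed to be produced by local/prepare_ami_gss.sh,
+    # invoked in stage 3 of prepare.sh; the GSS parts are optional, see the
+    # use_gss flag in prepare.sh.)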
+    manifests_gss = read_manifests_if_cached(
+        dataset_parts=["train", "dev", "test"],
+        output_dir=src_dir,
+        prefix="ami-gss",
+        suffix="jsonl.gz",
+    )
+
+    def _extract_feats(cuts: CutSet, storage_path: Path, manifest_path: Path) -> None:
+        # Triple the training data by adding 0.9x and 1.1x speed-perturbed copies.
+        cuts = cuts + cuts.perturb_speed(0.9) + cuts.perturb_speed(1.1)
+        _ = cuts.compute_and_store_features_batch(
+            extractor=extractor,
+            storage_path=storage_path,
+            manifest_path=manifest_path,
+            batch_duration=5000,
+            num_workers=8,
+            storage_type=LilcomChunkyWriter,
+        )
+
+    logging.info(
+        "Preparing training cuts: IHM + reverberated IHM + SDM + GSS (optional)"
+    )
+
+    logging.info("Processing train split IHM")
+    cuts_ihm = (
+        CutSet.from_manifests(**manifests_ihm["train"])
+        .trim_to_supervisions(keep_overlapping=False, keep_all_channels=False)
+        .modify_ids(lambda x: x + "-ihm")
+    )
+    _extract_feats(
+        cuts_ihm,
+        output_dir / "feats_train_ihm",
+        src_dir / "cuts_train_ihm.jsonl.gz",
+    )
+
+    logging.info("Processing train split IHM + reverberated IHM")
+    cuts_ihm_rvb = cuts_ihm.reverb_rir()
+    _extract_feats(
+        cuts_ihm_rvb,
+        output_dir / "feats_train_ihm_rvb",
+        src_dir / "cuts_train_ihm_rvb.jsonl.gz",
+    )
+
+    logging.info("Processing train split SDM")
+    cuts_sdm = (
+        CutSet.from_manifests(**manifests_sdm["train"])
+        .trim_to_supervisions(keep_overlapping=False)
+        .modify_ids(lambda x: x + "-sdm")
+    )
+    _extract_feats(
+        cuts_sdm,
+        output_dir / "feats_train_sdm",
+        src_dir / "cuts_train_sdm.jsonl.gz",
+    )
+
+    logging.info("Processing train split GSS")
+    cuts_gss = (
+        CutSet.from_manifests(**manifests_gss["train"])
+        .trim_to_supervisions(keep_overlapping=False)
+        .modify_ids(lambda x: x + "-gss")
+    )
+    _extract_feats(
+        cuts_gss,
+        output_dir / "feats_train_gss",
+        src_dir / "cuts_train_gss.jsonl.gz",
+    )
+
+    logging.info("Preparing dev/test cuts: IHM, SDM, GSS (optional)")
+    for split in ["dev", "test"]:
+        logging.info(f"Processing {split} IHM")
+        cuts_ihm = (
+            CutSet.from_manifests(**manifests_ihm[split])
+            .trim_to_supervisions(keep_overlapping=False, keep_all_channels=False)
+            .compute_and_store_features_batch(
+                extractor=extractor,
+                storage_path=output_dir / f"feats_{split}_ihm",
+                manifest_path=src_dir / f"cuts_{split}_ihm.jsonl.gz",
+                batch_duration=5000,
+                num_workers=8,
+                storage_type=LilcomChunkyWriter,
+            )
+        )
+        logging.info(f"Processing {split} SDM")
+        cuts_sdm = (
+            CutSet.from_manifests(**manifests_sdm[split])
+            .trim_to_supervisions(keep_overlapping=False)
+            .compute_and_store_features_batch(
+                extractor=extractor,
+                storage_path=output_dir / f"feats_{split}_sdm",
+                manifest_path=src_dir / f"cuts_{split}_sdm.jsonl.gz",
+                batch_duration=500,
+                num_workers=4,
+                storage_type=LilcomChunkyWriter,
+            )
+        )
+        logging.info(f"Processing {split} GSS")
+        cuts_gss = (
+            CutSet.from_manifests(**manifests_gss[split])
+            .trim_to_supervisions(keep_overlapping=False)
+            .compute_and_store_features_batch(
+                extractor=extractor,
+                storage_path=output_dir / f"feats_{split}_gss",
+                manifest_path=src_dir / f"cuts_{split}_gss.jsonl.gz",
+                batch_duration=500,
+                num_workers=4,
+                storage_type=LilcomChunkyWriter,
+            )
+        )
+
+
+if __name__ == "__main__":
+    formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
+    logging.basicConfig(format=formatter, level=logging.INFO)
+
+    compute_fbank_ami()
diff --git a/egs/ami/ASR/prepare.sh b/egs/ami/ASR/prepare.sh
new file mode 100755
index 000000000..fb21a8ec6
--- /dev/null
+++ b/egs/ami/ASR/prepare.sh
@@ -0,0 +1,144 @@
+#!/usr/bin/env bash
+
+set -eou pipefail
+
+stage=-1
+stop_stage=100
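+# GSS enhancement (stage 3 below) requires a GPU and the GSS package;
+# set use_gss=false to skip that stage.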
+use_gss=true # Use GSS-based enhancement with the MDM setting
+
+# We assume dl_dir (download dir) contains the following
+# directories and files. If not, they will be downloaded
+# by this script automatically.
+#
+#  - $dl_dir/amicorpus
+#      You can find audio and transcripts in this path.
+#
+#  - $dl_dir/musan
+#      This directory contains the following directories downloaded from
+#      http://www.openslr.org/17/
+#
+#        - music
+#        - noise
+#        - speech
+#
+#  - $dl_dir/{LDC2004S13,LDC2005S13,LDC2004T19,LDC2005T19}
+#      These contain the Fisher English audio and transcripts. We will
+#      only use the transcripts as extra LM training data (similar to Kaldi).
+#
+dl_dir=$PWD/download
+
+. shared/parse_options.sh || exit 1
+
+# All files generated by this script are saved in "data".
+# You can safely remove "data" and rerun this script to regenerate it.
+mkdir -p data
+vocab_size=500
+
+log() {
+  # This function is from espnet
+  local fname=${BASH_SOURCE[1]##*/}
+  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
+}
+
+log "dl_dir: $dl_dir"
+
+if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
+  log "Stage 0: Download data"
+
+  # If you have pre-downloaded it to /path/to/amicorpus,
+  # you can create a symlink
+  #
+  #   ln -sfv /path/to/amicorpus $dl_dir/amicorpus
+  #
+  if [ ! -d $dl_dir/amicorpus ]; then
+    lhotse download ami --mic ihm $dl_dir/amicorpus
+    lhotse download ami --mic mdm $dl_dir/amicorpus
+  fi
+
+  # If you have pre-downloaded it to /path/to/musan,
+  # you can create a symlink
+  #
+  #   ln -sfv /path/to/musan $dl_dir/
+  #
+  if [ ! -d $dl_dir/musan ]; then
+    lhotse download musan $dl_dir
+  fi
+fi
+
+if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
+  log "Stage 1: Prepare AMI manifests"
+  # We assume that you have downloaded the AMI corpus
+  # to $dl_dir/amicorpus. We perform text normalization for the transcripts.
+  mkdir -p data/manifests
+  for mic in ihm sdm mdm; do
+    lhotse prepare ami --mic $mic --partition full-corpus-asr --normalize-text kaldi \
+      --max-words-per-segment 30 $dl_dir/amicorpus data/manifests/
+  done
+fi
+
+if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
+  log "Stage 2: Prepare musan manifest"
+  # We assume that you have downloaded the musan corpus
+  # to $dl_dir/musan
+  mkdir -p data/manifests
+  lhotse prepare musan $dl_dir/musan data/manifests
+fi
+
+if [ $stage -le 3 ] && [ $stop_stage -ge 3 ] && [ $use_gss = true ]; then
+  log "Stage 3: Apply GSS enhancement on MDM data (this stage requires a GPU)"
+  # We assume that you have installed the GSS package: https://github.com/desh2608/gss
+  local/prepare_ami_gss.sh data/manifests exp/ami_gss
+fi
+
+if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
+  log "Stage 4: Compute fbank features for AMI"
+  mkdir -p data/fbank
+  python local/compute_fbank_ami.py
+  log "Combine features from train splits"
+  lhotse combine data/manifests/cuts_train_{ihm,ihm_rvb,sdm,gss}.jsonl.gz - \
+    | shuf | gzip -c > data/manifests/cuts_train_all.jsonl.gz
+fi
+
+if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
+  log "Stage 5: Compute fbank features for musan"
+  mkdir -p data/fbank
+  python local/compute_fbank_musan.py
+fi
+
+if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
+  log "Stage 6: Dump transcripts for BPE model training"
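+  # AMI transcripts are shared across mic settings, so dumping only the SDM
+  # supervisions below is assumed to cover the full training text.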
+  mkdir -p data/lm
+  gunzip -c data/manifests/ami-sdm_supervisions_train.jsonl.gz \
+    | jq '.text' | sed 's:"::g' > data/lm/transcript_words.txt
+fi
+
+if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then
+  log "Stage 7: Prepare BPE based lang"
+
+  lang_dir=data/lang_bpe_${vocab_size}
+  mkdir -p $lang_dir
+
+  # Add special words to words.txt
+  echo "<eps> 0" > $lang_dir/words.txt
+  echo "!SIL 1" >> $lang_dir/words.txt
+  echo "<unk> 2" >> $lang_dir/words.txt
+
+  # Add regular words to words.txt
+  grep -o -E '\w+' data/lm/transcript_words.txt | sort -u | awk '{print $0,NR+2}' >> $lang_dir/words.txt
+
+  # Add remaining special word symbols expected by LM scripts.
+  num_words=$(wc -l < $lang_dir/words.txt)
+  echo "<s> ${num_words}" >> $lang_dir/words.txt
+  num_words=$(wc -l < $lang_dir/words.txt)
+  echo "</s> ${num_words}" >> $lang_dir/words.txt
+  num_words=$(wc -l < $lang_dir/words.txt)
+  echo "#0 ${num_words}" >> $lang_dir/words.txt
+
+  ./local/train_bpe_model.py \
+    --lang-dir $lang_dir \
+    --vocab-size $vocab_size \
+    --transcript data/lm/transcript_words.txt
+
+  if [ ! -f $lang_dir/L_disambig.pt ]; then
+    ./local/prepare_lang_bpe.py --lang-dir $lang_dir
+  fi
+fi
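+
+# Example invocations (a sketch, assuming shared/parse_options.sh follows the
+# usual Kaldi convention of mapping --foo-bar <value> to foo_bar=<value>):
+#
+#   ./prepare.sh                            # run all stages
+#   ./prepare.sh --stage 4 --stop-stage 4   # recompute AMI fbank features only
+#   ./prepare.sh --use-gss false            # skip GSS enhancement (stage 3)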