mirror of
https://github.com/k2-fsa/icefall.git
synced 2025-08-09 10:02:22 +00:00
Musan implementation for ReazonSpeech (#1988)
This commit is contained in:
parent
9fd0f2dc1d
commit
5fe13078cc
154
egs/reazonspeech/ASR/local/compute_fbank_musan.py
Executable file
154
egs/reazonspeech/ASR/local/compute_fbank_musan.py
Executable file
@ -0,0 +1,154 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
|
||||||
|
#
|
||||||
|
# See ../../../../LICENSE for clarification regarding multiple authors
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
This file computes fbank features of the musan dataset.
|
||||||
|
It looks for manifests in the directory data/manifests.
|
||||||
|
|
||||||
|
The generated fbank features are saved in data/manifests.
|
||||||
|
"""
|
||||||
|
import argparse
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import torch
|
||||||
|
from lhotse import (
|
||||||
|
CutSet,
|
||||||
|
Fbank,
|
||||||
|
FbankConfig,
|
||||||
|
LilcomChunkyWriter,
|
||||||
|
MonoCut,
|
||||||
|
WhisperFbank,
|
||||||
|
WhisperFbankConfig,
|
||||||
|
combine,
|
||||||
|
)
|
||||||
|
from lhotse.recipes.utils import read_manifests_if_cached
|
||||||
|
|
||||||
|
from icefall.utils import get_executor, str2bool
|
||||||
|
|
||||||
|
# Torch's multithreaded behavior needs to be disabled or
# it wastes a lot of CPU and slows things down.
# Do this outside of main() in case it needs to take effect
# even when we are not invoking the main (e.g. when spawning subprocesses).
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
|
||||||
|
|
||||||
|
|
||||||
|
def is_cut_long(c: MonoCut) -> bool:
    """Return True if the cut lasts longer than 5 seconds."""
    return 5 < c.duration
|
||||||
|
|
||||||
|
|
||||||
|
def compute_fbank_musan(
    num_mel_bins: int = 80,
    whisper_fbank: bool = False,
    output_dir: str = "data/manifests",
):
    """Extract fbank features for the musan corpus.

    Reads the musan recording manifests from data/manifests, chops the
    recordings into windows, computes features, and saves the resulting
    cut manifest as ``musan_cuts.jsonl.gz`` under ``output_dir``.
    Skips everything if the cut manifest already exists.

    Args:
      num_mel_bins: Number of mel bins for the feature extractor.
      whisper_fbank: When True, use WhisperFbank (on CUDA) instead of Fbank.
      output_dir: Directory where features and the cut manifest are written.
    """
    manifest_dir = Path("data/manifests")
    out_dir = Path(output_dir)
    parallelism = min(15, os.cpu_count())

    parts = (
        "music",
        "speech",
        "noise",
    )
    manifests = read_manifests_if_cached(
        dataset_parts=parts,
        output_dir=manifest_dir,
        prefix="musan",
        suffix="jsonl.gz",
    )
    assert manifests is not None

    assert len(manifests) == len(parts), (
        len(manifests),
        len(parts),
        list(manifests.keys()),
        parts,
    )

    musan_cuts_path = out_dir / "musan_cuts.jsonl.gz"

    if musan_cuts_path.is_file():
        logging.info(f"{musan_cuts_path} already exists - skipping")
        return

    logging.info("Extracting features for Musan")

    if whisper_fbank:
        extractor = WhisperFbank(
            WhisperFbankConfig(num_filters=num_mel_bins, device="cuda")
        )
    else:
        extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins))

    with get_executor() as ex:  # Initialize the executor only once.
        # Create chunks of Musan with duration 5 - 10 seconds: window at
        # 10 s, then drop anything not strictly longer than 5 s.
        recordings = combine(p["recordings"] for p in manifests.values())
        cuts = CutSet.from_manifests(recordings=recordings)
        cuts = cuts.cut_into_windows(10.0).filter(is_cut_long)
        cuts = cuts.compute_and_store_features(
            extractor=extractor,
            storage_path=f"{out_dir}/musan_feats",
            num_jobs=parallelism if ex is None else 80,
            executor=ex,
            storage_type=LilcomChunkyWriter,
        )
        cuts.to_file(musan_cuts_path)
|
||||||
|
|
||||||
|
|
||||||
|
def get_args():
    """Build the argument parser and return the parsed CLI arguments."""
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "--num-mel-bins",
        type=int,
        default=80,
        help="""The number of mel bins for Fbank""",
    )
    ap.add_argument(
        "--whisper-fbank",
        type=str2bool,
        default=False,
        help="Use WhisperFbank instead of Fbank. Default: False.",
    )
    ap.add_argument(
        "--output-dir",
        type=str,
        default="data/manifests",
        help="Output directory. Default: data/manifests.",
    )
    return ap.parse_args()
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Configure logging before anything else so progress is visible.
    log_format = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
    logging.basicConfig(format=log_format, level=logging.INFO)

    cli_args = get_args()
    compute_fbank_musan(
        num_mel_bins=cli_args.num_mel_bins,
        whisper_fbank=cli_args.whisper_fbank,
        output_dir=cli_args.output_dir,
    )
|
@ -180,7 +180,10 @@ class ReazonSpeechAsrDataModule:
|
|||||||
)
|
)
|
||||||
|
|
||||||
def train_dataloaders(
|
def train_dataloaders(
|
||||||
self, cuts_train: CutSet, sampler_state_dict: Optional[Dict[str, Any]] = None
|
self,
|
||||||
|
cuts_train: CutSet,
|
||||||
|
sampler_state_dict: Optional[Dict[str, Any]] = None,
|
||||||
|
cuts_musan: Optional[CutSet] = None,
|
||||||
) -> DataLoader:
|
) -> DataLoader:
|
||||||
"""
|
"""
|
||||||
Args:
|
Args:
|
||||||
@ -191,6 +194,14 @@ class ReazonSpeechAsrDataModule:
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
transforms = []
|
transforms = []
|
||||||
|
if cuts_musan is not None:
|
||||||
|
logging.info("Enable MUSAN")
|
||||||
|
transforms.append(
|
||||||
|
CutMix(cuts=cuts_musan, p=0.5, snr=(10, 20), preserve_id=True)
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logging.info("Disable MUSAN")
|
||||||
|
|
||||||
input_transforms = []
|
input_transforms = []
|
||||||
|
|
||||||
if self.args.enable_spec_aug:
|
if self.args.enable_spec_aug:
|
||||||
|
@ -17,8 +17,16 @@ stop_stage=100
|
|||||||
# You can find FLAC files in this directory.
|
# You can find FLAC files in this directory.
|
||||||
# You can download them from https://huggingface.co/datasets/reazon-research/reazonspeech
|
# You can download them from https://huggingface.co/datasets/reazon-research/reazonspeech
|
||||||
#
|
#
|
||||||
# - $dl_dir/dataset.json
|
# - $dl_dir/ReazonSpeech/dataset.json
|
||||||
# The metadata of the ReazonSpeech dataset.
|
# The metadata of the ReazonSpeech dataset.
|
||||||
|
#
|
||||||
|
# - $dl_dir/musan
|
||||||
|
# This directory contains the following directories downloaded from
|
||||||
|
# http://www.openslr.org/17/
|
||||||
|
#
|
||||||
|
# - music
|
||||||
|
# - noise
|
||||||
|
# - speech
|
||||||
|
|
||||||
dl_dir=$PWD/download
|
dl_dir=$PWD/download
|
||||||
|
|
||||||
@ -48,7 +56,15 @@ if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
|
|||||||
#
|
#
|
||||||
if [ ! -d $dl_dir/ReazonSpeech/downloads ]; then
|
if [ ! -d $dl_dir/ReazonSpeech/downloads ]; then
|
||||||
# Download small-v1 by default.
|
# Download the medium subset by default.
|
||||||
lhotse download reazonspeech --subset small-v1 $dl_dir
|
lhotse download reazonspeech --subset medium $dl_dir
|
||||||
|
fi
|
||||||
|
# If you have pre-downloaded it to /path/to/musan,
|
||||||
|
# you can create a symlink
|
||||||
|
#
|
||||||
|
# ln -sfv /path/to/musan $dl_dir/
|
||||||
|
#
|
||||||
|
if [ ! -d $dl_dir/musan ]; then
|
||||||
|
lhotse download musan $dl_dir
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@ -64,7 +80,18 @@ if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
|
if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
|
||||||
log "Stage 2: Compute ReazonSpeech fbank"
|
log "Stage 2: Prepare musan manifest"
|
||||||
|
# We assume that you have downloaded the musan corpus
|
||||||
|
# to $dl_dir/musan
|
||||||
|
mkdir -p data/manifests
|
||||||
|
if [ ! -e data/manifests/.musan_prep.done ]; then
|
||||||
|
lhotse prepare musan $dl_dir/musan data/manifests
|
||||||
|
touch data/manifests/.musan_prep.done
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
|
||||||
|
log "Stage 3: Compute ReazonSpeech fbank"
|
||||||
if [ ! -e data/manifests/.reazonspeech-validated.done ]; then
|
if [ ! -e data/manifests/.reazonspeech-validated.done ]; then
|
||||||
python local/compute_fbank_reazonspeech.py --manifest-dir data/manifests
|
python local/compute_fbank_reazonspeech.py --manifest-dir data/manifests
|
||||||
python local/validate_manifest.py --manifest data/manifests/reazonspeech_cuts_train.jsonl.gz
|
python local/validate_manifest.py --manifest data/manifests/reazonspeech_cuts_train.jsonl.gz
|
||||||
@ -74,13 +101,22 @@ if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
|
|||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
|
if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
|
||||||
log "Stage 3: Prepare ReazonSpeech lang_char"
|
log "Stage 4: Compute fbank for musan"
|
||||||
|
mkdir -p data/manifests
|
||||||
|
if [ ! -e data/manifests/.musan_fbank.done ]; then
|
||||||
|
./local/compute_fbank_musan.py
|
||||||
|
touch data/manifests/.musan_fbank.done
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
|
||||||
|
log "Stage 5: Prepare ReazonSpeech lang_char"
|
||||||
python local/prepare_lang_char.py data/manifests/reazonspeech_cuts_train.jsonl.gz
|
python local/prepare_lang_char.py data/manifests/reazonspeech_cuts_train.jsonl.gz
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
|
if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
|
||||||
log "Stage 4: Show manifest statistics"
|
log "Stage 6: Show manifest statistics"
|
||||||
python local/display_manifest_statistics.py --manifest-dir data/manifests > data/manifests/manifest_statistics.txt
|
python local/display_manifest_statistics.py --manifest-dir data/manifests > data/manifests/manifest_statistics.txt
|
||||||
cat data/manifests/manifest_statistics.txt
|
cat data/manifests/manifest_statistics.txt
|
||||||
fi
|
fi
|
||||||
|
@ -65,6 +65,7 @@ import torch.nn as nn
|
|||||||
from asr_datamodule import ReazonSpeechAsrDataModule
|
from asr_datamodule import ReazonSpeechAsrDataModule
|
||||||
from decoder import Decoder
|
from decoder import Decoder
|
||||||
from joiner import Joiner
|
from joiner import Joiner
|
||||||
|
from lhotse import load_manifest
|
||||||
from lhotse.cut import Cut
|
from lhotse.cut import Cut
|
||||||
from lhotse.dataset.sampling.base import CutSampler
|
from lhotse.dataset.sampling.base import CutSampler
|
||||||
from lhotse.utils import fix_random_seed
|
from lhotse.utils import fix_random_seed
|
||||||
@ -1219,8 +1220,23 @@ def run(rank, world_size, args):
|
|||||||
else:
|
else:
|
||||||
sampler_state_dict = None
|
sampler_state_dict = None
|
||||||
|
|
||||||
|
if args.enable_musan:
|
||||||
|
musan_path = Path(args.manifest_dir) / "musan_cuts.jsonl.gz"
|
||||||
|
if musan_path.exists():
|
||||||
|
cuts_musan = load_manifest(musan_path)
|
||||||
|
logging.info(f"Loaded MUSAN manifest from {musan_path}")
|
||||||
|
else:
|
||||||
|
logging.warning(
|
||||||
|
f"MUSAN manifest not found at {musan_path}, disabling MUSAN augmentation"
|
||||||
|
)
|
||||||
|
cuts_musan = None
|
||||||
|
else:
|
||||||
|
cuts_musan = None
|
||||||
|
|
||||||
train_dl = reazonspeech_corpus.train_dataloaders(
|
train_dl = reazonspeech_corpus.train_dataloaders(
|
||||||
train_cuts, sampler_state_dict=sampler_state_dict
|
train_cuts,
|
||||||
|
sampler_state_dict=sampler_state_dict,
|
||||||
|
cuts_musan=cuts_musan,
|
||||||
)
|
)
|
||||||
|
|
||||||
valid_cuts = reazonspeech_corpus.valid_cuts()
|
valid_cuts = reazonspeech_corpus.valid_cuts()
|
||||||
|
Loading…
x
Reference in New Issue
Block a user