chunked feature extraction by default

mirror of https://github.com/k2-fsa/icefall.git
commit fa734e01a3, parent 89c0e2e7ff

The commit touches two files: the GigaSpeech fbank-computation script and the recipe's data-preparation shell script.
@@ -32,8 +32,8 @@ from pathlib import Path
 import torch
 from lhotse import (
     CutSet,
-    Fbank,
-    FbankConfig,
+    KaldifeatFbank,
+    KaldifeatFbankConfig,
     LilcomHdf5Writer,
     SupervisionSegment,
 )
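
The new imports swap lhotse's default Fbank extractor for its wrapper around the kaldifeat package, which computes Kaldi-compatible fbank on either CPU or GPU. As a minimal sketch of the underlying kaldifeat API (based on kaldifeat's README; the waveform below is fake audio):

import torch
import kaldifeat  # pip install kaldifeat

opts = kaldifeat.FbankOptions()
opts.device = torch.device("cpu")  # use torch.device("cuda") on a GPU machine
fbank = kaldifeat.Fbank(opts)

wave = torch.randn(16000)  # 1 second of fake 16 kHz audio
features = fbank(wave)     # tensor of shape (num_frames, num_mel_bins)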
@@ -53,12 +53,6 @@ def get_parser():
     parser = argparse.ArgumentParser(
         formatter_class=argparse.ArgumentDefaultsHelpFormatter
     )
-    parser.add_argument(
-        "--num-jobs",
-        type=int,
-        default=min(15, os.cpu_count()),
-        help="Number of parallel jobs.",
-    )
     parser.add_argument(
         "--context-window",
         type=float,
@@ -86,6 +80,19 @@ def get_parser():
         "pre-computation might currently consume excessive memory and time "
         "-- use on-the-fly feature extraction in the training script instead.",
     )
+    parser.add_argument(
+        "--num-workers",
+        type=int,
+        default=4,
+        help="Number of dataloading workers used for reading the audio.",
+    )
+    parser.add_argument(
+        "--batch-duration",
+        type=float,
+        default=600.0,
+        help="The maximum number of audio seconds in a batch. "
+        "Determines batch size dynamically.",
+    )
     return parser
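
--batch-duration caps the total seconds of audio per extraction batch rather than the number of cuts, so the batch size adapts to utterance length. A toy illustration of that policy (plain Python, not the lhotse implementation):

def duration_batches(durations, batch_duration=600.0):
    """Group items so each batch holds at most `batch_duration` seconds."""
    batch, total = [], 0.0
    for d in durations:
        if batch and total + d > batch_duration:
            yield batch
            batch, total = [], 0.0
        batch.append(d)
        total += d
    if batch:
        yield batch  # flush the last, possibly short, batch

print(list(duration_batches([250.0, 250.0, 200.0, 400.0])))
# [[250.0, 250.0], [200.0, 400.0]] -- many short cuts or few long ones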
@@ -119,7 +126,6 @@ def get_context_suffix(args):
 def compute_fbank_gigaspeech(args):
     src_dir = Path("data/manifests")
     output_dir = Path("data/fbank")
-    num_mel_bins = 80
 
     dataset_parts = (
         "XL",
@@ -134,10 +140,16 @@ def compute_fbank_gigaspeech(args):
     )
     assert manifests is not None
 
-    extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins))
+    if torch.cuda.is_available():
+        extractor = KaldifeatFbank(
+            KaldifeatFbankConfig(device="cuda"),
+        )
+    else:
+        extractor = KaldifeatFbank(
+            KaldifeatFbankConfig(device="cpu"),
+        )
     ctx_suffix = get_context_suffix(args)
 
     with get_executor() as ex:  # Initialize the executor only once.
         for partition, m in manifests.items():
             raw_cuts_path = output_dir / f"cuts_{partition}_raw.jsonl.gz"
             if raw_cuts_path.is_file():
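
Either branch yields the same KaldifeatFbank extractor; only the device differs, so the downstream code is unchanged. A quick sketch of exercising the extractor on its own, assuming lhotse's kaldifeat wrapper accepts a raw tensor in its generic FeatureExtractor.extract() API (feature dimensions follow the default mel options):

import torch
from lhotse import KaldifeatFbank, KaldifeatFbankConfig

device = "cuda" if torch.cuda.is_available() else "cpu"
extractor = KaldifeatFbank(KaldifeatFbankConfig(device=device))

samples = torch.randn(16000)  # 1 second of fake 16 kHz audio
feats = extractor.extract(samples, sampling_rate=16000)
print(feats.shape)  # (num_frames, num_mel_bins)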
@@ -200,11 +212,6 @@ def compute_fbank_gigaspeech(args):
                 else args.context_window,
                 context_direction=args.context_direction,
             )
-            if partition in ["L", "XL"]:
-                # Before storing manifests, we want to pre-shuffle them,
-                # as the sampler won't be able to do it later in an
-                # efficient manner.
-                cut_set = cut_set.shuffle()
 
             if args.precomputed_features:
                 # Extract the features after cutting large recordings into
@@ -217,12 +224,13 @@ def compute_fbank_gigaspeech(args):
                 # Therefore, we sacrifice some storage for the ability to
                 # precompute features on shorter chunks,
                 # without memory blow-ups.
-                cut_set = cut_set.compute_and_store_features(
-                    extractor=extractor,
-                    storage_path=f"{output_dir}/feats_{partition}",
-                    # when an executor is specified, make more partitions
-                    num_jobs=args.num_jobs if ex is None else 80,
-                    executor=ex,
-                    storage_type=LilcomHdf5Writer,
-                )
+                if torch.cuda.is_available():
+                    logging.info("GPU detected, do the CUDA extraction.")
+                    cut_set = cut_set.compute_and_store_features_batch(
+                        extractor=extractor,
+                        storage_path=f"{output_dir}/feats_{partition}",
+                        num_workers=args.num_workers,
+                        batch_duration=args.batch_duration,
+                        storage_type=LilcomHdf5Writer,
+                    )
                 cut_set.to_file(cuts_path)
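
compute_and_store_features_batch moves audio reading into a dataloader with --num-workers processes and feeds the GPU batches capped at --batch-duration seconds, so memory stays bounded even on the XL subset. A standalone sketch of the call as used above (the DEV paths are placeholders following the recipe's naming):

from lhotse import CutSet, KaldifeatFbank, KaldifeatFbankConfig, LilcomHdf5Writer

cuts = CutSet.from_file("data/fbank/cuts_DEV_raw.jsonl.gz")  # placeholder path
extractor = KaldifeatFbank(KaldifeatFbankConfig(device="cuda"))
cuts = cuts.compute_and_store_features_batch(
    extractor=extractor,
    storage_path="data/fbank/feats_DEV",
    num_workers=4,         # dataloading processes reading the audio
    batch_duration=600.0,  # at most ~600 s of audio per GPU batch
    storage_type=LilcomHdf5Writer,
)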
@@ -231,6 +239,39 @@ def compute_fbank_gigaspeech(args):
             # whether it needs to load the raw cuts from disk or not.
             del cut_set
 
+    # In case the user insists on CPU extraction
+    if not torch.cuda.is_available():
+        with get_executor() as ex:  # Initialize the executor only once.
+            for partition, m in manifests.items():
+                cuts_path = (
+                    output_dir / f"cuts_{partition}{ctx_suffix}.jsonl.gz"
+                )
+                cut_set = CutSet.from_file(cuts_path)
+                if args.precomputed_features:
+                    # Extract the features after cutting large recordings into
+                    # smaller cuts.
+                    # Note:
+                    # we support very efficient "chunked" feature reads with
+                    # the argument `storage_type=ChunkedLilcomHdf5Writer`,
+                    # but we don't support efficient data augmentation and
+                    # feature computation for long recordings yet.
+                    # Therefore, we sacrifice some storage for the ability to
+                    # precompute features on shorter chunks,
+                    # without memory blow-ups.
+                    logging.info(
+                        "GPU not detected, we recommend you skip the "
+                        "extraction and do on-the-fly extraction "
+                        "while training."
+                    )
+                    cut_set = cut_set.compute_and_store_features(
+                        extractor=extractor,
+                        storage_path=f"{output_dir}/feats_{partition}",
+                        # when an executor is specified, make more partitions
+                        num_jobs=min(15, os.cpu_count()) if ex is None else 80,
+                        executor=ex,
+                        storage_type=LilcomHdf5Writer,
+                    )
+
 
 def main():
     formatter = (
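
For the CPU case the script now merely warns and recommends skipping pre-computation; features can instead be computed inside the training dataloader. A hedged sketch of that on-the-fly setup with lhotse's dataset classes (class names follow current lhotse; the cuts path and max_duration are illustrative):

from torch.utils.data import DataLoader
from lhotse import CutSet, Fbank, FbankConfig
from lhotse.dataset import (
    K2SpeechRecognitionDataset,
    OnTheFlyFeatures,
    SimpleCutSampler,
)

cuts = CutSet.from_file("data/fbank/cuts_DEV.jsonl.gz")  # illustrative path
dataset = K2SpeechRecognitionDataset(
    # fbank is computed by the dataloader workers during training
    input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80)))
)
sampler = SimpleCutSampler(cuts, max_duration=200.0)  # seconds per batch
dloader = DataLoader(dataset, sampler=sampler, batch_size=None, num_workers=4)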
The remaining hunks are in the recipe's data-preparation shell script:

@@ -76,6 +76,7 @@ if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
     echo "$0: Error, invalid $dl_dir/password."
     exit 1;
   fi
+  # Download XL, DEV and TEST sets by default.
   lhotse download gigaspeech --subset auto --host tsinghua \
     $dl_dir/password $dl_dir/GigaSpeech
 fi
@@ -110,8 +111,15 @@ fi
 if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
   log "Stage 3: Compute fbank for GigaSpeech"
   mkdir -p data/fbank
-  ./local/compute_fbank_gigaspeech.py --num-jobs $nj --context-window 0.0 \
-    --context-direction center --precomputed-features False
+  # We assume you have a GPU card and implement CUDA extraction here.
+  # Without CUDA it would take too much time to compute feats for the
+  # L or XL subset; in that case we recommend --precomputed-features False.
+  #
+  # We assume you have installed kaldifeat; if not, please install
+  # it using: pip install kaldifeat
+  ./local/compute_fbank_gigaspeech.py --precomputed-features True \
+    --num-workers 4 --batch-duration 600.0 \
+    --context-window 0.0 --context-direction center
 fi
 
 if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
@@ -155,7 +163,7 @@ if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
     sed -i 's/[ ][ ]*/ /g' $lang_dir/transcript_words.txt
   fi
 
-  cat $lang_dir/transcript_words.txt | sed 's| |\n|g' \
+  cat $lang_dir/transcript_words.txt | sed 's/ /\n/g' \
     | sort -u | sed '/^$/d' > $lang_dir/words.txt
   (echo '!SIL'; echo '<SPOKEN_NOISE>'; echo '<UNK>'; ) |
     cat - $lang_dir/words.txt | sort | uniq | awk '