commit fa734e01a3 (parent 89c0e2e7ff)

chunked feature extraction by default
local/compute_fbank_gigaspeech.py

@@ -32,8 +32,8 @@ from pathlib import Path
 import torch
 from lhotse import (
     CutSet,
-    Fbank,
-    FbankConfig,
+    KaldifeatFbank,
+    KaldifeatFbankConfig,
     LilcomHdf5Writer,
     SupervisionSegment,
 )
@@ -53,12 +53,6 @@ def get_parser():
     parser = argparse.ArgumentParser(
         formatter_class=argparse.ArgumentDefaultsHelpFormatter
     )
-    parser.add_argument(
-        "--num-jobs",
-        type=int,
-        default=min(15, os.cpu_count()),
-        help="Number of parallel jobs.",
-    )
     parser.add_argument(
         "--context-window",
         type=float,
@@ -86,6 +80,19 @@ def get_parser():
         "pre-computation might currently consume excessive memory and time "
         "-- use on-the-fly feature extraction in the training script instead.",
     )
+    parser.add_argument(
+        "--num-workers",
+        type=int,
+        default=4,
+        help="Number of dataloading workers used for reading the audio.",
+    )
+    parser.add_argument(
+        "--batch-duration",
+        type=float,
+        default=600.0,
+        help="The maximum number of audio seconds in a batch."
+        "Determines batch size dynamically.",
+    )
     return parser

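Note: the new --batch-duration flag is what makes the batch size dynamic. Cuts are packed into a batch until their total duration would exceed the cap, so short cuts yield large batches and long cuts yield small ones. A toy illustration of that packing rule follows; make_batches is a hypothetical helper written for this note, not lhotse code:

def make_batches(durations, batch_duration=600.0):
    # Greedily pack cut durations into batches whose total stays <= the cap.
    batches, batch, total = [], [], 0.0
    for d in durations:
        if batch and total + d > batch_duration:
            batches.append(batch)
            batch, total = [], 0.0
        batch.append(d)
        total += d
    if batch:
        batches.append(batch)
    return batches

# 10 s cuts pack 60 to a batch; 60 s cuts pack only 10.
print(len(make_batches([10.0] * 120)[0]))  # -> 60
print(len(make_batches([60.0] * 120)[0]))  # -> 10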
@@ -119,7 +126,6 @@ def get_context_suffix(args):
 def compute_fbank_gigaspeech(args):
     src_dir = Path("data/manifests")
     output_dir = Path("data/fbank")
-    num_mel_bins = 80

     dataset_parts = (
         "XL",
@@ -134,10 +140,16 @@ def compute_fbank_gigaspeech(args):
     )
     assert manifests is not None

-    extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins))
+    if torch.cuda.is_available():
+        extractor = KaldifeatFbank(
+            KaldifeatFbankConfig(device="cuda"),
+        )
+    else:
+        extractor = KaldifeatFbank(
+            KaldifeatFbankConfig(device="cpu"),
+        )
     ctx_suffix = get_context_suffix(args)

-    with get_executor() as ex:  # Initialize the executor only once.
     for partition, m in manifests.items():
         raw_cuts_path = output_dir / f"cuts_{partition}_raw.jsonl.gz"
         if raw_cuts_path.is_file():
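Note: KaldifeatFbank and KaldifeatFbankConfig are lhotse's wrappers around the kaldifeat package, and the config's device field decides where the filterbank computation runs; the features themselves are the same on CPU and GPU. The if/else above could equally be collapsed into a single expression. A minimal sketch, assuming kaldifeat is installed (pip install kaldifeat):

import torch
from lhotse import KaldifeatFbank, KaldifeatFbankConfig

# Same selection as in the diff, written as one expression.
device = "cuda" if torch.cuda.is_available() else "cpu"
extractor = KaldifeatFbank(KaldifeatFbankConfig(device=device))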
@@ -200,11 +212,6 @@ def compute_fbank_gigaspeech(args):
             else args.context_window,
             context_direction=args.context_direction,
         )
-        if partition in ["L", "XL"]:
-            # Before storing manifests in, we want to pre-shuffle them,
-            # as the sampler won't be able to do it later in an
-            # efficient manner.
-            cut_set = cut_set.shuffle()

         if args.precomputed_features:
             # Extract the features after cutting large recordings into
@@ -217,12 +224,13 @@ def compute_fbank_gigaspeech(args):
             # Therefore, we sacrifice some storage for the ability to
             # precompute features on shorter chunks,
             # without memory blow-ups.
-            cut_set = cut_set.compute_and_store_features(
+            if torch.cuda.is_available():
+                logging.info("GPU detected, do the CUDA extraction.")
+                cut_set = cut_set.compute_and_store_features_batch(
                     extractor=extractor,
                     storage_path=f"{output_dir}/feats_{partition}",
-                    # when an executor is specified, make more partitions
-                    num_jobs=args.num_jobs if ex is None else 80,
-                    executor=ex,
+                    num_workers=args.num_workers,
+                    batch_duration=args.batch_duration,
                     storage_type=LilcomHdf5Writer,
                 )
         cut_set.to_file(cuts_path)
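Note: the two lhotse APIs swapped here parallelize differently. compute_and_store_features fans cuts out across num_jobs worker processes (optionally a distributed executor), while compute_and_store_features_batch stays in the current process and feeds the extractor with dataloader batches capped at batch_duration seconds of audio, read by num_workers background workers; that is why the executor and num_jobs arguments disappear. A minimal sketch of the new call, with the XL partition paths used as illustrative values:

from lhotse import CutSet, KaldifeatFbank, KaldifeatFbankConfig, LilcomHdf5Writer

# Load raw (feature-less) cuts, as written earlier by this script.
cuts = CutSet.from_file("data/fbank/cuts_XL_raw.jsonl.gz")
extractor = KaldifeatFbank(KaldifeatFbankConfig(device="cuda"))
cuts = cuts.compute_and_store_features_batch(
    extractor=extractor,
    storage_path="data/fbank/feats_XL",
    num_workers=4,          # background audio-reading workers
    batch_duration=600.0,   # at most ~600 s of audio per extraction batch
    storage_type=LilcomHdf5Writer,
)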
@@ -231,6 +239,39 @@ def compute_fbank_gigaspeech(args):
         # whether it needs to load the raw cuts from disk or not.
         del cut_set

+    # In case the user insists on CPU extraction
+    if not torch.cuda.is_available():
+        with get_executor() as ex:  # Initialize the executor only once.
+            for partition, m in manifests.items():
+                cuts_path = (
+                    output_dir / f"cuts_{partition}{ctx_suffix}.jsonl.gz"
+                )
+                cut_set = CutSet.from_file(cuts_path)
+                if args.precomputed_features:
+                    # Extract the features after cutting large recordings into
+                    # smaller cuts.
+                    # Note:
+                    # we support very efficient "chunked" feature reads with
+                    # the argument `storage_type=ChunkedLilcomHdf5Writer`,
+                    # but we don't support efficient data augmentation and
+                    # feature computation for long recordings yet.
+                    # Therefore, we sacrifice some storage for the ability to
+                    # precompute features on shorter chunks,
+                    # without memory blow-ups.
+                    logging.info(
+                        "GPU not detected, we recommend you skip the "
+                        "extraction and do on-the-fly extraction "
+                        "while training."
+                    )
+                    cut_set = cut_set.compute_and_store_features(
+                        extractor=extractor,
+                        storage_path=f"{output_dir}/feats_{partition}",
+                        # when an executor is specified, make more partitions
+                        num_jobs=min(15, os.cpu_count()) if ex is None else 80,
+                        executor=ex,
+                        storage_type=LilcomHdf5Writer,
+                    )


 def main():
     formatter = (
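Note: the log message in the CPU branch recommends skipping precomputation entirely and extracting features on the fly during training. With lhotse's dataset classes that looks roughly like the sketch below; the class names are lhotse's, but treat the exact wiring as illustrative rather than this recipe's actual training code:

from lhotse import Fbank, FbankConfig
from lhotse.dataset import K2SpeechRecognitionDataset, OnTheFlyFeatures

# Features are computed per batch inside the dataloader,
# so no feats_* files are ever written to disk.
dataset = K2SpeechRecognitionDataset(
    input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80))),
)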
prepare.sh

@@ -76,6 +76,7 @@ if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
     echo "$0: Error, invalid $dl_dir/password."
     exit 1;
   fi
+  # Download XL, DEV and TEST sets by default.
   lhotse download gigaspeech --subset auto --host tsinghua \
     $dl_dir/password $dl_dir/GigaSpeech
 fi
@@ -110,8 +111,15 @@ fi
 if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
   log "Stage 3: Compute fbank for GigaSpeech"
   mkdir -p data/fbank
-  ./local/compute_fbank_gigaspeech.py --num-jobs $nj --context-window 0.0 \
-    --context-direction center --precomputed-features False
+  # We assume you have a GPU card and implement CUDA extraction here.
+  # Since without CUDA it would take too much time to compute feats
+  # for the L or XL subset, we recommend --precomputed-features False.
+  #
+  # We assume you have installed kaldifeat; if not, please install
+  # it using: pip install kaldifeat
+  ./local/compute_fbank_gigaspeech.py --precomputed-features True \
+    --num-workers 4 --batch-duration 600.0 \
+    --context-window 0.0 --context-direction center
 fi

 if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
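Note: stage 3 now assumes both a CUDA-capable GPU and the kaldifeat package. A quick preflight check before launching the hours-long XL extraction (illustrative, not part of the recipe):

import torch

try:
    import kaldifeat  # noqa: F401  (pip install kaldifeat)
except ImportError:
    raise SystemExit("kaldifeat is missing; install it with: pip install kaldifeat")

if not torch.cuda.is_available():
    print("No GPU detected; consider on-the-fly extraction during training instead.")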
@@ -155,7 +163,7 @@ if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
     sed -i 's/[ ][ ]*/ /g' $lang_dir/transcript_words.txt
   fi

-  cat $lang_dir/transcript_words.txt | sed 's| |\n|g' \
+  cat $lang_dir/transcript_words.txt | sed 's/ /\n/g' \
     | sort -u | sed '/^$/d' > $lang_dir/words.txt
   (echo '!SIL'; echo '<SPOKEN_NOISE>'; echo '<UNK>'; ) |
     cat - $lang_dir/words.txt | sort | uniq | awk '