Use default storage_type

This commit is contained in:
Guanbo Wang 2022-04-11 20:10:50 -04:00
parent 407998267c
commit ba245aa60f
3 changed files with 0 additions and 6 deletions

View File

@@ -24,7 +24,6 @@ from lhotse import (
     CutSet,
     KaldifeatFbank,
     KaldifeatFbankConfig,
-    LilcomHdf5Writer,
 )
 # Torch's multithreaded behavior needs to be disabled or
@@ -70,7 +69,6 @@ def compute_fbank_gigaspeech_dev_test():
             storage_path=f"{in_out_dir}/feats_{partition}",
             num_workers=num_workers,
             batch_duration=batch_duration,
-            storage_type=LilcomHdf5Writer,
         )
         cut_set = cut_set.trim_to_supervisions(
             keep_overlapping=False, min_duration=None

View File

@@ -23,7 +23,6 @@ from pathlib import Path
 import torch
 from lhotse import (
-    ChunkedLilcomHdf5Writer,
     CutSet,
     KaldifeatFbank,
     KaldifeatFbankConfig,
@@ -124,7 +123,6 @@ def compute_fbank_gigaspeech_splits(args):
             storage_path=f"{output_dir}/feats_XL_{idx}",
             num_workers=args.num_workers,
             batch_duration=args.batch_duration,
-            storage_type=ChunkedLilcomHdf5Writer,
         )
         logging.info("About to split cuts into smaller chunks.")

View File

@@ -24,7 +24,6 @@ from lhotse import (
     CutSet,
     KaldifeatFbank,
     KaldifeatFbankConfig,
-    LilcomHdf5Writer,
     combine,
 )
 from lhotse.recipes.utils import read_manifests_if_cached
@@ -86,7 +85,6 @@ def compute_fbank_musan():
                 storage_path=f"{output_dir}/feats_musan",
                 num_workers=num_workers,
                 batch_duration=batch_duration,
-                storage_type=LilcomHdf5Writer,
             )
         )
     musan_cuts.to_json(musan_cuts_path)