From c10aec56569749aee8a1acceb33c8c9922966005 Mon Sep 17 00:00:00 2001
From: Mingshuang Luo <37799481+luomingshuang@users.noreply.github.com>
Date: Wed, 29 Jun 2022 17:45:30 +0800
Subject: [PATCH] load_manifest_lazy for asr_datamodule.py (#453)

---
 .../asr_datamodule.py | 22 ++++------------------
 1 file changed, 4 insertions(+), 18 deletions(-)

diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless2/asr_datamodule.py b/egs/wenetspeech/ASR/pruned_transducer_stateless2/asr_datamodule.py
index 200a694d6..10c953e3b 100644
--- a/egs/wenetspeech/ASR/pruned_transducer_stateless2/asr_datamodule.py
+++ b/egs/wenetspeech/ASR/pruned_transducer_stateless2/asr_datamodule.py
@@ -192,13 +192,6 @@ class WenetSpeechAsrDataModule:
             "with training dataset. ",
         )
 
-        group.add_argument(
-            "--lazy-load",
-            type=str2bool,
-            default=True,
-            help="lazily open CutSets to avoid OOM (for L|XL subset)",
-        )
-
         group.add_argument(
             "--training-subset",
             type=str,
@@ -420,17 +413,10 @@ class WenetSpeechAsrDataModule:
     @lru_cache()
     def train_cuts(self) -> CutSet:
         logging.info("About to get train cuts")
-        if self.args.lazy_load:
-            logging.info("use lazy cuts")
-            cuts_train = CutSet.from_jsonl_lazy(
-                self.args.manifest_dir
-                / f"cuts_{self.args.training_subset}.jsonl.gz"
-            )
-        else:
-            cuts_train = CutSet.from_file(
-                self.args.manifest_dir
-                / f"cuts_{self.args.training_subset}.jsonl.gz"
-            )
+        cuts_train = load_manifest_lazy(
+            self.args.manifest_dir
+            / f"cuts_{self.args.training_subset}.jsonl.gz"
+        )
         return cuts_train
 
     @lru_cache()