fix option conflicts between libri and giga

parent 946d6ea00b
commit 343f99305f
egs/librispeech/ASR/gigaspeech_datamodule.py
@@ -25,10 +25,10 @@ from icefall.dataset.datamodule import DataModule
 
 
 def get_context_suffix(args):
-    if args.context_window is None or args.context_window <= 0.0:
+    if args.giga_context_window is None or args.giga_context_window <= 0.0:
         ctx_suffix = ""
     else:
-        ctx_suffix = f"_{args.context_direction}{args.context_window}"
+        ctx_suffix = f"_{args.giga_context_direction}{args.giga_context_window}"
     return ctx_suffix
 
 
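After the rename, get_context_suffix reads only the giga_-prefixed attributes, so it now expects a namespace produced by the updated parser below. A quick illustrative check (a sketch assuming the function above is in scope; the Namespace values are hypothetical):

from argparse import Namespace

args = Namespace(giga_context_window=0.5, giga_context_direction='center')
assert get_context_suffix(args) == "_center0.5"

args = Namespace(giga_context_window=0.0, giga_context_direction='center')
assert get_context_suffix(args) == ""  # a zero window produces no suffix
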
@@ -53,6 +53,8 @@ class GigaSpeechAsrDataModule(DataModule):
 
     @classmethod
     def add_arguments(cls, parser: argparse.ArgumentParser):
+        subparsers = parser.add_subparsers(help='separate gigaspeech arguments from librispeech arguments')
+        parser = subparsers.add_parser(name='giga')
         super().add_arguments(parser)
         group = parser.add_argument_group(
             title='ASR data related options',
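The two lines added here are the heart of the fix: the GigaSpeech options move onto their own giga subparser, and (in the hunks below) each option gets a dest="giga_..." rename so its parsed value cannot clobber a LibriSpeech attribute of the same name. A minimal self-contained sketch of the pattern (illustrative names, not the icefall code):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--max-duration', type=int, default=200)  # "libri" flag

# GigaSpeech flags live on a subparser, so the identical option string
# does not conflict; dest= keeps its value in a separate attribute.
subparsers = parser.add_subparsers(help='separate gigaspeech arguments')
giga = subparsers.add_parser('giga')
giga.add_argument('--max-duration', dest='giga_max_duration', type=int, default=500)

# Both values survive in a single namespace:
args = parser.parse_args(['--max-duration', '300', 'giga', '--max-duration', '700'])
print(args.max_duration, args.giga_max_duration)  # -> 300 700
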
@@ -62,17 +64,20 @@ class GigaSpeechAsrDataModule(DataModule):
         )
         group.add_argument(
             '--feature-dir',
+            dest="giga_feature_dir",
             type=Path,
-            default=Path('exp/data'),
+            default=Path('exp/giga_data'),
             help='Path to directory with train/valid/test cuts.'
         )
         group.add_argument(
             '--max-duration',
+            dest="giga_max_duration",
             type=int,
             default=500.0,
             help="Maximum pooled recordings duration (seconds) in a single batch.")
         group.add_argument(
             '--bucketing-sampler',
+            dest="giga_bucketing_sampler",
             type=str2bool,
             default=False,
             help='When enabled, the batches will come from buckets of '
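The str2bool used here is icefall's argparse type helper (imported at the top of this file). For readers outside the repo, a typical implementation of such a helper looks roughly like this sketch (not necessarily icefall's exact code):

import argparse

def str2bool(v):
    # Accept the usual textual spellings of booleans, e.g. --shuffle 0 / --shuffle true.
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
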
@@ -81,28 +86,33 @@ class GigaSpeechAsrDataModule(DataModule):
             '--num-buckets',
             type=int,
             default=30,
+            dest="giga_num_buckets",
             help='The number of buckets for the BucketingSampler'
             '(you might want to increase it for larger datasets).')
         group.add_argument(
             '--concatenate-cuts',
+            dest="giga_concatenate_cuts",
             type=str2bool,
             default=True,
             help='When enabled, utterances (cuts) will be concatenated '
             'to minimize the amount of padding.')
         group.add_argument(
             '--duration-factor',
+            dest="giga_duration_factor",
             type=float,
             default=1.0,
             help='Determines the maximum duration of a concatenated cut '
             'relative to the duration of the longest cut in a batch.')
         group.add_argument(
             '--gap',
+            dest="giga_gap",
             type=float,
             default=1.0,
             help='The amount of padding (in seconds) inserted between concatenated cuts. '
             'This padding is filled with noise when noise augmentation is used.')
         group.add_argument(
             '--on-the-fly-feats',
+            dest="giga_on_the_fly_feats",
             type=str2bool,
             default=False,
             help='When enabled, use on-the-fly cut mixing and feature extraction. '
@@ -110,12 +120,14 @@ class GigaSpeechAsrDataModule(DataModule):
         )
         group.add_argument(
             '--shuffle',
+            dest="giga_shuffle",
             type=str2bool,
             default=True,
             help='When enabled (=default), the examples will be shuffled for each epoch.'
         )
         group.add_argument(
             '--check-cuts',
+            dest="giga_check_cuts",
             type=str2bool,
             default=True,
             help='When enabled (=default), we will iterate over the whole training cut set '
@@ -126,12 +138,14 @@ class GigaSpeechAsrDataModule(DataModule):
         # GigaSpeech specific arguments
         group.add_argument(
             "--subset",
+            dest="giga_subset",
             type=str,
             default="XS",
             help="Select the GigaSpeech subset (XS|S|M|L|XL)",
         )
         group.add_argument(
             "--context-window",
+            dest="giga_context_window",
             type=float,
             default=0.0,
             help="Training cut duration in seconds. "
@@ -140,6 +154,7 @@ class GigaSpeechAsrDataModule(DataModule):
         )
         group.add_argument(
             "--context-direction",
+            dest="giga_context_direction",
             type=str,
             default="center",
             help="If context-window is 0, does nothing. "
@@ -148,6 +163,7 @@ class GigaSpeechAsrDataModule(DataModule):
         )
         group.add_argument(
             '--use-context-for-test',
+            dest="giga_use_context_for_test",
             type=str2bool,
             default=False,
             help='Should we read cuts with acoustic context or without it. '
@@ -155,24 +171,25 @@ class GigaSpeechAsrDataModule(DataModule):
         )
         group.add_argument(
             '--small-dev',
+            dest="giga_small_dev",
             type=str2bool,
             default=False,
             help='Should we use only 1000 utterances for dev (speeds up training)'
         )
 
     def validate_args(self):
-        if self.args.subset in ['L', 'XL']:
+        if self.args.giga_subset in ['L', 'XL']:
             assert (
-                self.args.shuffle == False
+                self.args.giga_shuffle == False
             ), "For GigaSpeech L/XL, you must use --shuffle 0 to avoid eagerly reading pyarrow manifests."
             assert (
-                self.args.check_cuts == False
+                self.args.giga_check_cuts == False
             ), "For GigaSpeech L/XL, you must use --check-cuts 0 to avoid eagerly reading pyarrow manifests."
             assert (
-                self.args.bucketing_sampler == False
+                self.args.giga_bucketing_sampler == False
             ), "For GigaSpeech L/XL, you must use --bucketing-sampler 0 to avoid eagerly reading pyarrow manifests."
             assert (
-                self.args.on_the_fly_feats == True
+                self.args.giga_on_the_fly_feats == True
             ), "For GigaSpeech L/XL, you must use --on-the-fly-feats 1 as we do not pre-compute them by default."
 
     def train_dataloaders(self) -> DataLoader:
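With the renames, validate_args inspects the giga_-prefixed attributes, which correspond one-to-one to the flags of the giga subparser. A standalone sketch of a namespace that satisfies the L/XL checks (values illustrative, mirroring the assertions above):

from argparse import Namespace

# Equivalent to: giga --subset XL --shuffle 0 --check-cuts 0 --bucketing-sampler 0 --on-the-fly-feats 1
args = Namespace(
    giga_subset='XL',
    giga_shuffle=False,            # pyarrow manifests must not be read eagerly
    giga_check_cuts=False,         # skip the full pass over the training cuts
    giga_bucketing_sampler=False,  # bucketing would also read the manifest eagerly
    giga_on_the_fly_feats=True,    # L/XL features are not pre-computed by default
)
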
@@ -183,19 +200,19 @@ class GigaSpeechAsrDataModule(DataModule):
         self.consumed_cuts = 0
 
         logging.info("About to get Musan cuts")
-        cuts_musan = load_manifest(self.args.feature_dir / 'cuts_musan.json.gz')
+        cuts_musan = load_manifest(self.args.giga_feature_dir / 'cuts_musan.json.gz')
 
         logging.info("About to create train dataset")
         transforms = [CutMix(cuts=cuts_musan, prob=0.5, snr=(10, 20))]
-        if self.args.concatenate_cuts:
+        if self.args.giga_concatenate_cuts:
             logging.info(f'Using cut concatenation with duration factor '
-                         f'{self.args.duration_factor} and gap {self.args.gap}.')
+                         f'{self.args.giga_duration_factor} and gap {self.args.giga_gap}.')
             # Cut concatenation should be the first transform in the list,
             # so that if we e.g. mix noise in, it will fill the gaps between different utterances.
             transforms = [
                 CutConcatenate(
-                    duration_factor=self.args.duration_factor,
-                    gap=self.args.gap
+                    duration_factor=self.args.giga_duration_factor,
+                    gap=self.args.giga_gap
                 )
             ] + transforms
 
@@ -203,10 +220,10 @@ class GigaSpeechAsrDataModule(DataModule):
             # cuts_train,
             cut_transforms=transforms,
             return_cuts=True,
-            # check_inputs=self.args.check_cuts,
+            # check_inputs=self.args.giga_check_cuts,
         )
 
-        if self.args.on_the_fly_feats:
+        if self.args.giga_on_the_fly_feats:
             # NOTE: the PerturbSpeed transform should be added only if we remove it from data prep stage.
             # # Add on-the-fly speed perturbation; since originally it would have increased epoch
             # # size by 3, we will apply prob 2/3 and use 3x more epochs.
@@ -218,23 +235,23 @@ class GigaSpeechAsrDataModule(DataModule):
                 cut_transforms=transforms,
                 input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80)), num_workers=20),
                 return_cuts=True,
-                # check_inputs=self.args.check_cuts,
+                # check_inputs=self.args.giga_check_cuts,
             )
 
-        if self.args.bucketing_sampler:
+        if self.args.giga_bucketing_sampler:
             logging.info('Using BucketingSampler.')
             train_sampler = BucketingSampler(
                 cuts_train,
-                max_duration=self.args.max_duration,
-                shuffle=self.args.shuffle,
-                num_buckets=self.args.num_buckets
+                max_duration=self.args.giga_max_duration,
+                shuffle=self.args.giga_shuffle,
+                num_buckets=self.args.giga_num_buckets
             )
         else:
             logging.info('Using SingleCutSampler.')
             train_sampler = SingleCutSampler(
                 cuts_train,
-                max_duration=self.args.max_duration,
-                shuffle=self.args.shuffle,
+                max_duration=self.args.giga_max_duration,
+                shuffle=self.args.giga_shuffle,
             )
         logging.info("About to create train dataloader")
         #train_dl = DataLoader(
@@ -261,32 +278,32 @@ class GigaSpeechAsrDataModule(DataModule):
         cuts_valid = self.valid_cuts()
 
         transforms = [ ]
-        if self.args.concatenate_cuts:
+        if self.args.giga_concatenate_cuts:
             transforms = [ CutConcatenate(
-                duration_factor=self.args.duration_factor,
-                gap=self.args.gap)
+                duration_factor=self.args.giga_duration_factor,
+                gap=self.args.giga_gap)
             ] + transforms
 
 
         logging.info("About to create dev dataset")
-        if self.args.on_the_fly_feats:
+        if self.args.giga_on_the_fly_feats:
             validate = K2SpeechRecognitionDataset(
                 cuts_valid,
                 cut_transforms=transforms,
                 input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80)), num_workers=8),
                 return_cuts=True,
-                check_inputs=self.args.check_cuts,
+                check_inputs=self.args.giga_check_cuts,
             )
         else:
             validate = K2SpeechRecognitionDataset(
                 # cuts_valid,
                 cut_transforms=transforms,
                 return_cuts=True,
-                # check_inputs=self.args.check_cuts,
+                # check_inputs=self.args.giga_check_cuts,
             )
         valid_sampler = SingleCutSampler(
             cuts_valid,
-            max_duration=self.args.max_duration,
+            max_duration=self.args.giga_max_duration,
             shuffle=False,
         )
         logging.info("About to create dev dataloader")
@@ -318,13 +335,13 @@ class GigaSpeechAsrDataModule(DataModule):
             cuts_test,
             input_strategy=(
                 OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80)), num_workers=8)
-                if self.args.on_the_fly_feats
+                if self.args.giga_on_the_fly_feats
                 else PrecomputedFeatures()
             ),
             return_cuts=True,
-            check_inputs=self.args.check_cuts,
+            check_inputs=self.args.giga_check_cuts,
         )
-        sampler = SingleCutSampler(cuts_test, max_duration=self.args.max_duration)
+        sampler = SingleCutSampler(cuts_test, max_duration=self.args.giga_max_duration)
         logging.debug("About to create test dataloader")
         #test_dl = DataLoader(test, batch_size=None, sampler=sampler, num_workers=1)
         test_dl = LhotseDataLoader(test, sampler=sampler, num_workers=2)
@@ -339,32 +356,32 @@ class GigaSpeechAsrDataModule(DataModule):
     def train_cuts(self) -> CutSet:
         logging.info("About to get train cuts")
         # Note: for L and XL subsets, we are expecting that the training manifest is stored using pyarrow and pre-shuffled.
-        cuts_path_ext = 'jsonl.gz' if self.args.subset not in ['L', 'XL'] else 'arrow'
+        cuts_path_ext = 'jsonl.gz' if self.args.giga_subset not in ['L', 'XL'] else 'arrow'
         cuts_train = CutSet.from_file(
-            self.args.feature_dir
-            / f"gigaspeech_cuts_{self.args.subset}{get_context_suffix(self.args)}.{cuts_path_ext}"
+            self.args.giga_feature_dir
+            / f"gigaspeech_cuts_{self.args.giga_subset}{get_context_suffix(self.args)}.{cuts_path_ext}"
         )
         return cuts_train
 
     @lru_cache()
     def valid_cuts(self) -> CutSet:
-        if self.args.use_context_for_test:
-            path = self.args.feature_dir / f"gigaspeech_cuts_DEV{get_context_suffix(self.args)}.jsonl.gz"
+        if self.args.giga_use_context_for_test:
+            path = self.args.giga_feature_dir / f"gigaspeech_cuts_DEV{get_context_suffix(self.args)}.jsonl.gz"
         else:
-            path = self.args.feature_dir / f"gigaspeech_cuts_DEV.jsonl.gz"
+            path = self.args.giga_feature_dir / f"gigaspeech_cuts_DEV.jsonl.gz"
         logging.info(f"About to get valid cuts from {path}")
         cuts_valid = load_manifest(path)
-        if self.args.small_dev:
+        if self.args.giga_small_dev:
             return cuts_valid.subset(first=1000)
         else:
            return cuts_valid
 
     @lru_cache()
     def test_cuts(self) -> CutSet:
-        if self.args.use_context_for_test:
-            path = self.args.feature_dir / f"gigaspeech_cuts_TEST{get_context_suffix(self.args)}.jsonl.gz"
+        if self.args.giga_use_context_for_test:
+            path = self.args.giga_feature_dir / f"gigaspeech_cuts_TEST{get_context_suffix(self.args)}.jsonl.gz"
         else:
-            path = self.args.feature_dir / f"gigaspeech_cuts_TEST.jsonl.gz"
+            path = self.args.giga_feature_dir / f"gigaspeech_cuts_TEST.jsonl.gz"
         logging.info(f"About to get test cuts from {path}")
         cuts_test = load_manifest(path)
         return cuts_test
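To see how the renamed options compose into manifest paths, here is an illustrative evaluation of the train_cuts() path logic (a sketch with hypothetical values, assuming get_context_suffix from the top of this file is in scope):

from argparse import Namespace
from pathlib import Path

args = Namespace(
    giga_feature_dir=Path('exp/giga_data'),  # the new default from this commit
    giga_subset='XS',
    giga_context_window=0.5,
    giga_context_direction='center',
)
ext = 'jsonl.gz' if args.giga_subset not in ['L', 'XL'] else 'arrow'
path = args.giga_feature_dir / f"gigaspeech_cuts_{args.giga_subset}{get_context_suffix(args)}.{ext}"
print(path)  # exp/giga_data/gigaspeech_cuts_XS_center0.5.jsonl.gz
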
egs/librispeech/ASR/example_giga_dataloader.py (new file, 28 lines)
@@ -0,0 +1,28 @@
+import argparse
+import json
+from pathlib import Path
+
+from gigaspeech_datamodule import GigaSpeechAsrDataModule
+
+def get_parser():
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter
+    )
+    group = parser.add_argument_group(title='libri related options')
+    group.add_argument(
+        '--max-duration',
+        type=int,
+        default=500.0,
+        help="Maximum pooled recordings duration (seconds) in a single batch.")
+    return parser
+
+if __name__ == '__main__':
+    parser = get_parser()
+    GigaSpeechAsrDataModule.add_arguments(parser)
+    args = parser.parse_args()
+    gigaspeech = GigaSpeechAsrDataModule(args)
+    train_dl = gigaspeech.inexhaustible_train_dataloaders()
+    for idx, batch in enumerate(train_dl):
+        print(batch["inputs"].shape)
+        print(len(batch["supervisions"]["text"]))
+        print(batch["supervisions"]["text"][0:2])