Leave the old code in comments for reference

This commit is contained in:
jinzr 2024-01-08 14:24:55 +08:00
parent 9c29aaa61c
commit b5dfe244be
8 changed files with 64 additions and 0 deletions

View File

@ -954,6 +954,15 @@ def run(rank, world_size, args):
if params.full_libri:
train_cuts = librispeech.train_all_shuf_cuts()
# previously we used the following code to load all training cuts,
# strictly speaking, shuffled training cuts should be used instead,
# but we leave the code here to demonstrate that there is an option
# like this to combine multiple cutsets
# train_cuts = librispeech.train_clean_100_cuts()
# train_cuts += librispeech.train_clean_360_cuts()
# train_cuts += librispeech.train_other_500_cuts()
else:
train_cuts = librispeech.train_clean_100_cuts()

View File

@ -774,6 +774,15 @@ def run(rank, world_size, args):
if params.full_libri:
train_cuts = librispeech.train_all_shuf_cuts()
# previously we used the following code to load all training cuts,
# strictly speaking, shuffled training cuts should be used instead,
# but we leave the code here to demonstrate that there is an option
# like this to combine multiple cutsets
# train_cuts = librispeech.train_clean_100_cuts()
# train_cuts += librispeech.train_clean_360_cuts()
# train_cuts += librispeech.train_other_500_cuts()
else:
train_cuts = librispeech.train_clean_100_cuts()

View File

@ -991,6 +991,15 @@ def run(rank, world_size, args):
if params.full_libri:
train_cuts = librispeech.train_all_shuf_cuts()
# previously we used the following code to load all training cuts,
# strictly speaking, shuffled training cuts should be used instead,
# but we leave the code here to demonstrate that there is an option
# like this to combine multiple cutsets
# train_cuts = librispeech.train_clean_100_cuts()
# train_cuts += librispeech.train_clean_360_cuts()
# train_cuts += librispeech.train_other_500_cuts()
else:
train_cuts = librispeech.train_clean_100_cuts()

View File

@ -819,6 +819,15 @@ def run(rank, world_size, args):
if params.full_libri:
train_cuts = librispeech.train_all_shuf_cuts()
# previously we used the following code to load all training cuts,
# strictly speaking, shuffled training cuts should be used instead,
# but we leave the code here to demonstrate that there is an option
# like this to combine multiple cutsets
# train_cuts = librispeech.train_clean_100_cuts()
# train_cuts += librispeech.train_clean_360_cuts()
# train_cuts += librispeech.train_other_500_cuts()
else:
train_cuts = librispeech.train_clean_100_cuts()

View File

@ -1047,6 +1047,15 @@ def run(rank, world_size, args):
else:
if params.full_libri:
train_cuts = librispeech.train_all_shuf_cuts()
# previously we used the following code to load all training cuts,
# strictly speaking, shuffled training cuts should be used instead,
# but we leave the code here to demonstrate that there is an option
# like this to combine multiple cutsets
# train_cuts = librispeech.train_clean_100_cuts()
# train_cuts += librispeech.train_clean_360_cuts()
# train_cuts += librispeech.train_other_500_cuts()
else:
train_cuts = librispeech.train_clean_100_cuts()

View File

@ -1152,6 +1152,11 @@ def run(rank, world_size, args):
if params.full_libri:
train_cuts = librispeech.train_all_shuf_cuts()
# previously we used the following code to load all training cuts,
# strictly speaking, shuffled training cuts should be used instead,
# but we leave the code here to demonstrate that there is an option
# like this to combine multiple cutsets
else:
train_cuts = librispeech.train_clean_100_cuts()

View File

@ -1176,6 +1176,15 @@ def run(rank, world_size, args):
if params.full_libri:
train_cuts = librispeech.train_all_shuf_cuts()
# previously we used the following code to load all training cuts,
# strictly speaking, shuffled training cuts should be used instead,
# but we leave the code here to demonstrate that there is an option
# like this to combine multiple cutsets
# train_cuts = librispeech.train_clean_100_cuts()
# train_cuts += librispeech.train_clean_360_cuts()
# train_cuts += librispeech.train_other_500_cuts()
else:
train_cuts = librispeech.train_clean_100_cuts()

View File

@ -992,6 +992,11 @@ def run(rank, world_size, args):
if params.full_libri:
train_cuts = librispeech.train_all_shuf_cuts()
# previously we used the following code to load all training cuts,
# strictly speaking, shuffled training cuts should be used instead,
# but we leave the code here to demonstrate that there is an option
# like this to combine multiple cutsets
else:
train_cuts = librispeech.train_clean_100_cuts()