Repository: https://github.com/k2-fsa/icefall.git (mirror)

commit 860a6b27fa (parent 5d94a19026)

    complete exp on zipformer-L
@@ -1 +1 @@
-export PYTHONPATH=/var/data/share20/qc/k2/Github/icefall:$PYTHONPATH
+export PYTHONPATH=/root/k2/Github/icefall:$PYTHONPATH
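The hunk above only repoints PYTHONPATH at a different icefall checkout. A quick, generic sanity check (not part of the commit) that the interpreter picks up the intended copy:

    # Verify which icefall checkout Python resolves after exporting PYTHONPATH.
    import icefall

    print(icefall.__file__)  # expected to point under /root/k2/Github/icefall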
@@ -1,15 +1,20 @@
-num_epochs=30
-for ((i=$num_epochs; i>=15; i--));
+num_epochs=40
+for ((i=$num_epochs; i>=1; i--));
 do
     for ((j=1; j<=$i; j++));
     do
         python3 ./zipformer/decode.py \
             --epoch $i \
             --avg $j \
-            --exp-dir zipformer/exp \
-            --max-duration 450 \
-            --lang data/lang_char \
+            --exp-dir zipformer/exp-large \
+            --max-duration 600 \
+            --causal 0 \
             --decoding-method greedy_search \
-            --blank-penalty 2
+            --num-encoder-layers 2,2,4,5,4,2 \
+            --feedforward-dim 512,768,1536,2048,1536,768 \
+            --encoder-dim 192,256,512,768,512,256 \
+            --encoder-unmasked-dim 192,192,256,320,256,192 \
+            --lang data/lang_char \
+            --blank-penalty 0
     done
 done
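For context, decode.py's --epoch/--avg pair selects a run of consecutive checkpoints and averages their weights before decoding, so the nested loop above sweeps every valid (epoch, avg) combination; with num_epochs=40 that is 40*41/2 = 820 greedy-search runs. A minimal sketch of the averaging step, assuming checkpoints saved as epoch-N.pt holding a "model" state_dict (icefall ships its own helper for this; the code below is illustrative only):

    # Sketch: average the model weights of epochs [epoch - avg + 1, epoch].
    # Assumes exp_dir/epoch-N.pt files with a "model" state_dict; icefall's
    # real helper also handles integer buffers, which plain division would break.
    import torch

    def average_checkpoints(exp_dir: str, epoch: int, avg: int) -> dict:
        avg_state: dict = {}
        for n in range(epoch - avg + 1, epoch + 1):
            state = torch.load(f"{exp_dir}/epoch-{n}.pt", map_location="cpu")["model"]
            for k, v in state.items():
                avg_state[k] = v.clone() if k not in avg_state else avg_state[k] + v
        return {k: v / avg for k, v in avg_state.items()}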
@@ -1,15 +1,20 @@
-num_epochs=30
-for ((i=$num_epochs; i>=20; i--));
+num_epochs=60
+for ((i=$num_epochs; i>=40; i--));
 do
-    for avg in 12 11 10 9 8 7 6 5;
+    for ((j=1; j<=$i; j++));
     do
         python3 ./zipformer/decode.py \
             --epoch $i \
-            --avg $avg \
-            --exp-dir zipformer/exp \
-            --max-duration 450 \
-            --lang data/lang_char \
+            --avg $j \
+            --exp-dir zipformer/exp-large \
+            --max-duration 600 \
+            --causal 0 \
             --decoding-method modified_beam_search \
-            --blank-penalty 2.5
+            --num-encoder-layers 2,2,4,5,4,2 \
+            --feedforward-dim 512,768,1536,2048,1536,768 \
+            --encoder-dim 192,256,512,768,512,256 \
+            --encoder-unmasked-dim 192,192,256,320,256,192 \
+            --lang data/lang_char \
+            --blank-penalty 0
     done
 done
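The other notable change in both decoding scripts is --blank-penalty dropping to 0 (from 2 and 2.5). The penalty is subtracted from the blank token's logit before the search picks a symbol, biasing decoding away from blank, i.e. toward fewer deletions. A sketch of that scoring tweak, assuming blank is token 0 as in icefall's transducer recipes (how it threads through modified_beam_search is more involved):

    import torch

    BLANK_ID = 0  # assumption: blank is token 0, as in icefall's transducer setup

    def apply_blank_penalty(logits: torch.Tensor, penalty: float) -> torch.Tensor:
        # Lower the blank score so the search emits non-blank tokens more readily.
        if penalty != 0.0:
            logits = logits.clone()
            logits[..., BLANK_ID] -= penalty
        return logits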
@@ -328,7 +328,7 @@ def get_parser():
     )

     parser.add_argument(
-        "--base-lr", type=float, default=0.035, help="The base learning rate."
+        "--base-lr", type=float, default=0.015, help="The base learning rate."
     )

     parser.add_argument(
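Lowering --base-lr from 0.035 to 0.015 is a common adjustment when scaling the model up. In icefall's Zipformer recipes the base rate is modulated by the Eden scheduler, which decays with both the batch and the epoch count; roughly as in the sketch below (constants shown with the recipe's usual defaults of --lr-batches 7500 and --lr-epochs 3.5, and later Eden variants add a warmup factor, omitted here):

    # Sketch of the Eden learning-rate schedule (cf. icefall's optim.py).
    def eden_lr(base_lr: float, batch: int, epoch: float,
                lr_batches: float = 7500.0, lr_epochs: float = 3.5) -> float:
        factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
        factor *= ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
        return base_lr * factor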
@@ -1069,6 +1069,9 @@ def train_one_epoch(
                     tb_writer, "train/valid_", params.batch_idx_train
                 )

+    # print('--------------------debug------------------')
+    # print(tot_loss)
+    # print(tot_loss["frames"])
     loss_value = tot_loss["loss"] / tot_loss["frames"]
     params.train_loss = loss_value
     if params.train_loss < params.best_train_loss:
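The commented-out prints inspect tot_loss, icefall's MetricsTracker: a dict-like accumulator of summed statistics, so dividing the accumulated "loss" by the accumulated "frames" yields a per-frame training loss rather than a per-batch one. The accumulation behaves roughly like this Counter-based sketch (values are made up):

    from collections import Counter

    tot_loss = Counter()
    for batch_loss, num_frames in [(12.5, 1000), (11.0, 900)]:  # illustrative
        tot_loss += Counter(loss=batch_loss, frames=num_frames)

    print(tot_loss["loss"] / tot_loss["frames"])  # loss per frame, not per batch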
@@ -1179,7 +1182,7 @@ def run(rank, world_size, args):
         # You should use ../local/display_manifest_statistics.py to get
         # an utterance duration distribution for your dataset to select
         # the threshold
-        if c.duration < 0.3 or c.duration > 30.0:
+        if c.duration < 1.0 or c.duration > 30.0:
             # logging.warning(
             #     f"Exclude cut with ID {c.id} from training. Duration: {c.duration}"
             # )
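Raising the lower duration bound from 0.3 s to 1.0 s tightens the pre-training filter on the lhotse cuts; in icefall's run() the predicate is applied via CutSet.filter. A sketch under that assumption:

    from lhotse import CutSet

    def remove_short_and_long_utt(c) -> bool:
        # Keep cuts between 1.0 and 30.0 seconds (lower bound raised from 0.3 s).
        return 1.0 <= c.duration <= 30.0

    def filter_train_cuts(cuts: CutSet) -> CutSet:
        return cuts.filter(remove_short_and_long_utt)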