complete exp on zipformer-L

This commit is contained in:
root 2024-03-25 05:36:59 +09:00
parent 5d94a19026
commit 860a6b27fa
4 changed files with 30 additions and 17 deletions

View File

@@ -1 +1 @@
export PYTHONPATH=/var/data/share20/qc/k2/Github/icefall:$PYTHONPATH
export PYTHONPATH=/root/k2/Github/icefall:$PYTHONPATH

View File

@@ -1,15 +1,20 @@
num_epochs=30
for ((i=$num_epochs; i>=15; i--));
num_epochs=40
for ((i=$num_epochs; i>=1; i--));
do
for ((j=1; j<=$i; j++));
do
python3 ./zipformer/decode.py \
--epoch $i \
--avg $j \
--exp-dir zipformer/exp \
--max-duration 450 \
--lang data/lang_char \
--exp-dir zipformer/exp-large \
--max-duration 600 \
--causal 0 \
--decoding-method greedy_search \
--blank-penalty 2
--num-encoder-layers 2,2,4,5,4,2 \
--feedforward-dim 512,768,1536,2048,1536,768 \
--encoder-dim 192,256,512,768,512,256 \
--encoder-unmasked-dim 192,192,256,320,256,192 \
--lang data/lang_char \
--blank-penalty 0
done
done

View File

@@ -1,15 +1,20 @@
num_epochs=30
for ((i=$num_epochs; i>=20; i--));
num_epochs=60
for ((i=$num_epochs; i>=40; i--));
do
for avg in 12 11 10 9 8 7 6 5;
for ((j=1; j<=$i; j++));
do
python3 ./zipformer/decode.py \
--epoch $i \
--avg $avg \
--exp-dir zipformer/exp \
--max-duration 450 \
--lang data/lang_char \
--avg $j \
--exp-dir zipformer/exp-large \
--max-duration 600 \
--causal 0 \
--decoding-method modified_beam_search \
--blank-penalty 2.5
--num-encoder-layers 2,2,4,5,4,2 \
--feedforward-dim 512,768,1536,2048,1536,768 \
--encoder-dim 192,256,512,768,512,256 \
--encoder-unmasked-dim 192,192,256,320,256,192 \
--lang data/lang_char \
--blank-penalty 0
done
done

View File

@@ -328,7 +328,7 @@ def get_parser():
)
parser.add_argument(
"--base-lr", type=float, default=0.035, help="The base learning rate."
"--base-lr", type=float, default=0.015, help="The base learning rate."
)
parser.add_argument(
@@ -1069,6 +1069,9 @@ def train_one_epoch(
tb_writer, "train/valid_", params.batch_idx_train
)
# print('--------------------debug------------------')
# print(tot_loss)
# print(tot_loss["frames"])
loss_value = tot_loss["loss"] / tot_loss["frames"]
params.train_loss = loss_value
if params.train_loss < params.best_train_loss:
@@ -1179,7 +1182,7 @@ def run(rank, world_size, args):
# You should use ../local/display_manifest_statistics.py to get
# an utterance duration distribution for your dataset to select
# the threshold
if c.duration < 0.3 or c.duration > 30.0:
if c.duration < 1.0 or c.duration > 30.0:
# logging.warning(
# f"Exclude cut with ID {c.id} from training. Duration: {c.duration}"
# )