From 4929e4cf32f93860aad273223c86a6dc98d611df Mon Sep 17 00:00:00 2001
From: Daniel Povey
Date: Mon, 4 Apr 2022 17:09:25 +0800
Subject: [PATCH] Change how warm-step is set

---
 .../ASR/pruned_transducer_stateless2/train.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/train.py b/egs/librispeech/ASR/pruned_transducer_stateless2/train.py
index e8fbb6a71..bf7f23fab 100755
--- a/egs/librispeech/ASR/pruned_transducer_stateless2/train.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless2/train.py
@@ -147,6 +147,13 @@ def get_parser():
         help="The lr_factor for Noam optimizer",
     )
 
+    parser.add_argument(
+        "--warm-step",
+        type=float,
+        default=60000,
+        help="The number of warmup steps for the (modified) Noam optimizer",
+    )
+
     parser.add_argument(
         "--context-size",
         type=int,
@@ -296,7 +303,6 @@ def get_params() -> AttributeDict:
             # parameters for joiner
             "joiner_dim": 512,
             # parameters for Noam
-            "warm_step": 60000,  # For the 100h subset, use 8k
             "model_warm_step": 4000,  # arg given to model, not for lrate
             "env_info": get_env_info(),
         }
@@ -709,7 +715,6 @@ def run(rank, world_size, args):
     params.update(vars(args))
     if params.full_libri is False:
         params.valid_interval = 1600
-        params.warm_step = 30000
 
     fix_random_seed(params.seed)
     if world_size > 1:
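
Note (not part of the patch): --warm-step controls the warmup length of the (modified) Noam learning-rate schedule. As a rough illustration only, assuming the scheduler follows the standard Noam formula (the recipe's "modified" Noam may differ in detail), the learning rate at a given step would look like the sketch below; `model_size` and `lr_factor` mirror the recipe's existing knobs, but the function itself is illustrative, not the recipe's implementation.

    # Illustrative sketch of the standard Noam schedule, assuming the
    # recipe's "modified" Noam behaves similarly.  Requires step >= 1.
    def noam_lr(step: int, model_size: int, lr_factor: float, warm_step: float) -> float:
        # LR grows roughly linearly up to `warm_step`, peaks there,
        # then decays proportionally to step ** -0.5.
        return (
            lr_factor
            * model_size ** -0.5
            * min(step ** -0.5, step * warm_step ** -1.5)
        )

With the override in run() removed, training on the 100h subset no longer lowers warm_step automatically; to keep the old behaviour one would presumably now pass something like --warm-step 30000 explicitly.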