diff --git a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/train.py b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/train.py
index 5f51cc601..b6d3135de 100755
--- a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/train.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/train.py
@@ -1022,7 +1022,7 @@ def train_one_epoch(
             # in the batch and there is no normalization to it so far.
             scaler.scale(loss).backward()
 
-            if params.multi_optim and batch_idx % params.accum_grads == 0:
+            if params.multi_optim and (batch_idx+1) % params.accum_grads == 0:
                 set_batch_count(model, params.batch_idx_train)
                 scheduler_enc.step_batch(params.batch_idx_train)
                 scheduler_dec.step_batch(params.batch_idx_train)
@@ -1031,7 +1031,7 @@
                 scaler.update()
                 optimizer_enc.zero_grad()
                 optimizer_dec.zero_grad()
-            elif not params.multi_optim and batch_idx % params.accum_grads == 0:
+            elif not params.multi_optim and (batch_idx+1) % params.accum_grads == 0:
                 set_batch_count(model, params.batch_idx_train)
                 scheduler.step_batch(params.batch_idx_train)
                 scaler.step(optimizer)
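
Why the condition changes: batch_idx is 0-based, so the old test batch_idx % params.accum_grads == 0 fires on the very first batch, after only a single backward() has been accumulated, and every subsequent optimizer step is shifted by one micro-batch. The corrected test (batch_idx+1) % params.accum_grads == 0 steps only once a full group of accum_grads micro-batches has accumulated. A minimal standalone Python sketch of the two step schedules (the value 4 and the 12-batch range are illustrative assumptions, not taken from the patch):

# Minimal sketch: which 0-based batch indices trigger an optimizer step
# under each condition. `accum_grads` stands in for params.accum_grads;
# the value 4 and range(12) are hypothetical, chosen for illustration.
accum_grads = 4

old_steps = [i for i in range(12) if i % accum_grads == 0]
new_steps = [i for i in range(12) if (i + 1) % accum_grads == 0]

print(old_steps)  # [0, 4, 8]   -> steps at batch 0 with only one backward() accumulated
print(new_steps)  # [3, 7, 11]  -> steps only after every full group of 4 micro-batches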