diff --git a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.full_ft.py.swp b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.full_ft.py.swp
index b7f87a1e5..b8410311b 100644
Binary files a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.full_ft.py.swp and b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.full_ft.py.swp differ
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/full_ft.py b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/full_ft.py
index 0ecf7b37e..7522912d2 100755
--- a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/full_ft.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/full_ft.py
@@ -1206,13 +1206,13 @@ def train_one_epoch(
             wb.log({"valid/simple_loss": valid_info["simple_loss"]*numel})
             wb.log({"valid/pruned_loss": valid_info["pruned_loss"]*numel})
             wb.log({"valid/ctc_loss": valid_info["ctc_loss"]*numel})
+            '''
     loss_value = tot_loss["loss"] / tot_loss["utterances"]
     params.train_loss = loss_value
     if params.train_loss < params.best_train_loss:
         params.best_train_epoch = params.cur_epoch
         params.best_train_loss = params.train_loss
-    '''


 def run(rank, world_size, args, wb=None):
     """
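Reading the hunk: it moves the closing `'''` of a commented-out block upward, so the wandb validation logging above it stays disabled while the per-epoch train-loss bookkeeping below it becomes live code again. Below is a minimal, self-contained sketch of that re-enabled bookkeeping, not the recipe itself: `SimpleNamespace` stands in for the recipe's `params` object, `tot_loss` is assumed to behave like a dict-style metrics accumulator, and the numeric values are placeholders.

```python
from types import SimpleNamespace

# Stand-in for the recipe's `params` container (an assumption for this sketch).
params = SimpleNamespace(
    best_train_loss=float("inf"),
    best_train_epoch=-1,
    cur_epoch=3,
)

# Stand-in for the accumulated per-epoch totals; only these two keys matter
# here, and the values are placeholders, not numbers from the recipe.
tot_loss = {"loss": 1234.5, "utterances": 100.0}

# Average training loss per utterance for the current epoch.
loss_value = tot_loss["loss"] / tot_loss["utterances"]
params.train_loss = loss_value

# Track the best (lowest) training loss seen so far and the epoch it occurred in.
if params.train_loss < params.best_train_loss:
    params.best_train_epoch = params.cur_epoch
    params.best_train_loss = params.train_loss

print(params.best_train_epoch, params.best_train_loss)  # -> 3 12.345
```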