From 69717980dda6c2300e36361fa3351eeb7ba51c5c Mon Sep 17 00:00:00 2001
From: dohe0342
Date: Sat, 10 Dec 2022 15:26:52 +0900
Subject: [PATCH] from local

---
 .../.train.py.swp | Bin 110592 -> 110592 bytes
 .../train.py      | 10 +++++-----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.train.py.swp b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.train.py.swp
index 5f5c059d434556198d77ce6da31dfd927d2350b1..60faec18ea5471ebf5b9c7a540db4cdc42e602cb 100644
Binary files a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.train.py.swp and b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.train.py.swp differ

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/train.py b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/train.py
index 46e8f0067..28895ead9 100755
--- a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/train.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/train.py
@@ -1105,10 +1105,10 @@ def train_one_epoch(
             )

         if wb is not None and rank == 0:
-            wb.log({"train/loss": loss_info["loss"]*numel})
-            wb.log({"train/simple_loss": loss_info["simple_loss"]*numel})
-            wb.log({"train/pruned_loss": loss_info["pruned_loss"]*numel})
-            wb.log({"train/ctc_loss": loss_info["ctc_loss"]*numel})
+            wb.log({"train/loss": loss_info["loss"]*numel/params.world_size})
+            wb.log({"train/simple_loss": loss_info["simple_loss"]*numel/params.world_size})
+            wb.log({"train/pruned_loss": loss_info["pruned_loss"]*numel/params.world_size})
+            wb.log({"train/ctc_loss": loss_info["ctc_loss"]*numel/params.world_size})

         #if batch_idx % params.valid_interval == 0 and not params.print_diagnostics:
             logging.info("Computing validation loss")
@@ -1130,7 +1130,7 @@ def train_one_epoch(
             )

         if wb is not None and rank == 0:
-            numel = params.world_size / (params.accum_grads * valid_info["utterances"])
+            numel = 1 / (params.accum_grads * valid_info["utterances"])
             wb.log({"valid/loss": valid_info["loss"]*numel})
             wb.log({"valid/simple_loss": valid_info["simple_loss"]*numel})
             wb.log({"valid/pruned_loss": valid_info["pruned_loss"]*numel})
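
Both hunks fix the same problem: the logged per-utterance losses scaled with
the number of DDP processes, apparently because the loss statistics are summed
across ranks before logging. The training-side calls now divide by
params.world_size, and the validation-side numel drops its world_size factor.
Below is a minimal sketch of the resulting training-side arithmetic, assuming
numel was computed earlier as 1 / (params.accum_grads * utterances); the
helper name and the sample numbers are illustrative, not from the patch:

    # Illustrative sketch; only the numel / world_size scaling mirrors the diff.
    def per_utterance_loss(loss_sum: float, utterances: int,
                           accum_grads: int, world_size: int) -> float:
        """Scale a summed loss to a per-utterance value that stays
        comparable across different numbers of DDP processes."""
        numel = 1.0 / (accum_grads * utterances)  # per-utterance scale
        return loss_sum * numel / world_size      # undo the rank multiplicity

    # e.g. a loss of 80.0 summed over 10 utterances, accum_grads=2, 4 GPUs:
    print(per_utterance_loss(80.0, 10, 2, 4))  # 1.0

One further note: each bare wb.log call advances wandb's global step, so the
four train metrics above land on four consecutive steps; logging them as a
single dict in one wb.log call would keep them on the same step.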