diff --git a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.train_adapter.py.swp b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.train_adapter.py.swp
index 695b64d9f..3361d30e6 100644
Binary files a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.train_adapter.py.swp and b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.train_adapter.py.swp differ
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/train_adapter.py b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/train_adapter.py
index ef48c720e..9e7accc4e 100755
--- a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/train_adapter.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/train_adapter.py
@@ -1055,7 +1055,8 @@ def train_one_epoch(
                 is_training=True,
                 decode = True if batch_idx % params.decode_interval == 0 else False,
             )
-            loss_info.reduce(loss.device)
+            try: loss_info.reduce(loss.device)
+            except: pass
             numel = params.world_size / (params.accum_grads * loss_info["utterances"])
             loss *= numel ## normalize loss over utts(batch size)
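
For reference, a minimal sketch of an explicit guard that could replace the bare "except: pass" around the metric reduction. It assumes loss_info follows icefall's MetricsTracker-style interface (a reduce(device) method that performs a torch.distributed all_reduce); the helper name reduce_if_distributed is hypothetical, not code from this repository.

# Sketch only, not the patch itself: skip the cross-rank reduction when no
# process group exists instead of silencing every exception.
# Assumption: loss_info exposes reduce(device), which calls
# torch.distributed.all_reduce as in icefall's MetricsTracker.
import torch
import torch.distributed as dist


def reduce_if_distributed(loss_info, device: torch.device) -> None:
    # In single-process runs torch.distributed is never initialized, so an
    # unconditional all_reduce would raise; skip the reduction in that case
    # while still letting genuine errors (wrong device, bad tensor) surface.
    if dist.is_available() and dist.is_initialized():
        loss_info.reduce(device)

Called as reduce_if_distributed(loss_info, loss.device), this keeps single-GPU debugging runs working without masking unrelated failures the way a bare except does.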