From 9a2a58e20da8b26c6d11938f83b797b29553a6f1 Mon Sep 17 00:00:00 2001
From: Daniel Povey
Date: Fri, 2 Dec 2022 19:12:18 +0800
Subject: [PATCH] Fix bug one versus zero

---
 egs/librispeech/ASR/pruned_transducer_stateless7/scaling.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless7/scaling.py b/egs/librispeech/ASR/pruned_transducer_stateless7/scaling.py
index 936a77b8c..52eef52da 100644
--- a/egs/librispeech/ASR/pruned_transducer_stateless7/scaling.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless7/scaling.py
@@ -1229,13 +1229,13 @@ class SwooshFunction(torch.autograd.Function):
         if x.dtype == torch.float16:
             x = x.to(torch.float32)

-        one = torch.tensor(1.0, dtype=x.dtype, device=x.device)
+        zero = torch.tensor(0.0, dtype=x.dtype, device=x.device)

         with torch.cuda.amp.autocast(enabled=False):
             with torch.enable_grad():
                 x = x.detach()
                 x.requires_grad = True
-                y = torch.logaddexp(one, x - 1.125) - 0.08 * x - 0.3
+                y = torch.logaddexp(zero, x - 1.125) - 0.08 * x - 0.3

                 if not requires_grad:
                     return y
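
Note on why the one-line change is correct (a minimal sketch by the editor, not part of the patch; all variable names below are illustrative): torch.logaddexp(a, b) computes log(exp(a) + exp(b)), so a zero first argument reproduces the softplus term log(1 + exp(x - 1.125)) that this activation intends, whereas the old code computed log(e + exp(x - 1.125)).

import torch

# Sketch: compare the fixed formula, the intended softplus-based formula,
# and the old buggy formula on a small input range.
x = torch.linspace(-5.0, 5.0, steps=11)

zero = torch.tensor(0.0, dtype=x.dtype, device=x.device)
one = torch.tensor(1.0, dtype=x.dtype, device=x.device)

y_fixed = torch.logaddexp(zero, x - 1.125) - 0.08 * x - 0.3
y_softplus = torch.log1p(torch.exp(x - 1.125)) - 0.08 * x - 0.3  # intended definition
y_buggy = torch.logaddexp(one, x - 1.125) - 0.08 * x - 0.3       # pre-patch behaviour

assert torch.allclose(y_fixed, y_softplus, atol=1e-6)      # fixed code matches the intended formula
assert not torch.allclose(y_buggy, y_softplus, atol=1e-2)  # old code did not
print("logaddexp(0, .) reproduces the softplus-based formula; logaddexp(1, .) does not")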