From 80d51efd154520a38e77f0568b4481c3515a15b9 Mon Sep 17 00:00:00 2001
From: Daniel Povey
Date: Fri, 14 Oct 2022 23:29:55 +0800
Subject: [PATCH] Change cutoff for small_grad_norm

---
 egs/librispeech/ASR/pruned_transducer_stateless7/conformer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless7/conformer.py b/egs/librispeech/ASR/pruned_transducer_stateless7/conformer.py
index 6183a50d4..c8de95cab 100644
--- a/egs/librispeech/ASR/pruned_transducer_stateless7/conformer.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless7/conformer.py
@@ -844,7 +844,7 @@ class EntropyPenaltyFunction(torch.autograd.Function):
         assert entropy.shape == (num_heads,)
         excess_entropy = (entropy - entropy_limit).relu()
         above_cutoff = (entropy > 0)  # tensor of shape (num_heads,)
-        small_grad_norm = (grad_norms < 0.5 * grad_norms.mean())
+        small_grad_norm = (grad_norms < grad_norms.mean())
         will_penalize = torch.logical_and(above_cutoff, small_grad_norm)
         if random.random() < 0.005 or __name__ == "__main__":
             logging.info(f"entropy = {entropy}, entropy_limit={entropy_limit}, above_cutoff={above_cutoff}, small_grad_norm={small_grad_norm}, will_penalize={will_penalize}")
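
A minimal standalone sketch (not part of the patch) of what the cutoff change does; the tensor values, num_heads, and entropy_limit below are made up for illustration only. Before the patch a head had to have a gradient norm below half the per-head mean to count as small_grad_norm; after it, anything below the mean qualifies, so more heads become eligible for the entropy penalty.

    import torch

    torch.manual_seed(0)
    num_heads = 8
    grad_norms = torch.rand(num_heads)        # hypothetical per-head gradient norms
    entropy = torch.rand(num_heads) * 2.0     # hypothetical per-head attention entropies

    above_cutoff = entropy > 0                # same condition as in the patched code

    # Cutoff before the patch: strictly less than half the mean grad norm.
    old_small = grad_norms < 0.5 * grad_norms.mean()
    # Cutoff after the patch: less than the mean grad norm.
    new_small = grad_norms < grad_norms.mean()

    print("penalized before:", torch.logical_and(above_cutoff, old_small).sum().item())
    print("penalized after: ", torch.logical_and(above_cutoff, new_small).sum().item())

Since a head only needs to fall below the mean rather than half of it, roughly half the heads will typically satisfy the new condition, which appears to be the point of the change: letting the penalty reach heads that were previously just under average but above the stricter cutoff.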