From 326cb7503356f760b77339c8a83abbdab5968b04 Mon Sep 17 00:00:00 2001
From: Daniel Povey
Date: Sun, 8 Jan 2023 15:48:23 +0800
Subject: [PATCH] Increase layer_skip_rate slightly

---
 egs/librispeech/ASR/pruned_transducer_stateless7/zipformer.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless7/zipformer.py b/egs/librispeech/ASR/pruned_transducer_stateless7/zipformer.py
index 5396b1895..aca34b568 100644
--- a/egs/librispeech/ASR/pruned_transducer_stateless7/zipformer.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless7/zipformer.py
@@ -180,7 +180,7 @@ class Zipformer(EncoderInterface):
             # the model from relying too much on them at the end.  (They tend to train
             # slowly, so don't increase their skip-rate at the beginning.)
             layer_skip_rate = (ScheduledFloat((0.0, 0.5), (4000.0, 0.0), default=0,) +
-                               (0.025 * (downsampling_factor[i] ** 0.5)))
+                               (0.03 * (downsampling_factor[i] ** 0.5)))

             encoder_layer = ZipformerEncoderLayer(
                 embed_dim=encoder_dim[i],
@@ -217,7 +217,7 @@ class Zipformer(EncoderInterface):
             )
             # we are adding a new attribute here.
             # this will be interpreted by get_named_parameter_groups_with_lrs().
-            encoder.lr_scale = downsampling_factor[i] ** -0.25
+            encoder.lr_scale = downsampling_factor[i] ** -0.33

             encoders.append(encoder)

         self.encoders = nn.ModuleList(encoders)