Fix bug RE self.training

commit 61f62837fa
parent 81542832bf
Author: Daniel Povey
Date:   2022-10-05 15:34:39 +08:00


@@ -348,7 +348,7 @@ class ConformerEncoder(nn.Module):
         def get_random_mask():
             # 1.0 means don't drop the layer, 0.0 means drop the layer
             mask = torch.ones(num_layers, device='cpu')
-            if self.training:
+            if not self.training:
                 return mask
             r = rng.random()
             if r < 0.1:
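
For context: the mask implements layer dropout, which should only be active during training. Before this fix, the early-return fired when self.training was true, so layers were never dropped during training, while the random-drop path ran at inference time. The hunk cuts off after the `r < 0.1` test, so everything past that point in the sketch below is an assumption, not the repository's actual implementation; it only illustrates the corrected control flow as a standalone function:

    import random

    import torch


    def get_random_mask(num_layers: int, training: bool,
                        rng: random.Random) -> torch.Tensor:
        # 1.0 means don't drop the layer, 0.0 means drop the layer.
        mask = torch.ones(num_layers, device='cpu')
        if not training:
            # Inference/eval: keep every layer. (This is the fixed bug:
            # the early-return previously triggered during training.)
            return mask
        r = rng.random()
        if r < 0.1:
            # Assumed continuation -- the real body is not in the diff.
            # Occasionally drop each layer independently with prob 0.1.
            mask = (torch.rand(num_layers) > 0.1).to(mask.dtype)
        return mask

A quick sanity check of the fix: with training=False the function always returns all ones (no layers dropped), while with training=True it can occasionally zero out entries.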