From 1a91afa118fad52c46542bbbcec5f6a10bfb7838 Mon Sep 17 00:00:00 2001
From: dohe0342
Date: Sat, 21 Jan 2023 15:33:21 +0900
Subject: [PATCH] conformer_ctc2: make layer-group parameters optional via
 group_num

---
 .../ASR/conformer_ctc2/conformer.py | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/egs/librispeech/ASR/conformer_ctc2/conformer.py b/egs/librispeech/ASR/conformer_ctc2/conformer.py
index 6a3739841..49b45ec7f 100644
--- a/egs/librispeech/ASR/conformer_ctc2/conformer.py
+++ b/egs/librispeech/ASR/conformer_ctc2/conformer.py
@@ -64,6 +64,7 @@ class Conformer(Transformer):
         dropout: float = 0.1,
         layer_dropout: float = 0.075,
         cnn_module_kernel: int = 31,
+        group_num: int = 0,
     ) -> None:
         super(Conformer, self).__init__(
             num_features=num_features,
@@ -101,12 +102,13 @@ class Conformer(Transformer):
             cnn_module_kernel,
         )
         self.encoder = ConformerEncoder(encoder_layer, num_encoder_layers)
-
+
         self.group_num = group_num
-        self.group_layer_num = int(self.encoder_layers // self.group_num)
-        self.alpha = nn.Parameter(torch.rand(self.group_num))
-        self.sigmoid = nn.Sigmoid()
-        self.layer_norm = nn.LayerNorm(d_model)
+        if self.group_num != 0:
+            self.group_layer_num = int(self.encoder_layers // self.group_num)
+            self.alpha = nn.Parameter(torch.rand(self.group_num))
+            self.sigmoid = nn.Sigmoid()
+            self.layer_norm = nn.LayerNorm(d_model)
 
     def run_encoder(
         self,
@@ -146,8 +148,11 @@
         x, layer_outputs = self.encoder(
             x, pos_emb, src_key_padding_mask=mask, warmup=warmup
         )  # (T, N, C)
-
+
+        if self.group_num != 0:
+            pass  # TODO: grouped combination of layer_outputs not implemented yet
         # x = x.permute(1, 0, 2)  # (T, N, C) ->(N, T, C)
+        # return x, lengths
         return x, mask
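
Reviewer note (not part of the patch): the new group_num option splits the
num_encoder_layers encoder layers into group_num groups of group_layer_num
layers each and allocates one learnable weight per group (alpha, passed
through a sigmoid) plus a final LayerNorm, but the branch that would
actually use them in run_encoder is left as a stub above. The sketch below
shows one plausible combination rule, weighting the encoder output at each
group boundary, purely as an assumption; combine_group_outputs is a
hypothetical helper, not code from this commit.

    import torch
    import torch.nn as nn

    def combine_group_outputs(layer_outputs, alpha, layer_norm, group_layer_num):
        # layer_outputs: one (T, N, C) tensor per encoder layer, as returned
        # by ConformerEncoder alongside the final output.
        # Take the output of the last layer in each group, i.e. layers
        # group_layer_num - 1, 2 * group_layer_num - 1, ... (0-indexed).
        group_outs = layer_outputs[group_layer_num - 1 :: group_layer_num]
        # One sigmoid-squashed scalar weight per group (assumed semantics
        # of self.alpha / self.sigmoid in the patch).
        weights = torch.sigmoid(alpha)
        out = sum(w * g for w, g in zip(weights, group_outs))
        # Normalize the weighted sum (assumed role of self.layer_norm).
        return layer_norm(out)

    # Example: 12 encoder layers, group_num=4 -> group_layer_num=3, so the
    # combined output mixes the outputs of layers 2, 5, 8, 11 (0-indexed).
    layer_outputs = [torch.randn(50, 8, 256) for _ in range(12)]
    out = combine_group_outputs(
        layer_outputs, nn.Parameter(torch.rand(4)), nn.LayerNorm(256), 3
    )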