From 9e21e1f0e1e3a94da8936225ee7d54fac658fe93 Mon Sep 17 00:00:00 2001
From: dohe0342
Date: Mon, 9 Jan 2023 19:39:39 +0900
Subject: [PATCH] from local

---
 .../ASR/incremental_transf/.conformer.py.swp | Bin 110592 -> 110592 bytes
 .../ASR/incremental_transf/conformer.py      |  36 ++++++++----------
 2 files changed, 15 insertions(+), 21 deletions(-)

diff --git a/egs/librispeech/ASR/incremental_transf/.conformer.py.swp b/egs/librispeech/ASR/incremental_transf/.conformer.py.swp
index db2caf2b96219919e971eae1df01467a2d4726c5..d619f0b5057873b000d5ffc50af4e11a2f1c64ae 100644
GIT binary patch
delta 535
[base85-encoded binary delta of the Vim swap file omitted]

diff --git a/egs/librispeech/ASR/incremental_transf/conformer.py b/egs/librispeech/ASR/incremental_transf/conformer.py
index b6ff0a156..b2478e46b 100644
--- a/egs/librispeech/ASR/incremental_transf/conformer.py
+++ b/egs/librispeech/ASR/incremental_transf/conformer.py
@@ -480,27 +480,21 @@ class Tempformer(EncoderInterface):
         self.short_chunk_size = short_chunk_size
         self.num_left_chunks = num_left_chunks
 
-        encoder_layer = ConformerEncoderLayer(
-            d_model=d_model,
-            nhead=nhead,
-            dim_feedforward=dim_feedforward,
-            dropout=dropout,
-            layer_dropout=layer_dropout,
-            cnn_module_kernel=cnn_module_kernel,
-            causal=causal,
-        )
-        # aux_layers from 1/3
-        self.encoder = ConformerEncoder(
-            encoder_layer=encoder_layer,
-            num_layers=num_encoder_layers,
-            aux_layers=list(
-                range(
-                    num_encoder_layers // 3,
-                    num_encoder_layers - 1,
-                    aux_layer_period,
-                )
-            ),
-        )
+        def build_conformer(d_model, nhead, dim_feedforward, dropout, layer_dropout, cnn_module_kernel, causal):
+            encoder_layer = ConformerEncoderLayer(
+                d_model=d_model,
+                nhead=nhead,
+                dim_feedforward=dim_feedforward,
+                dropout=dropout,
+                layer_dropout=layer_dropout,
+                cnn_module_kernel=cnn_module_kernel,
+                causal=causal,
+            )
+            return encoder_layer
+
+
+        self.encoder_layers = nn.ModuleList(
         self._init_state: List[torch.Tensor] = [torch.empty(0)]
 
     def forward(
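
Note on the new hunk: it ends mid-statement. The line "self.encoder_layers =
nn.ModuleList(" is never closed, so the patched conformer.py is not valid
Python as committed. Below is a minimal sketch of how the construction could
be completed, assuming the intent is to instantiate one ConformerEncoderLayer
per encoder layer through the new build_conformer helper; the list
comprehension and the use of num_encoder_layers here are assumptions, not
code from this commit.

    # Sketch only (inside Tempformer.__init__): the commit truncates before
    # the ModuleList contents, so this completion is assumed, not taken from
    # the patch.
    self.encoder_layers = nn.ModuleList(
        [
            build_conformer(
                d_model=d_model,
                nhead=nhead,
                dim_feedforward=dim_feedforward,
                dropout=dropout,
                layer_dropout=layer_dropout,
                cnn_module_kernel=cnn_module_kernel,
                causal=causal,
            )
            # one ConformerEncoderLayer per layer of the encoder stack
            for _ in range(num_encoder_layers)
        ]
    )

Compared with the removed code, this drops the ConformerEncoder wrapper and
its aux_layers list (every aux_layer_period-th layer starting from one third
of the encoder depth); if that auxiliary-output behaviour is still wanted, it
would have to be reintroduced on top of the per-layer ModuleList.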