From 45ce951b8c204f6595cdcd2071016ac9d62ce1bb Mon Sep 17 00:00:00 2001
From: dohe0342
Date: Sat, 10 Dec 2022 13:28:43 +0900
Subject: [PATCH] from local

---
 .../.train.py.swp | Bin 69632 -> 73728 bytes
 .../train.py      |  33 +++++++++---------
 2 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.train.py.swp b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.train.py.swp
index 0ace3e9a123e14bfe6b2c756d779d4cc380f223f..d13a7bd4153fe1605c8d935031d72fa8b370c994 100644
GIT binary patch
delta 1030
[base85-encoded binary delta omitted]