From f5e690d4c58c23eea54afe353b6e3d806494501d Mon Sep 17 00:00:00 2001
From: dohe0342
Date: Thu, 25 May 2023 21:05:41 +0900
Subject: [PATCH] from local

---
 .../.data2vec_audio.py.swp                         | Bin 40960 -> 40960 bytes
 .../.train_lora.py.swp                             | Bin 86016 -> 86016 bytes
 .../train_lora.py                                  |   2 +-
 3 files changed, 1 insertion(+), 1 deletion(-)

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.data2vec_audio.py.swp b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.data2vec_audio.py.swp
index b96febe5f8dc53c122e4bbf21a11114fa2f8a2c7..ffdb79811c6933495668181de172ea17aeaedb3d 100644
GIT binary patch
delta 32
mcmZoTz|?SnNi@kI%+puFLeG!^2m}}yxcu`|gf@!4o(}+v!3b>t

delta 32
mcmZoTz|?SnNi@kI%+puFLeG!^2m}}yF8k%DaBdWRJs$v)oe3-e

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.train_lora.py.swp b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.train_lora.py.swp
index efcd80923437e788e362c8ee7e2b09210f1e5768..be2cc259f6eb7be6e6846a0942b7311481ea5ec8 100644
GIT binary patch
delta 598
[base85 payload of this binary vim swap-file delta was garbled in this copy and is omitted]

delta 598
[base85 payload of this binary vim swap-file delta was garbled in this copy and is omitted]

diff --git a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/train_lora.py b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/train_lora.py
index d57a6fe06..60d25ee0e 100755
--- a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/train_lora.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/train_lora.py
@@ -141,7 +141,7 @@ class LoRAHook():
         #print(input)
         #print(module, input, output[0].size())
         lora_out = self.lora(input)
-        #output += lora_out
+        output += lora_out
 
     def save_checkpoint(self, i, iter_, save_dir):
         torch.save(self.lora.state_dict(), f"{save_dir}/lora_{iter_}_{i}.pt")
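
Context for the hunk above: it re-enables the residual addition inside what appears to be a PyTorch forward hook, so the LoRA adapter's output is actually added to the wrapped module's activation instead of being computed and discarded. The sketch below shows how a LoRAHook-style adapter can be wired up with register_forward_hook. It is a minimal illustration, not this repository's code: the LoRALayer class, its rank/alpha parameters and initialization, the dim argument, the input[0] unpacking, and the nn.Linear target are all assumptions; only the hook-body pattern (lora_out = self.lora(...); output += lora_out) and save_checkpoint come from the patch.

import torch
import torch.nn as nn


class LoRALayer(nn.Module):
    # Hypothetical low-rank adapter: scale * (x @ A @ B). Standard LoRA
    # init: A is small random, B is zero, so the adapter starts as a no-op.
    def __init__(self, dim: int, rank: int = 8, alpha: float = 16.0):
        super().__init__()
        self.A = nn.Parameter(torch.randn(dim, rank) * 0.01)  # down-projection
        self.B = nn.Parameter(torch.zeros(rank, dim))         # up-projection
        self.scale = alpha / rank

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return (x @ self.A @ self.B) * self.scale


class LoRAHook:
    # Attaches a LoRA adapter to an existing (typically frozen) module
    # via a forward hook, without editing the module's own forward().
    def __init__(self, module: nn.Module, dim: int):
        self.lora = LoRALayer(dim)
        self.handle = module.register_forward_hook(self)

    def __call__(self, module, input, output):
        # `input` is the tuple of positional args the module received;
        # assume the first entry is the activation tensor.
        lora_out = self.lora(input[0])
        # In-place add mutates the tensor the module returned, so the
        # adapted activation is what downstream layers see. This is the
        # line the patch uncomments.
        output += lora_out

    def save_checkpoint(self, i, iter_, save_dir):
        torch.save(self.lora.state_dict(), f"{save_dir}/lora_{iter_}_{i}.pt")

    def remove(self):
        self.handle.remove()


# Usage: adapt a frozen linear layer; only the LoRA parameters train.
layer = nn.Linear(16, 16)
for p in layer.parameters():
    p.requires_grad_(False)
hook = LoRAHook(layer, dim=16)
y = layer(torch.randn(2, 16))  # hook adds the LoRA term in place

One caveat about the in-place pattern: `output += lora_out` only propagates when the hooked module returns a single tensor. If the module returns a tuple, the alternative documented for register_forward_hook is to return a new output from the hook instead of mutating in place.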