diff --git a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/decode.py b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/decode.py
index 5338f14aa..194b98645 100755
--- a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/decode.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/decode.py
@@ -687,6 +687,19 @@ def main():
         load_checkpoint(f"{params.exp_dir}/{params.model_name}", model)
     elif 'lora' in params.model_name:
         load_checkpoint(f"{params.exp_dir}/../d2v-base-T.pt", model)
+
+        ## for lora hooking: wrap every nn.Linear inside the fairseq
+        ## MultiheadAttention blocks with a LoRAHook (assumes `fairseq`
+        ## and `LoRAHook` are already imported at the top of this file).
+        lora_modules = []
+        for attn in model.modules():
+            if isinstance(attn, fairseq.modules.multihead_attention.MultiheadAttention):
+                for module in attn.modules():
+                    if isinstance(module, torch.nn.Linear):
+                        lora_modules.append(LoRAHook(module))
+
+        for i, lora in enumerate(lora_modules):
+            logging.info(f"attached LoRA hook {i}: {lora}")  # placeholder body: report each hook
     else:
         if not params.use_averaged_model:
             if params.iter > 0:
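
The patch calls `LoRAHook` without defining it. For context, here is a minimal, hypothetical sketch of what such a wrapper could look like. Only the class name and its single-`Linear` constructor argument come from the diff; the rank `r`, scaling `alpha`, initialization, and forward-hook mechanics below are assumptions based on the standard LoRA formulation (a frozen base `nn.Linear` plus a trainable low-rank update):

```python
import torch


class LoRAHook(torch.nn.Module):
    """Hypothetical LoRA wrapper: adds a trainable low-rank update
    to the output of a frozen nn.Linear via a forward hook."""

    def __init__(self, linear: torch.nn.Linear, r: int = 8, alpha: float = 16.0):
        super().__init__()
        self.linear = linear
        # Low-rank factors: effective weight becomes W + (alpha / r) * B @ A.
        self.lora_A = torch.nn.Parameter(torch.randn(r, linear.in_features) * 0.01)
        self.lora_B = torch.nn.Parameter(torch.zeros(linear.out_features, r))
        self.scaling = alpha / r
        # Freeze the wrapped layer; only A and B remain trainable.
        for p in linear.parameters():
            p.requires_grad_(False)
        # The forward hook adds the low-rank term to the Linear's output.
        self.handle = linear.register_forward_hook(self._apply_lora)

    def _apply_lora(self, module, inputs, output):
        x = inputs[0]
        return output + (x @ self.lora_A.t() @ self.lora_B.t()) * self.scaling

    def remove(self):
        # Detach the hook, restoring the original Linear behavior.
        self.handle.remove()
```

A forward hook (rather than replacing the module) fits the loop in the diff: the `Linear` layers stay in place inside fairseq's `MultiheadAttention`, so the adapters can be attached and detached without editing fairseq code or re-registering submodules.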