diff --git a/egs/librispeech/ASR/.lastlayer_all.sh.swp b/egs/librispeech/ASR/.lastlayer_all.sh.swp
index 86e5b9297..575cc786c 100644
Binary files a/egs/librispeech/ASR/.lastlayer_all.sh.swp and b/egs/librispeech/ASR/.lastlayer_all.sh.swp differ
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.last_layer.py.swp b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.last_layer.py.swp
index f8c8e6734..345620068 100644
Binary files a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.last_layer.py.swp and b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/.last_layer.py.swp differ
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/last_layer.py b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/last_layer.py
index a5c5835b9..885a2be00 100755
--- a/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/last_layer.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless_d2v_v2/last_layer.py
@@ -1587,7 +1587,12 @@ def run_adapter(rank, world_size, args, wb=None):
     adapter_names = []
     adapter_param = []
     for enum, (n, p) in enumerate(model.named_parameters()):
-        print(enum, n, p)
+        #print(enum, n, p)
+        if 'encoder.encoders.layer_norm' in n or 'encoder.encoders.final_proj' in n or 'encoder.output_layer' in n or 'joiner' in n or 'simple' in n or 'ctc' in n:
+            adapter_names.append(n)
+            adapter_param.append(p)
+        else:
+            p.requires_grad = False
         '''
         if 'bias' in n:
             adapter_names.append(n)
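
The hunk above replaces a debug print with selective freezing: parameters whose names match the listed substrings (encoder layer norm, final projection, output layer, joiner, "simple" projections, CTC head) stay trainable and are collected into adapter_names/adapter_param, while every other parameter gets requires_grad = False. Below is a minimal standalone sketch of that pattern, not part of the patch; the model variable and substring list mirror the diff, and the helper name and optimizer setup are hypothetical, for illustration only.

import torch

TRAINABLE_KEYS = (
    "encoder.encoders.layer_norm",
    "encoder.encoders.final_proj",
    "encoder.output_layer",
    "joiner",
    "simple",
    "ctc",
)

def select_adapter_params(model: torch.nn.Module):
    """Keep only parameters matching TRAINABLE_KEYS trainable; freeze the rest."""
    adapter_names, adapter_params = [], []
    for name, param in model.named_parameters():
        if any(key in name for key in TRAINABLE_KEYS):
            adapter_names.append(name)
            adapter_params.append(param)
        else:
            param.requires_grad = False  # frozen: excluded from gradient updates
    return adapter_names, adapter_params

# Usage sketch (hypothetical): pass only the trainable subset to the optimizer.
# names, params = select_adapter_params(model)
# optimizer = torch.optim.AdamW(params, lr=1e-4)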