diff --git a/egs/librispeech/ASR/incremental_transf/train.py b/egs/librispeech/ASR/incremental_transf/train.py
index 567ceafa7..6c72f6778 100755
--- a/egs/librispeech/ASR/incremental_transf/train.py
+++ b/egs/librispeech/ASR/incremental_transf/train.py
@@ -965,6 +965,17 @@ def run(rank, world_size, args):
 
     logging.info("About to create model")
     model = get_transducer_model(params)
+
+    # Load a pre-trained 24-layer Conformer checkpoint and inspect its
+    # parameter names; the weights are not copied into `model` yet.
+    path = '/home/work/workspace/icefall/egs/librispeech/ASR/incremental_transf/conformer_24layers.pt'
+    pre_trained = torch.load(path)
+    pre_trained_model = pre_trained["model"]
+    # TODO: selectively initialize `model` from this checkpoint, e.g.
+    # only parameters with 'layer' not in their name.
+    for n, p in pre_trained_model.items():
+        print(n)
+
 
     num_param = sum([p.numel() for p in model.parameters()])
     logging.info(f"Number of model parameters: {num_param}")
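
The change above only prints the checkpoint's parameter names. A minimal sketch of how the selective initialization hinted at by the leftover filter (`'layer' not in n`) could be completed is below; the helper name `init_from_pretrained`, the shape check, and the `strict=False` fallback are illustrative assumptions, not part of the actual change.

```python
# Hypothetical helper (not in the diff): selectively initialize the new
# model from the pre-trained checkpoint, mirroring the 'layer' filter
# sketched in the commented-out code.
import torch


def init_from_pretrained(model: torch.nn.Module, ckpt_path: str) -> None:
    # The checkpoint stores its state dict under the "model" key,
    # as in the diff above.
    state = torch.load(ckpt_path, map_location="cpu")["model"]
    model_state = model.state_dict()
    # Keep only tensors that (a) lack 'layer' in their name, per the
    # leftover filter, and (b) exist in the target model with a
    # matching shape (the shape check is an added safeguard).
    filtered = {
        n: p
        for n, p in state.items()
        if "layer" not in n
        and n in model_state
        and p.shape == model_state[n].shape
    }
    missing, unexpected = model.load_state_dict(filtered, strict=False)
    print(f"copied {len(filtered)} tensors; {len(missing)} left at random init")
```

Called right after `model = get_transducer_model(params)`, a helper like this would replace the debug print loop once the intended subset of parameters is confirmed.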