diff --git a/egs/librispeech/ASR/incremental_transf/.train.py.swp b/egs/librispeech/ASR/incremental_transf/.train.py.swp
index ee9785d92..a9966d01f 100644
Binary files a/egs/librispeech/ASR/incremental_transf/.train.py.swp and b/egs/librispeech/ASR/incremental_transf/.train.py.swp differ
diff --git a/egs/librispeech/ASR/incremental_transf/train.py b/egs/librispeech/ASR/incremental_transf/train.py
index dfcedf504..bf0cdcec1 100755
--- a/egs/librispeech/ASR/incremental_transf/train.py
+++ b/egs/librispeech/ASR/incremental_transf/train.py
@@ -966,14 +966,19 @@ def run(rank, world_size, args):
     logging.info("About to create model")
     model = get_transducer_model(params)
 
-    path = '/home/work/workspace/icefall/egs/librispeech/ASR/incremental_transf/conformer_24layers.pt'
-    pre_trained = torch.load(path)
-    pre_trained_model = pre_trained["model"]
-    #for n, p in model.named_parameters():
-    #    if 'layer' not in n:
-    for n, p in pre_trained_model.items():
-        print(n)
-    exit()
+    path1 = '/home/work/workspace/icefall/egs/librispeech/ASR/incremental_transf/conformer_12layers.pt'
+    pre_trained1 = torch.load(path1)
+    pre_trained_model1 = pre_trained1["model"]
+
+    path2 = '/home/work/workspace/icefall/egs/librispeech/ASR/incremental_transf/conformer_24layers.pt'
+    pre_trained2 = torch.load(path2)
+    pre_trained_model2 = pre_trained2["model"]
+
+    for n, p in model.named_parameters():
+        if 'layer' not in n:
+
+            layer_name_splited = n.split('.')
+
     num_param = sum([p.numel() for p in model.parameters()])
     logging.info(f"Number of model parameters: {num_param}")
 
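Note: the added `for n, p in model.named_parameters()` loop in this hunk is still a stub; it filters non-layer parameters and splits the name, but does not yet copy anything out of the two loaded checkpoints. Below is a minimal sketch of one way the checkpoints could be used to initialize matching parameters of the new model. The helper name `init_from_pretrained` and the fallback order (12-layer checkpoint first, then 24-layer) are illustrative assumptions, not part of this diff; `torch.load`, `state_dict`, and `load_state_dict(..., strict=False)` are standard PyTorch calls.

import torch

def init_from_pretrained(model, pre_trained_model1, pre_trained_model2):
    # Hypothetical helper (not part of this PR): build a state dict that takes
    # every parameter whose name and shape match from the 12-layer checkpoint
    # first, then from the 24-layer checkpoint, and load it non-strictly so
    # unmatched parameters keep their fresh initialization.
    own_state = model.state_dict()
    merged = {}
    for name, param in own_state.items():
        for ckpt in (pre_trained_model1, pre_trained_model2):
            if name in ckpt and ckpt[name].shape == param.shape:
                merged[name] = ckpt[name]
                break
    # strict=False tolerates parameters that exist only in the new model.
    missing, unexpected = model.load_state_dict(merged, strict=False)
    return missing, unexpected

Under these assumptions it would be called in run() right after model = get_transducer_model(params), e.g. init_from_pretrained(model, pre_trained_model1, pre_trained_model2).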