# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Union

import torch
import torch.nn as nn
from torch.cuda.amp import GradScaler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler

def save_checkpoint(
    filename: Path,
    model: Union[nn.Module, DDP],
    params: Optional[Dict[str, Any]] = None,
    optimizer: Optional[Optimizer] = None,
    scheduler: Optional[_LRScheduler] = None,
    scaler: Optional[GradScaler] = None,
    rank: int = 0,
) -> None:
    """Save training information to a file.

    Args:
      filename:
        The checkpoint filename.
      model:
        The model to be saved. We only save its `state_dict()`.
      params:
        User defined parameters, e.g., epoch, loss.
      optimizer:
        The optimizer to be saved. We only save its `state_dict()`.
      scheduler:
        The scheduler to be saved. We only save its `state_dict()`.
      scaler:
        The GradScaler to be saved. We only save its `state_dict()`.
      rank:
        Used in DDP. We save checkpoint only for the node whose rank is 0.
    Returns:
      Return None.
    """
    if rank != 0:
        return

    logging.info(f"Saving checkpoint to {filename}")

    if isinstance(model, DDP):
        model = model.module

    checkpoint = {
        "model": model.state_dict(),
        "optimizer": optimizer.state_dict() if optimizer is not None else None,
        "scheduler": scheduler.state_dict() if scheduler is not None else None,
        "grad_scaler": scaler.state_dict() if scaler is not None else None,
    }

    if params:
        for k, v in params.items():
            assert k not in checkpoint
            checkpoint[k] = v

    torch.save(checkpoint, filename)

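# Usage sketch for ``save_checkpoint`` (illustration only; ``exp_dir``, the
# model, and the optimizer below are hypothetical stand-ins for objects
# created by a real training loop):
#
#     save_checkpoint(
#         filename=exp_dir / "epoch-1.pt",
#         model=model,
#         params={"epoch": 1, "train_loss": 0.25},
#         optimizer=optimizer,
#         rank=rank,
#     )
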
def load_checkpoint(
    filename: Path,
    model: nn.Module,
    optimizer: Optional[Optimizer] = None,
    scheduler: Optional[_LRScheduler] = None,
    scaler: Optional[GradScaler] = None,
) -> Dict[str, Any]:
    """Load a checkpoint saved by :func:`save_checkpoint`.

    Args:
      filename:
        The checkpoint filename.
      model:
        The model whose parameters are overwritten by the `state_dict`
        stored in the checkpoint. Checkpoints saved by DDP (i.e., with
        keys prefixed by `module.`) are handled transparently.
      optimizer:
        If not None, its `state_dict()` is restored from the checkpoint.
      scheduler:
        If not None, its `state_dict()` is restored from the checkpoint.
      scaler:
        If not None, its `state_dict()` is restored from the checkpoint.
    Returns:
      Return the remaining entries of the checkpoint dict, e.g., the user
      defined `params` passed to :func:`save_checkpoint`.
    """
logging.info(f"Loading checkpoint from {filename}")
|
|
checkpoint = torch.load(filename, map_location="cpu")
|
|
|
|
if next(iter(checkpoint["model"])).startswith("module."):
|
|
logging.info("Loading checkpoint saved by DDP")
|
|
|
|
dst_state_dict = model.state_dict()
|
|
src_state_dict = checkpoint["model"]
|
|
for key in dst_state_dict.keys():
|
|
src_key = "{}.{}".format("module", key)
|
|
dst_state_dict[key] = src_state_dict.pop(src_key)
|
|
assert len(src_state_dict) == 0
|
|
model.load_state_dict(dst_state_dict, strict=False)
|
|
else:
|
|
model.load_state_dict(checkpoint["model"], strict=False)
|
|
|
|
checkpoint.pop("model")
|
|
|
|
def load(name, obj):
|
|
s = checkpoint.get(name, None)
|
|
if obj and s:
|
|
obj.load_state_dict(s)
|
|
checkpoint.pop(name)
|
|
|
|
load("optimizer", optimizer)
|
|
load("scheduler", scheduler)
|
|
load("grad_scaler", scaler)
|
|
|
|
return checkpoint
|
|
|
|
|
|
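# Usage sketch for ``load_checkpoint`` when resuming training (illustration
# only; the filename and surrounding objects are hypothetical, and "epoch"
# is assumed to have been stored via ``params`` at save time):
#
#     saved_params = load_checkpoint(
#         filename=exp_dir / "epoch-1.pt",
#         model=model,
#         optimizer=optimizer,
#     )
#     start_epoch = saved_params["epoch"] + 1
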
def average_checkpoints(filenames: List[Path]) -> dict:
    """Average a list of checkpoints.

    Args:
      filenames:
        Filenames of the checkpoints to be averaged. We assume all
        checkpoints are saved by :func:`save_checkpoint`.
    Returns:
      Return a dict (i.e., state_dict) which is the average of all
      model state dicts contained in the checkpoints.
    """
    n = len(filenames)

    avg = torch.load(filenames[0], map_location="cpu")["model"]
    for i in range(1, n):
        state_dict = torch.load(filenames[i], map_location="cpu")["model"]
        for k in avg:
            avg[k] += state_dict[k]

    for k in avg:
        if avg[k].is_floating_point():
            avg[k] /= n
        else:
            avg[k] //= n

    return avg
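
# Usage sketch for ``average_checkpoints`` (illustration only; the epoch
# range and filenames are hypothetical). Note that non-floating-point
# entries (e.g., integer buffers) are floor-divided when averaging:
#
#     filenames = [exp_dir / f"epoch-{i}.pt" for i in range(20, 30)]
#     avg_state_dict = average_checkpoints(filenames)
#     model.load_state_dict(avg_state_dict)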