Add GRID AVSR task results

Mingshuang Luo 2021-12-22 11:20:41 +08:00
parent 27bfcc4ea8
commit a9abcc5fda
13 changed files with 1508 additions and 29 deletions

View File

@@ -30,8 +30,8 @@ import torch
 import torch.nn as nn
 from torch.utils.data import DataLoader
-from local.dataset_audio import MyDataset
-from model import TdnnLstm
+from local.dataset_audio import dataset_audio
+from model import AudioNet
 from icefall.checkpoint import average_checkpoints, load_checkpoint
 from icefall.decode import (
@@ -143,8 +143,9 @@ def get_params() -> AttributeDict:
             "video_path": Path("download/GRID/lip/"),
             "anno_path": Path("download/GRID/GRID_align_txt"),
             "val_list": Path("download/GRID/unseen_val.txt"),
-            "aud_padding": 200,
-            "num_workers": 1,
+            "aud_padding": 480,
+            "sample_rate": 16000,
+            "num_workers": 16,
             "batch_size": 120,
         }
     )
@@ -440,7 +441,7 @@ def main():
     else:
         G = None
-    model = TdnnLstm(
+    model = AudioNet(
         num_features=params.feature_dim,
         num_classes=max_token_id + 1,  # +1 for the blank symbol
         subsampling_factor=params.subsampling_factor,
@@ -466,14 +467,14 @@ def main():
     model.to(device)
     model.eval()
-    grid = MyDataset(
+    grid = dataset_audio(
         params.video_path,
         params.anno_path,
         params.val_list,
         params.aud_padding,
-        "test",
-        16000,
+        params.sample_rate,
         params.feature_dim,
+        "test",
     )
     test_dl = DataLoader(
         grid,

View File

@@ -19,7 +19,7 @@ import torch
 import torch.nn as nn
-class TdnnLstm(nn.Module):
+class AudioNet(nn.Module):
     def __init__(
         self, num_features: int, num_classes: int, subsampling_factor: int = 3
     ) -> None:

View File

@@ -32,9 +32,9 @@ import torch.nn as nn
 import torch.optim as optim
 from torch.utils.data import DataLoader
-from local.dataset_audio import MyDataset
+from local.dataset_audio import dataset_audio
 from lhotse.utils import fix_random_seed
-from model import TdnnLstm
+from model import AudioNet
 from torch import Tensor
 from torch.nn.parallel import DistributedDataParallel as DDP
 from torch.nn.utils import clip_grad_norm_
@@ -179,8 +179,9 @@ def get_params() -> AttributeDict:
             "video_path": Path("download/GRID/lip/"),
             "anno_path": Path("download/GRID/GRID_align_txt"),
             "train_list": Path("download/GRID/unseen_train.txt"),
-            "aud_padding": 200,
-            "num_workers": 1,
+            "aud_padding": 480,
+            "sample_rate": 16000,
+            "num_workers": 16,
             "batch_size": 120,
         }
     )
@@ -510,7 +511,7 @@ def run(rank, world_size, args):
     graph_compiler = CtcTrainingGraphCompiler(lexicon=lexicon, device=device)
-    model = TdnnLstm(
+    model = AudioNet(
         num_features=params.feature_dim,
         num_classes=max_token_id + 1,  # +1 for the blank symbol
         subsampling_factor=params.subsampling_factor,
@@ -532,14 +533,14 @@ def run(rank, world_size, args):
     optimizer.load_state_dict(checkpoints["optimizer"])
     scheduler.load_state_dict(checkpoints["scheduler"])
-    grid = MyDataset(
+    grid = dataset_audio(
         params.video_path,
         params.anno_path,
         params.train_list,
-        params.vid_padding,
-        "train",
-        16000,
+        params.aud_padding,
+        params.sample_rate,
         params.feature_dim,
+        "train",
     )
     train_dl = DataLoader(
train_dl = DataLoader( train_dl = DataLoader(

View File

@@ -0,0 +1,514 @@
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang
# Mingshuang Luo)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from utils import encode_supervisions
import k2
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from local.dataset_av import dataset_av
from model import CombineNet
from icefall.checkpoint import average_checkpoints, load_checkpoint
from icefall.decode import (
get_lattice,
nbest_decoding,
one_best_decoding,
rescore_with_n_best_list,
rescore_with_whole_lattice,
)
from icefall.lexicon import Lexicon
from icefall.utils import (
AttributeDict,
get_texts,
setup_logger,
store_transcripts,
str2bool,
write_error_stats,
)
def get_parser():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "--epoch",
        type=int,
        default=19,
        help="It specifies the checkpoint to use for decoding. "
        "Note: Epoch counts from 0.",
    )

    parser.add_argument(
        "--avg",
        type=int,
        default=5,
        help="Number of checkpoints to average. Automatically select "
        "consecutive checkpoints before the checkpoint specified by "
        "'--epoch'. ",
    )

    parser.add_argument(
        "--method",
        type=str,
        default="whole-lattice-rescoring",
        help="""Decoding method.
        Supported values are:
        - (1) 1best. Extract the best path from the decoding lattice as the
          decoding result.
        - (2) nbest. Extract n paths from the decoding lattice; the path
          with the highest score is the decoding result.
        - (3) nbest-rescoring. Extract n paths from the decoding lattice and
          rescore them with an n-gram LM (e.g., a 4-gram LM); the path with
          the highest score is the decoding result.
        - (4) whole-lattice-rescoring. Rescore the decoding lattice with an
          n-gram LM (e.g., a 4-gram LM); the best path of the rescored
          lattice is the decoding result.
        """,
    )

    parser.add_argument(
        "--num-paths",
        type=int,
        default=100,
        help="""Number of paths for n-best based decoding methods.
        Used only when "method" is one of the following values:
        nbest, nbest-rescoring
        """,
    )

    parser.add_argument(
        "--nbest-scale",
        type=float,
        default=0.5,
        help="""The scale to be applied to `lattice.scores`.
        It's needed if you use any kind of n-best based rescoring.
        Used only when "method" is one of the following values:
        nbest, nbest-rescoring
        A smaller value results in more unique paths.
        """,
    )

    parser.add_argument(
        "--export",
        type=str2bool,
        default=False,
        help="""When enabled, the averaged model is saved to
        combinenet_ctc_avsr/exp/pretrained.pt. Note: only model.state_dict()
        is saved. pretrained.pt contains a dict {"model": model.state_dict()},
        which can be loaded by `icefall.checkpoint.load_checkpoint()`.
        """,
    )

    return parser
def get_params() -> AttributeDict:
    params = AttributeDict(
        {
            "exp_dir": Path("combinenet_ctc_avsr/exp"),
            "lang_dir": Path("data/lang_character"),
            "lm_dir": Path("data/lm"),
            "feature_dim": 80,
            "subsampling_factor": 3,
            "search_beam": 20,
            "output_beam": 5,
            "min_active_states": 30,
            "max_active_states": 10000,
            "use_double_scores": True,
            # parameters for dataset
            "video_path": Path("download/GRID/lip/"),
            "anno_path": Path("download/GRID/GRID_align_txt"),
            "val_list": Path("download/GRID/unseen_val.txt"),
            "vid_padding": 75,
            "aud_padding": 450,
            "sample_rate": 16000,
            "num_workers": 16,
            "batch_size": 120,
        }
    )
    return params
def decode_one_batch(
    params: AttributeDict,
    model: nn.Module,
    HLG: k2.Fsa,
    batch: dict,
    lexicon: Lexicon,
    G: Optional[k2.Fsa] = None,
) -> Dict[str, List[List[str]]]:
    """Decode one batch and return the result in a dict. The dict has the
    following format:

    - key: It indicates the setting used for decoding. For example,
           if no rescoring is used, the key is the string `no_rescore`.
           If LM rescoring is used, the key is the string `lm_scale_xxx`,
           where `xxx` is the value of `lm_scale`. An example key is
           `lm_scale_0.7`.
    - value: It contains the decoding result. `len(value)` equals the
             batch size. `value[i]` is the decoding result for the i-th
             utterance in the given batch.
    Args:
      params:
        It's the return value of :func:`get_params`.

        - params.method is "1best", it uses 1best decoding without LM
          rescoring.
        - params.method is "nbest", it uses nbest decoding without LM
          rescoring.
        - params.method is "nbest-rescoring", it uses nbest LM rescoring.
        - params.method is "whole-lattice-rescoring", it uses whole lattice
          LM rescoring.
      model:
        The neural model.
      HLG:
        The decoding graph.
      batch:
        It is the return value from iterating the DataLoader built on
        `local.dataset_av.dataset_av`. It contains the keys "vid", "aud"
        and "txt".
      lexicon:
        It contains the word symbol table.
      G:
        An LM. It is not None when params.method is "nbest-rescoring"
        or "whole-lattice-rescoring". In general, the G in HLG
        is a 3-gram LM, while this G is a 4-gram LM.
    Returns:
      Return the decoding result. See above description for the format of
      the returned dict.
    """
    device = HLG.device
    audio_feature = batch["aud"]
    video_feature = batch["vid"]
    audio_feature = audio_feature.permute(0, 2, 1)  # (N, T, C) -> (N, C, T)
    assert audio_feature.ndim == 3
    audio_feature = audio_feature.to(device)
    assert video_feature.ndim == 5
    video_feature = video_feature.to(device)

    nnet_output = model(video_feature, audio_feature)

    nnet_output_shape = nnet_output.size()
    supervision_segments, texts = encode_supervisions(nnet_output_shape, batch)

    lattice = get_lattice(
        nnet_output=nnet_output,
        decoding_graph=HLG,
        supervision_segments=supervision_segments,
        search_beam=params.search_beam,
        output_beam=params.output_beam,
        min_active_states=params.min_active_states,
        max_active_states=params.max_active_states,
    )

    if params.method in ["1best", "nbest"]:
        if params.method == "1best":
            best_path = one_best_decoding(
                lattice=lattice, use_double_scores=params.use_double_scores
            )
            key = "no_rescore"
        else:
            best_path = nbest_decoding(
                lattice=lattice,
                num_paths=params.num_paths,
                use_double_scores=params.use_double_scores,
                nbest_scale=params.nbest_scale,
            )
            key = f"no_rescore-{params.num_paths}"

        hyps = get_texts(best_path)
        hyps = [[lexicon.word_table[i] for i in ids] for ids in hyps]
        return {key: hyps}

    assert params.method in ["nbest-rescoring", "whole-lattice-rescoring"]

    lm_scale_list = [0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09]
    lm_scale_list += [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
    lm_scale_list += [0.8, 0.9, 1.0, 1.1, 1.2, 1.3]
    lm_scale_list += [1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0]

    if params.method == "nbest-rescoring":
        best_path_dict = rescore_with_n_best_list(
            lattice=lattice,
            G=G,
            num_paths=params.num_paths,
            lm_scale_list=lm_scale_list,
            nbest_scale=params.nbest_scale,
        )
    else:
        best_path_dict = rescore_with_whole_lattice(
            lattice=lattice,
            G_with_epsilon_loops=G,
            lm_scale_list=lm_scale_list,
        )

    ans = dict()
    for lm_scale_str, best_path in best_path_dict.items():
        hyps = get_texts(best_path)
        hyps = [[lexicon.word_table[i] for i in ids] for ids in hyps]
        ans[lm_scale_str] = hyps
    return ans
def decode_dataset(
    dl: torch.utils.data.DataLoader,
    params: AttributeDict,
    model: nn.Module,
    HLG: k2.Fsa,
    lexicon: Lexicon,
    G: Optional[k2.Fsa] = None,
) -> Dict[str, List[Tuple[List[str], List[str]]]]:
    """Decode the dataset.

    Args:
      dl:
        PyTorch's dataloader containing the dataset to decode.
      params:
        It is returned by :func:`get_params`.
      model:
        The neural model.
      HLG:
        The decoding graph.
      lexicon:
        It contains the word symbol table.
      G:
        An LM. It is not None when params.method is "nbest-rescoring"
        or "whole-lattice-rescoring". In general, the G in HLG
        is a 3-gram LM, while this G is a 4-gram LM.
    Returns:
      Return a dict, whose key may be "no_rescore" if no LM rescoring
      is used, or it may be "lm_scale_0.7" if LM rescoring is used.
      Its value is a list of tuples. Each tuple contains two elements:
      the first is the reference transcript, and the second is the
      predicted result.
    """
    num_cuts = 0

    try:
        num_batches = len(dl)
    except TypeError:
        num_batches = "?"

    results = defaultdict(list)
    for batch_idx, batch in enumerate(dl):
        texts = batch["txt"]

        hyps_dict = decode_one_batch(
            params=params,
            model=model,
            HLG=HLG,
            batch=batch,
            lexicon=lexicon,
            G=G,
        )

        for lm_scale, hyps in hyps_dict.items():
            this_batch = []
            assert len(hyps) == len(texts)
            for hyp_words, ref_text in zip(hyps, texts):
                ref_words = ref_text.split()
                this_batch.append((ref_words, hyp_words))

            results[lm_scale].extend(this_batch)

        num_cuts += len(batch["txt"])

        if batch_idx % 10 == 0:
            batch_str = f"{batch_idx}/{num_batches}"
            logging.info(
                f"batch {batch_str}, cuts processed until now is {num_cuts}"
            )
    return results
def save_results(
    params: AttributeDict,
    test_set_name: str,
    results_dict: Dict[str, List[Tuple[List[str], List[str]]]],
):
    test_set_wers = dict()
    for key, results in results_dict.items():
        recog_path = params.exp_dir / f"recogs-{test_set_name}-{key}.txt"
        store_transcripts(filename=recog_path, texts=results)
        logging.info(f"The transcripts are stored in {recog_path}")

        # The following prints out WERs, per-word error statistics and
        # aligned ref/hyp pairs.
        errs_filename = params.exp_dir / f"errs-{test_set_name}-{key}.txt"
        with open(errs_filename, "w") as f:
            wer = write_error_stats(f, f"{test_set_name}-{key}", results)
            test_set_wers[key] = wer

        logging.info("Wrote detailed error stats to {}".format(errs_filename))

    test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1])
    errs_info = params.exp_dir / f"wer-summary-{test_set_name}.txt"
    with open(errs_info, "w") as f:
        print("settings\tWER", file=f)
        for key, val in test_set_wers:
            print("{}\t{}".format(key, val), file=f)

    s = "\nFor {}, WER of different settings are:\n".format(test_set_name)
    note = "\tbest for {}".format(test_set_name)
    for key, val in test_set_wers:
        s += "{}\t{}{}\n".format(key, val, note)
        note = ""
    logging.info(s)
@torch.no_grad()
def main():
    parser = get_parser()
    args = parser.parse_args()

    params = get_params()
    params.update(vars(args))

    setup_logger(f"{params.exp_dir}/log/log-decode")
    logging.info("Decoding started")
    logging.info(params)

    lexicon = Lexicon(params.lang_dir)
    max_token_id = max(lexicon.tokens)

    device = torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda", 0)

    logging.info(f"device: {device}")

    HLG = k2.Fsa.from_dict(
        torch.load(f"{params.lang_dir}/HLG.pt", map_location="cpu")
    )
    HLG = HLG.to(device)
    assert HLG.requires_grad is False

    if not hasattr(HLG, "lm_scores"):
        HLG.lm_scores = HLG.scores.clone()

    if params.method in ["nbest-rescoring", "whole-lattice-rescoring"]:
        if not (params.lm_dir / "G_4_gram.pt").is_file():
            logging.info("Loading G_4_gram.fst.txt")
            logging.warning("It may take 8 minutes.")
            with open(params.lm_dir / "G_4_gram.fst.txt") as f:
                first_word_disambig_id = lexicon.word_table["#0"]

                G = k2.Fsa.from_openfst(f.read(), acceptor=False)
                # G.aux_labels is not needed in later computations, so
                # remove it here.
                del G.aux_labels
                # CAUTION: The following line is crucial.
                # Arcs entering the back-off state have label equal to #0.
                # We have to change it to 0 here.
                G.labels[G.labels >= first_word_disambig_id] = 0
                G = k2.Fsa.from_fsas([G]).to(device)
                G = k2.arc_sort(G)
                torch.save(G.as_dict(), params.lm_dir / "G_4_gram.pt")
        else:
            logging.info("Loading pre-compiled G_4_gram.pt")
            d = torch.load(params.lm_dir / "G_4_gram.pt", map_location="cpu")
            G = k2.Fsa.from_dict(d).to(device)

        if params.method == "whole-lattice-rescoring":
            # Add epsilon self-loops to G as we will compose
            # it with the whole lattice later
            G = k2.add_epsilon_self_loops(G)
            G = k2.arc_sort(G)
            G = G.to(device)

        # G.lm_scores is used to replace HLG.lm_scores during
        # LM rescoring.
        G.lm_scores = G.scores.clone()
    else:
        G = None

    model = CombineNet(
        num_features=params.feature_dim,
        num_classes=max_token_id + 1,  # +1 for the blank symbol
        subsampling_factor=params.subsampling_factor,
    )

    if params.avg == 1:
        load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
    else:
        start = params.epoch - params.avg + 1
        filenames = []
        for i in range(start, params.epoch + 1):
            if i >= 0:
                filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
        logging.info(f"averaging {filenames}")
        model.load_state_dict(average_checkpoints(filenames))

    if params.export:
        logging.info(f"Export averaged model to {params.exp_dir}/pretrained.pt")
        torch.save(
            {"model": model.state_dict()}, f"{params.exp_dir}/pretrained.pt"
        )
        return

    model.to(device)
    model.eval()

    grid = dataset_av(
        params.video_path,
        params.anno_path,
        params.val_list,
        params.feature_dim,
        params.vid_padding,
        params.aud_padding,
        params.sample_rate,
        "test",
    )
    test_dl = DataLoader(
        grid,
        batch_size=params.batch_size,
        shuffle=False,
        num_workers=params.num_workers,
        drop_last=False,
    )

    test_set = "test"
    results_dict = decode_dataset(
        dl=test_dl,
        params=params,
        model=model,
        HLG=HLG,
        lexicon=lexicon,
        G=G,
    )

    save_results(
        params=params, test_set_name=test_set, results_dict=results_dict
    )

    logging.info("Done!")


if __name__ == "__main__":
    main()
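With the defaults above (--epoch 19, --avg 5), decoding averages the five consecutive checkpoints epoch-15.pt through epoch-19.pt. A minimal sketch of the selection logic (the directory comes from params.exp_dir; the values are illustrative):

    epoch, avg = 19, 5
    start = epoch - avg + 1  # 15
    filenames = [
        f"combinenet_ctc_avsr/exp/epoch-{i}.pt"
        for i in range(start, epoch + 1)
        if i >= 0
    ]
    # ['combinenet_ctc_avsr/exp/epoch-15.pt', ...,
    #  'combinenet_ctc_avsr/exp/epoch-19.pt']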

View File

@@ -0,0 +1,176 @@
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
class CombineNet(nn.Module):
    def __init__(
        self, num_features: int, num_classes: int, subsampling_factor: int = 3
    ) -> None:
        """
        Args:
          num_features:
            The input dimension of the audio encoder.
          num_classes:
            The output dimension of the CombineNet model.
          subsampling_factor:
            It reduces the number of output frames by this factor.
        """
        super().__init__()
        self.num_features = num_features
        self.num_classes = num_classes
        self.subsampling_factor = subsampling_factor

        # the audio encoder
        self.audio_encoder = nn.Sequential(
            nn.Conv1d(
                in_channels=num_features,
                out_channels=512,
                kernel_size=3,
                stride=1,
                padding=1,
            ),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(num_features=512, affine=False),
            nn.Conv1d(
                in_channels=512,
                out_channels=512,
                kernel_size=3,
                stride=1,
                padding=1,
            ),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(num_features=512, affine=False),
            nn.Conv1d(
                in_channels=512,
                out_channels=512,
                kernel_size=3,
                # stride=self.subsampling_factor,  # stride: subsampling_factor!
                stride=1,
                padding=1,
            ),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(num_features=512, affine=False),
            nn.Conv1d(
                in_channels=512,
                out_channels=512,
                kernel_size=3,
                stride=self.subsampling_factor,  # stride: subsampling_factor!
            ),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(num_features=512, affine=False),
        )

        # the video encoder
        self.video_encoder = nn.Sequential(
            nn.Conv3d(
                in_channels=3,
                out_channels=32,
                kernel_size=(3, 5, 5),
                stride=(1, 2, 2),
                padding=(1, 2, 2),
            ),
            nn.ReLU(inplace=True),
            nn.Dropout3d(p=0.1),
            nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)),
            nn.Conv3d(
                in_channels=32,
                out_channels=64,
                kernel_size=(3, 5, 5),
                stride=(1, 1, 1),
                padding=(1, 2, 2),
            ),
            nn.ReLU(inplace=True),
            nn.Dropout3d(p=0.1),
            nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)),
            nn.Conv3d(
                in_channels=64,
                out_channels=96,
                kernel_size=(3, 3, 3),
                stride=(1, 1, 1),
                padding=(1, 1, 1),
            ),
            nn.ReLU(inplace=True),
            nn.Dropout3d(p=0.1),
            nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)),
        )
        self.linear_visual = nn.Linear(96 * 4 * 8, 512)

        # the audio-visual combining encoder based on GRU
        self.grus = nn.ModuleList(
            [
                nn.GRU(
                    input_size=512 * 2,
                    hidden_size=512,
                    num_layers=1,
                    bidirectional=True,
                )
                for _ in range(4)
            ]
        )
        self.gru_bnorms = nn.ModuleList(
            [nn.BatchNorm1d(num_features=1024, affine=False) for _ in range(4)]
        )
        self.dropout = nn.Dropout(0.2)
        self.linear = nn.Linear(
            in_features=512 * 2, out_features=self.num_classes
        )
    def forward(self, x_v, x_a):
        """
        Args:
          x_v:
            The video input. Its shape is [N, 3, T_v, H, W].
          x_a:
            The audio input. Its shape is [N, C, T_a], where C is the
            feature dimension.
        Returns:
          The output tensor has shape [N, T, C], where C is the number
          of classes.
        """
        x_v = self.video_encoder(x_v)  # (N, 96, T_v, 4, 8)
        x_v = x_v.permute(2, 0, 1, 3, 4).contiguous()  # (T_v, N, 96, 4, 8)
        x_v = x_v.view(x_v.size(0), x_v.size(1), -1)  # (T_v, N, 96 * 4 * 8)
        x_v = self.linear_visual(x_v)  # (T_v, N, 512)

        x_a = self.audio_encoder(x_a)  # (N, 512, T)

        x_v = x_v.permute(1, 0, 2)  # (N, T_v, 512)
        x_a = x_a.permute(0, 2, 1)  # (N, T, 512)

        # Repeat each visual frame twice so that the visual sequence
        # matches the subsampled audio sequence in the time axis.
        x_v_copy = x_v
        x_v_stack = torch.stack((x_v, x_v_copy), dim=2)  # (N, T_v, 2, 512)
        x_v = x_v_stack.view(
            x_v_stack.size(0), 2 * x_v_stack.size(1), x_v_stack.size(3)
        )  # (N, 2 * T_v, 512), with 2 * T_v == T

        x = torch.cat((x_v, x_a), dim=2)  # (N, T, 1024)
        x = x.permute(1, 0, 2)  # (N, T, C) -> (T, N, C) -> how GRU expects it
        for gru, bnorm in zip(self.grus, self.gru_bnorms):
            x_new, _ = gru(x)
            x_new = bnorm(x_new.permute(1, 2, 0)).permute(
                2, 0, 1
            )  # (T, N, C) -> (N, C, T) -> (T, N, C)
            x_new = self.dropout(x_new)
            x = x_new + x  # skip connections

        x = x.transpose(
            1, 0
        )  # (T, N, C) -> (N, T, C) -> linear expects "features" in the last dim
        x = self.linear(x)
        x = nn.functional.log_softmax(x, dim=-1)
        return x
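To make the shape bookkeeping in forward() concrete, here is a minimal sketch using the padding lengths from the training parameters in this commit (vid_padding=80 frames of 64x128 RGB, aud_padding=480 fbank frames with 80 bins); the batch size and num_classes are illustrative:

    import torch
    from model import CombineNet

    model = CombineNet(num_features=80, num_classes=30)
    x_v = torch.randn(2, 3, 80, 64, 128)  # (N, 3, T_v, H, W)
    x_a = torch.randn(2, 80, 480)  # (N, C, T_a)
    y = model(x_v, x_a)
    # The audio encoder subsamples 480 -> 160 frames, and each of the
    # 80 visual frames is repeated twice to match, so:
    print(y.shape)  # torch.Size([2, 160, 30])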

View File

@@ -0,0 +1,625 @@
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang
# Mingshuang Luo)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, Optional, Tuple
from utils import encode_supervisions
import k2
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from local.dataset_av import dataset_av
from lhotse.utils import fix_random_seed
from model import CombineNet
from torch import Tensor
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
from icefall.checkpoint import load_checkpoint
from icefall.checkpoint import save_checkpoint as save_checkpoint_impl
from icefall.dist import cleanup_dist, setup_dist
from icefall.graph_compiler import CtcTrainingGraphCompiler
from icefall.lexicon import Lexicon
from icefall.utils import (
AttributeDict,
MetricsTracker,
get_env_info,
setup_logger,
str2bool,
)
def get_parser():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "--world-size",
        type=int,
        default=1,
        help="Number of GPUs for DDP training.",
    )

    parser.add_argument(
        "--master-port",
        type=int,
        default=12354,
        help="Master port to use for DDP training.",
    )

    parser.add_argument(
        "--tensorboard",
        type=str2bool,
        default=True,
        help="Should various information be logged in tensorboard.",
    )

    parser.add_argument(
        "--num-epochs",
        type=int,
        default=30,
        help="Number of epochs to train.",
    )

    parser.add_argument(
        "--start-epoch",
        type=int,
        default=0,
        help="""Resume training from this epoch.
        If it is positive, it will load the checkpoint from
        combinenet_ctc_avsr/exp/epoch-{start_epoch-1}.pt
        """,
    )

    return parser
def get_params() -> AttributeDict:
    """Return a dict containing training parameters.

    All training related parameters that are not passed from the commandline
    are saved in the variable `params`.

    Commandline options are merged into `params` after they are parsed, so
    you can also access them via `params`.

    Explanation of options saved in `params`:

        - exp_dir: It specifies the directory where all training related
                   files, e.g., checkpoints, log, etc, are saved

        - lang_dir: It contains language related input files such as
                    "lexicon.txt"

        - lr: It specifies the initial learning rate

        - feature_dim: The model input dim. It has to match the one used
                       in computing features.

        - weight_decay: The weight_decay for the optimizer.

        - subsampling_factor: The subsampling factor for the model.

        - best_train_loss: Best training loss so far. It is used to select
                           the model that has the lowest training loss. It is
                           updated during the training.

        - best_valid_loss: Best validation loss so far. It is used to select
                           the model that has the lowest validation loss. It is
                           updated during the training.

        - best_train_epoch: It is the epoch that has the best training loss.

        - best_valid_epoch: It is the epoch that has the best validation loss.

        - batch_idx_train: Used to write statistics to tensorboard. It
                           contains the number of batches trained so far
                           across epochs.

        - log_interval: Print training loss if batch_idx % log_interval == 0

        - reset_interval: Reset statistics if batch_idx % reset_interval == 0

        - valid_interval: Run validation if batch_idx % valid_interval == 0

        - beam_size: It is used in k2.ctc_loss

        - reduction: It is used in k2.ctc_loss

        - use_double_scores: It is used in k2.ctc_loss
    """
    params = AttributeDict(
        {
            "exp_dir": Path("combinenet_ctc_avsr/exp"),
            "lang_dir": Path("data/lang_character"),
            "lr": 4e-4,
            "feature_dim": 80,
            "weight_decay": 5e-4,
            "subsampling_factor": 3,
            "best_train_loss": float("inf"),
            "best_valid_loss": float("inf"),
            "best_train_epoch": -1,
            "best_valid_epoch": -1,
            "batch_idx_train": 0,
            "log_interval": 1,
            "reset_interval": 200,
            "valid_interval": 1000,
            "beam_size": 10,
            "reduction": "sum",
            "use_double_scores": True,
            "env_info": get_env_info(),
            # parameters for dataset
            "video_path": Path("download/GRID/lip/"),
            "anno_path": Path("download/GRID/GRID_align_txt"),
            "train_list": Path("download/GRID/unseen_train.txt"),
            "vid_padding": 80,
            "aud_padding": 480,
            "sample_rate": 16000,
            "num_workers": 16,
            "batch_size": 120,
        }
    )
    return params
def load_checkpoint_if_available(
    params: AttributeDict,
    model: nn.Module,
    optimizer: Optional[torch.optim.Optimizer] = None,
    scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
) -> Optional[Dict[str, Any]]:
    """Load checkpoint from file.

    If params.start_epoch is positive, it will load the checkpoint from
    `params.start_epoch - 1`. Otherwise, this function does nothing.

    Apart from loading state dict for `model`, `optimizer` and `scheduler`,
    it also updates `best_train_epoch`, `best_train_loss`, `best_valid_epoch`,
    and `best_valid_loss` in `params`.

    Args:
      params:
        The return value of :func:`get_params`.
      model:
        The training model.
      optimizer:
        The optimizer that we are using.
      scheduler:
        The learning rate scheduler we are using.
    Returns:
      Return the loaded checkpoint dict, or None if no checkpoint is loaded.
    """
    if params.start_epoch <= 0:
        return

    filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt"
    saved_params = load_checkpoint(
        filename,
        model=model,
        optimizer=optimizer,
        scheduler=scheduler,
    )

    keys = [
        "best_train_epoch",
        "best_valid_epoch",
        "batch_idx_train",
        "best_train_loss",
        "best_valid_loss",
    ]
    for k in keys:
        params[k] = saved_params[k]

    return saved_params
def save_checkpoint(
    params: AttributeDict,
    model: nn.Module,
    optimizer: torch.optim.Optimizer,
    scheduler: torch.optim.lr_scheduler._LRScheduler,
    rank: int = 0,
) -> None:
    """Save model, optimizer, scheduler and training stats to file.

    Args:
      params:
        It is returned by :func:`get_params`.
      model:
        The training model.
    """
    if rank != 0:
        return
    filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt"
    save_checkpoint_impl(
        filename=filename,
        model=model,
        params=params,
        optimizer=optimizer,
        scheduler=scheduler,
        rank=rank,
    )

    if params.best_train_epoch == params.cur_epoch:
        best_train_filename = params.exp_dir / "best-train-loss.pt"
        copyfile(src=filename, dst=best_train_filename)

    if params.best_valid_epoch == params.cur_epoch:
        best_valid_filename = params.exp_dir / "best-valid-loss.pt"
        copyfile(src=filename, dst=best_valid_filename)
def compute_loss(
    params: AttributeDict,
    model: nn.Module,
    batch: dict,
    graph_compiler: CtcTrainingGraphCompiler,
    is_training: bool,
) -> Tuple[Tensor, MetricsTracker]:
    """
    Compute CTC loss given the model and its inputs.

    Args:
      params:
        Parameters for training. See :func:`get_params`.
      model:
        The model for training. It is an instance of CombineNet in our case.
      batch:
        A batch of data produced by `local.dataset_av.dataset_av`. It
        contains the keys "vid", "aud" and "txt".
      graph_compiler:
        It is used to build a decoding graph from a ctc topo and training
        transcript. The training transcript is contained in the given `batch`,
        while the ctc topo is built when this compiler is instantiated.
      is_training:
        True for training. False for validation. When it is True, this
        function enables autograd during computation; when it is False, it
        disables autograd.
    """
    device = graph_compiler.device
    audio_feature = batch["aud"]
    video_feature = batch["vid"]
    audio_feature = audio_feature.permute(
        0, 2, 1
    )  # now feature size is (N, C, T)
    assert audio_feature.ndim == 3
    audio_feature = audio_feature.to(device)

    assert video_feature.ndim == 5
    video_feature = video_feature.to(device)

    with torch.set_grad_enabled(is_training):
        nnet_output = model(
            video_feature, audio_feature
        )  # nnet_output size is (N, T, C)

    # NOTE: We need `encode_supervisions` to sort sequences with
    # different duration in decreasing order, required by
    # `k2.intersect_dense` called in `k2.ctc_loss`
    supervision_segments, texts = encode_supervisions(nnet_output.size(), batch)

    decoding_graph = graph_compiler.compile(texts)

    dense_fsa_vec = k2.DenseFsaVec(
        nnet_output,
        supervision_segments,
        allow_truncate=params.subsampling_factor - 1,
    )

    loss = k2.ctc_loss(
        decoding_graph=decoding_graph,
        dense_fsa_vec=dense_fsa_vec,
        output_beam=params.beam_size,
        reduction=params.reduction,
        use_double_scores=params.use_double_scores,
    )

    assert loss.requires_grad == is_training

    info = MetricsTracker()
    info["frames"] = supervision_segments[:, 2].sum().item()
    info["loss"] = loss.detach().cpu().item()

    return loss, info
def compute_validation_loss(
    params: AttributeDict,
    model: nn.Module,
    graph_compiler: CtcTrainingGraphCompiler,
    valid_dl: torch.utils.data.DataLoader,
    world_size: int = 1,
) -> MetricsTracker:
    """Run the validation process. The validation loss
    is saved in `params.valid_loss`.
    """
    model.eval()

    tot_loss = MetricsTracker()

    for batch_idx, batch in enumerate(valid_dl):
        loss, loss_info = compute_loss(
            params=params,
            model=model,
            batch=batch,
            graph_compiler=graph_compiler,
            is_training=False,
        )
        assert loss.requires_grad is False
        tot_loss = tot_loss + loss_info

    if world_size > 1:
        tot_loss.reduce(loss.device)

    loss_value = tot_loss["loss"] / tot_loss["frames"]
    if loss_value < params.best_valid_loss:
        params.best_valid_epoch = params.cur_epoch
        params.best_valid_loss = loss_value

    return tot_loss
def train_one_epoch(
    params: AttributeDict,
    model: nn.Module,
    optimizer: torch.optim.Optimizer,
    graph_compiler: CtcTrainingGraphCompiler,
    train_dl: torch.utils.data.DataLoader,
    valid_dl: torch.utils.data.DataLoader,
    tb_writer: Optional[SummaryWriter] = None,
    world_size: int = 1,
) -> None:
    """Train the model for one epoch.

    The training loss from the mean of all frames is saved in
    `params.train_loss`. It runs the validation process every
    `params.valid_interval` batches.

    Args:
      params:
        It is returned by :func:`get_params`.
      model:
        The model for training.
      optimizer:
        The optimizer we are using.
      graph_compiler:
        It is used to convert transcripts to FSAs.
      train_dl:
        Dataloader for the training dataset.
      valid_dl:
        Dataloader for the validation dataset.
      tb_writer:
        Writer to write log messages to tensorboard.
      world_size:
        Number of nodes in DDP training. If it is 1, DDP is disabled.
    """
    model.train()

    tot_loss = MetricsTracker()

    for batch_idx, batch in enumerate(train_dl):
        params.batch_idx_train += 1
        batch_size = len(batch["txt"])

        loss, loss_info = compute_loss(
            params=params,
            model=model,
            batch=batch,
            graph_compiler=graph_compiler,
            is_training=True,
        )
        # summary stats
        tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info

        optimizer.zero_grad()
        loss.backward()
        clip_grad_norm_(model.parameters(), 5.0, 2.0)
        optimizer.step()

        if batch_idx % params.log_interval == 0:
            logging.info(
                f"Epoch {params.cur_epoch}, "
                f"batch {batch_idx}, loss[{loss_info}], "
                f"tot_loss[{tot_loss}], batch size: {batch_size}"
            )

        if batch_idx % params.log_interval == 0:
            if tb_writer is not None:
                loss_info.write_summary(
                    tb_writer, "train/current_", params.batch_idx_train
                )
                tot_loss.write_summary(
                    tb_writer, "train/tot_", params.batch_idx_train
                )

        if batch_idx > 0 and batch_idx % params.valid_interval == 0:
            valid_info = compute_validation_loss(
                params=params,
                model=model,
                graph_compiler=graph_compiler,
                valid_dl=valid_dl,
                world_size=world_size,
            )
            model.train()
            logging.info(f"Epoch {params.cur_epoch}, validation {valid_info}")
            if tb_writer is not None:
                valid_info.write_summary(
                    tb_writer,
                    "train/valid_",
                    params.batch_idx_train,
                )

    loss_value = tot_loss["loss"] / tot_loss["frames"]
    params.train_loss = loss_value

    if params.train_loss < params.best_train_loss:
        params.best_train_epoch = params.cur_epoch
        params.best_train_loss = params.train_loss
def run(rank, world_size, args):
    """
    Args:
      rank:
        It is a value between 0 and `world_size-1`, which is
        passed automatically by `mp.spawn()` in :func:`main`.
        The node with rank 0 is responsible for saving checkpoints.
      world_size:
        Number of GPUs for DDP training.
      args:
        The return value of get_parser().parse_args()
    """
    params = get_params()
    params.update(vars(args))

    fix_random_seed(42)
    if world_size > 1:
        setup_dist(rank, world_size, params.master_port)

    setup_logger(f"{params.exp_dir}/log/log-train")
    logging.info("Training started")
    logging.info(params)

    if args.tensorboard and rank == 0:
        tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard")
    else:
        tb_writer = None

    lexicon = Lexicon(params.lang_dir)
    max_token_id = max(lexicon.tokens)

    device = torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda", rank)

    graph_compiler = CtcTrainingGraphCompiler(lexicon=lexicon, device=device)

    model = CombineNet(
        num_features=params.feature_dim,
        num_classes=max_token_id + 1,  # +1 for the blank symbol
        subsampling_factor=params.subsampling_factor,
    )

    checkpoints = load_checkpoint_if_available(params=params, model=model)

    model.to(device)
    if world_size > 1:
        model = DDP(model, device_ids=[rank])

    optimizer = optim.AdamW(
        model.parameters(),
        lr=params.lr,
        weight_decay=params.weight_decay,
    )
    scheduler = StepLR(optimizer, step_size=10, gamma=0.8)

    if checkpoints:
        optimizer.load_state_dict(checkpoints["optimizer"])
        scheduler.load_state_dict(checkpoints["scheduler"])

    grid = dataset_av(
        params.video_path,
        params.anno_path,
        params.train_list,
        params.feature_dim,
        params.vid_padding,
        params.aud_padding,
        params.sample_rate,
        "train",
    )
    train_dl = DataLoader(
        grid,
        batch_size=params.batch_size,
        shuffle=True,
        num_workers=params.num_workers,
        drop_last=False,
    )
    # Note: the training set is reused for validation here.
    valid_dl = train_dl

    for epoch in range(params.start_epoch, params.num_epochs):
        if epoch > params.start_epoch:
            logging.info(f"epoch {epoch}, lr: {scheduler.get_last_lr()[0]}")

        if tb_writer is not None:
            tb_writer.add_scalar(
                "train/lr",
                scheduler.get_last_lr()[0],
                params.batch_idx_train,
            )
            tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train)

        params.cur_epoch = epoch

        train_one_epoch(
            params=params,
            model=model,
            optimizer=optimizer,
            graph_compiler=graph_compiler,
            train_dl=train_dl,
            valid_dl=valid_dl,
            tb_writer=tb_writer,
            world_size=world_size,
        )

        scheduler.step()

        save_checkpoint(
            params=params,
            model=model,
            optimizer=optimizer,
            scheduler=scheduler,
            rank=rank,
        )

    logging.info("Done!")

    if world_size > 1:
        torch.distributed.barrier()
        cleanup_dist()


def main():
    parser = get_parser()
    args = parser.parse_args()

    world_size = args.world_size
    assert world_size >= 1
    if world_size > 1:
        mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True)
    else:
        run(rank=0, world_size=1, args=args)


if __name__ == "__main__":
    main()
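The tot_loss update in train_one_epoch is an exponentially decayed running sum, with reset_interval acting as its time constant. A small numeric sketch, using plain floats in place of MetricsTracker objects:

    reset_interval = 200
    decay = 1 - 1 / reset_interval  # 0.995

    tot = 0.0
    for _ in range(1000):
        tot = tot * decay + 1.0  # pretend every batch has loss 1.0
    # tot approaches 1.0 / (1 - decay) == reset_interval, i.e. roughly
    # a sum over the most recent ~200 batches.
    print(round(tot))  # 199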

View File

@@ -0,0 +1,51 @@
# Copyright 2021 Xiaomi Corp. (authors: Mingshuang Luo)
#
# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def encode_supervisions(nnet_output_shape, batch):
    """
    Encodes the supervision information of a batch into a supervision
    tensor and a list of transcription strings.

    The supervision tensor has shape ``(batch_size, 3)``.
    Its second dimension contains, in order, the sequence index [0],
    the start frame [1] and the number of frames [2] of each utterance.

    Since every utterance in a batch is padded to the same length, all
    start frames are 0 and all utterances span the full T frames, so no
    re-ordering is needed; the returned tensor and list of strings are
    consistent with each other.
    """
    N, T, _ = nnet_output_shape

    supervisions_idx = torch.arange(0, N).to(torch.int32)

    start_frames = [0 for _ in range(N)]
    supervisions_start_frame = torch.tensor(start_frames).to(torch.int32)

    num_frames = [T for _ in range(N)]
    supervisions_num_frames = torch.tensor(num_frames).to(torch.int32)

    supervision_segments = torch.stack(
        (
            supervisions_idx,
            supervisions_start_frame,
            supervisions_num_frames,
        ),
        1,
    ).to(torch.int32)

    texts = batch["txt"]

    return supervision_segments, texts
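A quick illustration of the output, assuming a batch of three padded utterances whose network output has shape (3, 160, 30); the transcripts are illustrative GRID-style sentences:

    from utils import encode_supervisions

    batch = {
        "txt": [
            "BIN BLUE AT F TWO NOW",
            "LAY GREEN BY S FOUR PLEASE",
            "PLACE RED IN C ZERO AGAIN",
        ]
    }
    segments, texts = encode_supervisions((3, 160, 30), batch)
    print(segments)
    # tensor([[  0,   0, 160],
    #         [  1,   0, 160],
    #         [  2,   0, 160]], dtype=torch.int32)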

View File

@@ -30,7 +30,7 @@ import torch
 import torch.nn as nn
 from torch.utils.data import DataLoader
-from local.dataset import dataset_GRID
+from local.dataset_visual import dataset_visual
 from model import LipNet
 from icefall.checkpoint import average_checkpoints, load_checkpoint
@@ -461,7 +461,7 @@ def main():
     model.to(device)
     model.eval()
-    grid = dataset_GRID(
+    grid = dataset_visual(
         params.video_path,
         params.anno_path,
         params.val_list,

View File

@@ -32,7 +32,7 @@ import torch.nn as nn
 import torch.optim as optim
 from torch.utils.data import DataLoader
-from local.dataset import dataset_GRID
+from local.dataset_visual import dataset_visual
 from lhotse.utils import fix_random_seed
 from model import LipNet
 from torch import Tensor
@@ -528,7 +528,7 @@ def run(rank, world_size, args):
     optimizer.load_state_dict(checkpoints["optimizer"])
     scheduler.load_state_dict(checkpoints["scheduler"])
-    grid = dataset_GRID(
+    grid = dataset_visual(
         params.video_path,
         params.anno_path,
         params.train_list,

View File

@@ -8,22 +8,22 @@ import torchaudio
 from torch.utils.data import Dataset
-class MyDataset(Dataset):
+class dataset_audio(Dataset):
     def __init__(
         self,
         video_path,
         anno_path,
         file_list,
         aud_padding,
-        phase,
         sample_rate,
         feature_dim,
+        phase,
     ):
         self.anno_path = anno_path
         self.aud_padding = aud_padding
-        self.phase = phase
         self.sample_rate = sample_rate
         self.feature_dim = feature_dim
+        self.phase = phase
         with open(file_list, "r") as f:
             self.videos = [
                 os.path.join(video_path, line.strip()) for line in f.readlines()
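After this re-ordering, phase is the last positional argument, matching how the decode.py and train.py diffs above construct the dataset. A hypothetical call with the new signature (paths and values follow the params in this commit):

    grid = dataset_audio(
        "download/GRID/lip/",  # video_path
        "download/GRID/GRID_align_txt",  # anno_path
        "download/GRID/unseen_val.txt",  # file_list
        480,  # aud_padding
        16000,  # sample_rate
        80,  # feature_dim
        "test",  # phase, now last
    )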

View File

@@ -0,0 +1,111 @@
# encoding: utf-8
import cv2
import os
import kaldifeat
import numpy as np
import torch
import torchaudio
from torch.utils.data import Dataset
from cvtransforms import HorizontalFlip, ColorNormalize
class dataset_av(Dataset):
    def __init__(
        self,
        video_path,
        anno_path,
        file_list,
        feature_dim,
        vid_pad,
        aud_pad,
        sample_rate,
        phase,
    ):
        self.anno_path = anno_path
        self.vid_pad = vid_pad
        self.aud_pad = aud_pad
        self.feature_dim = feature_dim
        self.sample_rate = sample_rate
        self.phase = phase
        with open(file_list, "r") as f:
            self.videos = [
                os.path.join(video_path, line.strip()) for line in f.readlines()
            ]

        self.data = []
        for vid in self.videos:
            items = vid.split(os.path.sep)
            aud = (
                vid.replace("lip", "audio_25k").replace("/video/mpg_6000", "")
                + ".wav"
            )
            self.data.append((vid, aud, items[-4], items[-1]))
    def __getitem__(self, idx):
        (vid, aud, spk, name) = self.data[idx]
        vid = self._load_vid(vid)
        aud = self._load_aud(aud)
        anno = self._load_anno(
            os.path.join(self.anno_path, spk, "align", name + ".align")
        )

        if self.phase == "train":
            vid = HorizontalFlip(vid)

        vid = ColorNormalize(vid)

        # Pad to the fixed lengths once, after the augmentations.
        vid = self._padding(vid, self.vid_pad)
        aud = self._padding(aud, self.aud_pad)

        return {
            "vid": torch.FloatTensor(vid.transpose(3, 0, 1, 2)),
            "aud": torch.FloatTensor(aud),
            "txt": anno.upper(),
        }

    def __len__(self):
        return len(self.data)
    def _load_vid(self, p):
        files = os.listdir(p)
        files = list(filter(lambda file: file.find(".jpg") != -1, files))
        files = sorted(files, key=lambda file: int(os.path.splitext(file)[0]))
        array = [cv2.imread(os.path.join(p, file)) for file in files]
        array = list(filter(lambda im: im is not None, array))
        array = [
            cv2.resize(im, (128, 64), interpolation=cv2.INTER_LANCZOS4)
            for im in array
        ]
        array = np.stack(array, axis=0).astype(np.float32)
        return array

    def _load_aud(self, filename):
        opts = kaldifeat.FbankOptions()
        opts.frame_opts.dither = 0
        opts.frame_opts.snip_edges = False
        opts.frame_opts.samp_freq = self.sample_rate
        opts.mel_opts.num_bins = self.feature_dim

        fbank = kaldifeat.Fbank(opts)
        wave, sample_rate = torchaudio.load(filename)
        features = fbank(wave[0])
        return features

    def _load_anno(self, name):
        with open(name, "r") as f:
            lines = [line.strip().split(" ") for line in f.readlines()]
            txt = [line[2] for line in lines]
            txt = list(filter(lambda s: s.upper() not in ["SIL", "SP"], txt))
            txt = " ".join(txt)
        return txt

    def _padding(self, array, length):
        array = [array[i] for i in range(array.shape[0])]
        size = array[0].shape
        for i in range(length - len(array)):
            array.append(np.zeros(size))
        return np.stack(array, axis=0)
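_padding simply appends all-zero frames along the first (time) axis until the fixed length is reached. A minimal sketch of the same idea, with made-up shapes:

    import numpy as np

    frames = np.ones((3, 64, 128, 3), dtype=np.float32)  # 3 video frames
    padded = np.stack(list(frames) + [np.zeros((64, 128, 3))] * 2, axis=0)
    print(padded.shape)  # (5, 64, 128, 3): 3 real frames + 2 zero frames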

View File

@@ -7,17 +7,17 @@ from torch.utils.data import Dataset
 from cvtransforms import HorizontalFlip, ColorNormalize
-class dataset_GRID(Dataset):
+class dataset_visual(Dataset):
     def __init__(
         self,
         video_path,
         anno_path,
         file_list,
-        vid_pad,
+        vid_padding,
         phase,
     ):
         self.anno_path = anno_path
-        self.vid_pad = vid_pad
+        self.vid_padding = vid_padding
         self.phase = phase
         with open(file_list, "r") as f:
             self.videos = [
@@ -44,7 +44,7 @@ class dataset_GRID(Dataset):
             vid = HorizontalFlip(vid)
         vid = ColorNormalize(vid)
-        vid = self._padding(vid, self.vid_pad)
+        vid = self._padding(vid, self.vid_padding)
         return {
             "vid": torch.FloatTensor(vid.transpose(3, 0, 1, 2)),