adding hent-srt recipe

AmirHussein96 2025-09-17 17:56:06 -04:00
parent 0c7ce5256f
commit 173c278b42
16 changed files with 10253 additions and 0 deletions

View File

@@ -0,0 +1 @@
../zipformer_multijoiner_st/asr_datamodule.py

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,261 @@
# Copyright 2025 Johns Hopkins University (author: Amir Hussein)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2021-2024 Xiaomi Corp. (authors: Fangjun Kuang,
# Zengrui Jin,
# Yifan Yang,)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from scaling import Balancer
class LSTMDecoder(nn.Module):
"""LSTM decoder."""
def __init__(
self,
vocab_size: int,
blank_id: int,
decoder_dim: int,
num_layers: int,
hidden_dim: int,
embedding_dropout: float = 0.0,
rnn_dropout: float = 0.0,
):
"""
Args:
vocab_size:
Number of tokens of the modeling unit including blank.
blank_id:
The ID of the blank symbol.
decoder_dim:
Dimension of the input embedding.
num_layers:
Number of LSTM layers.
hidden_dim:
Hidden dimension of LSTM layers.
embedding_dropout:
Dropout rate for the embedding layer.
rnn_dropout:
Dropout for LSTM layers.
"""
super().__init__()
self.embedding = nn.Embedding(
num_embeddings=vocab_size,
embedding_dim=decoder_dim,
)
# the balancers are to avoid any drift in the magnitude of the
# embeddings, which would interact badly with parameter averaging.
self.balancer = Balancer(
decoder_dim,
channel_dim=-1,
min_positive=0.0,
max_positive=1.0,
min_abs=0.5,
max_abs=1.0,
prob=0.05,
)
self.blank_id = blank_id
self.vocab_size = vocab_size
self.embedding_dropout = nn.Dropout(embedding_dropout)
self.rnn = nn.LSTM(
input_size=decoder_dim,
hidden_size=hidden_dim,
num_layers=num_layers,
batch_first=True,
dropout=rnn_dropout,
)
self.balancer2 = Balancer(
decoder_dim,
channel_dim=-1,
min_positive=0.0,
max_positive=1.0,
min_abs=0.5,
max_abs=1.0,
prob=0.05,
)
def forward(
self,
y: torch.Tensor,
states: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
need_pad: bool = False,
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
Args:
y:
A 2-D tensor of shape (N, U).
states:
Optional LSTM states (h, c) from a previous call, used for
incremental decoding.
need_pad:
True to pad the embedding output before feeding it to the LSTM.
Returns:
Return a tuple (rnn_out, (h, c)), where rnn_out has shape
(N, U, hidden_dim) and (h, c) are the updated LSTM states.
"""
y = y.to(torch.int64)
# this stuff about clamp() is a temporary fix for a mismatch
# at utterance start, we use negative ids in beam_search.py
embedding_out = self.embedding(y.clamp(min=0)) * (y >= 0).unsqueeze(-1)
embedding_out = self.embedding_dropout(embedding_out)
embedding_out = self.balancer(embedding_out)
if need_pad is True:
embedding_out = pad_sequence(embedding_out, batch_first=True, padding_value=0)
rnn_out, (h, c) = self.rnn(embedding_out, states)
rnn_out = F.relu(rnn_out)
rnn_out = self.balancer2(rnn_out)
return rnn_out, (h, c)
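# A minimal usage sketch of LSTMDecoder, kept as a comment so the module is not
# executed at import time (the sizes below are illustrative, not the values
# used in this recipe):
#   decoder = LSTMDecoder(
#       vocab_size=500, blank_id=0, decoder_dim=512, num_layers=2, hidden_dim=512
#   )
#   y = torch.randint(low=1, high=500, size=(4, 10))  # (N, U) token ids
#   out, (h, c) = decoder(y)                          # out: (N, U, hidden_dim)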
class Decoder(nn.Module):
"""This class modifies the stateless decoder from the following paper:
RNN-transducer with stateless prediction network
https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9054419
It removes the recurrent connection from the decoder, i.e., the prediction
network. Different from the above paper, it adds an extra Conv1d
right after the embedding layer.
TODO: Implement https://arxiv.org/pdf/2109.07513.pdf
"""
def __init__(
self,
vocab_size: int,
decoder_dim: int,
blank_id: int,
context_size: int,
):
"""
Args:
vocab_size:
Number of tokens of the modeling unit including blank.
decoder_dim:
Dimension of the input embedding, and of the decoder output.
blank_id:
The ID of the blank symbol.
context_size:
Number of previous words to use to predict the next word.
1 means bigram; 2 means trigram. n means (n+1)-gram.
"""
super().__init__()
self.embedding = nn.Embedding(
num_embeddings=vocab_size,
embedding_dim=decoder_dim,
)
# the balancers are to avoid any drift in the magnitude of the
# embeddings, which would interact badly with parameter averaging.
self.balancer = Balancer(
decoder_dim,
channel_dim=-1,
min_positive=0.0,
max_positive=1.0,
min_abs=0.5,
max_abs=1.0,
prob=0.05,
)
self.blank_id = blank_id
assert context_size >= 1, context_size
self.context_size = context_size
self.vocab_size = vocab_size
if context_size > 1:
self.conv = nn.Conv1d(
in_channels=decoder_dim,
out_channels=decoder_dim,
kernel_size=context_size,
padding=0,
groups=decoder_dim // 4, # group size == 4
bias=False,
)
self.balancer2 = Balancer(
decoder_dim,
channel_dim=-1,
min_positive=0.0,
max_positive=1.0,
min_abs=0.5,
max_abs=1.0,
prob=0.05,
)
def forward(self, y: torch.Tensor, need_pad: bool = True) -> torch.Tensor:
"""
Args:
y:
A 2-D tensor of shape (N, U).
need_pad:
True to left pad the input. Should be True during training.
False to not pad the input. Should be False during inference.
Returns:
Return a tensor of shape (N, U, decoder_dim).
"""
y = y.to(torch.int64)
# this stuff about clamp() is a temporary fix for a mismatch
# at utterance start, we use negative ids in beam_search.py
embedding_out = self.embedding(y.clamp(min=0)) * (y >= 0).unsqueeze(-1)
embedding_out = self.balancer(embedding_out)
if self.context_size > 1:
embedding_out = embedding_out.permute(0, 2, 1)
if need_pad is True:
embedding_out = F.pad(embedding_out, pad=(self.context_size - 1, 0))
else:
# During inference time, there is no need to do extra padding
# as we only need one output
assert embedding_out.size(-1) == self.context_size
embedding_out = self.conv(embedding_out)
embedding_out = embedding_out.permute(0, 2, 1)
embedding_out = F.relu(embedding_out)
embedding_out = self.balancer2(embedding_out)
return embedding_out
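# Usage sketch for the stateless Decoder (illustrative sizes): during training,
# need_pad=True left-pads the context convolution so a (N, U) input gives a
# (N, U, decoder_dim) output; at inference time exactly `context_size` tokens
# are passed with need_pad=False and a single output frame is produced, e.g.:
#   decoder = Decoder(vocab_size=500, decoder_dim=512, blank_id=0, context_size=2)
#   out = decoder(torch.tensor([[0, 17]]), need_pad=False)  # (1, 1, decoder_dim)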

View File

@@ -0,0 +1,640 @@
#!/usr/bin/env python3
#
# Copyright 2025 Johns Hopkins University (author: Amir Hussein)
# Copyright 2021-2023 Xiaomi Corporation (Author: Fangjun Kuang,
# Zengwei Yao,
# Wei Kang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script converts several saved checkpoints
# to a single one using model averaging.
"""
Usage:
Note: This is an example for the LibriSpeech dataset; if you are using a
different dataset, you should change the argument values accordingly.
(1) Export to torchscript model using torch.jit.script()
- For non-streaming model:
./hent_srt/export.py \
--exp-dir ./hent_srt/exp-st \
--causal 0 \
--use-averaged-model 1 \
--tokens data/lang_bpe_5000/tokens.txt \
--st-tokens data/lang_st_bpe_4000/tokens.txt \
--num-encoder-layers 2,2,2,2,2 \
--feedforward-dim 512,768,1024,1024,1024 \
--encoder-dim 192,256,384,512,384 \
--encoder-unmasked-dim 192,192,256,256,256 \
--downsampling-factor 1,2,4,8,4 \
--cnn-module-kernel 31,31,15,15,15 \
--num-heads 4,4,4,8,8 \
--st-num-encoder-layers 2,2,2,2,2 \
--st-feedforward-dim 512,512,256,256,256 \
--st-encoder-dim 384,512,256,256,256 \
--st-encoder-unmasked-dim 256,256,256,256,192 \
--st-downsampling-factor 1,2,4,4,4 \
--st-cnn-module-kernel 15,31,31,15,15 \
--st-num-heads 8,8,8,8,8 \
--epoch 25 \
--avg 13 \
--jit 1 \
--output-downsampling-factor 2 \
--st-output-downsampling-factor 1 \
--use-st-joiner True \
--use-hat-decode False \
--use-ctc 1 \
--use-st-ctc 1
It will generate a file `jit_script.pt` in the given `exp_dir`. You can later
load it by `torch.jit.load("jit_script.pt")`.
Check ./jit_pretrained.py for its usage.
Check https://github.com/k2-fsa/sherpa
for how to use the exported models outside of icefall.
- For streaming model:
./zipformer/export.py \
--exp-dir ./zipformer/exp \
--causal 1 \
--chunk-size 32 \
--left-context-frames 128 \
--tokens data/lang_bpe_500/tokens.txt \
--epoch 30 \
--avg 9 \
--jit 1
It will generate a file `jit_script_chunk_32_left_128.pt` in the given `exp_dir`.
You can later load it by `torch.jit.load("jit_script_chunk_32_left_128.pt")`.
Check ./jit_pretrained_streaming.py for its usage.
Check https://github.com/k2-fsa/sherpa
for how to use the exported models outside of icefall.
(2) Export `model.state_dict()`
- For non-streaming model:
./hent_srt/export.py \
--exp-dir ./hent_srt/exp-st \
--causal 0 \
--use-averaged-model 1 \
--tokens data/lang_bpe_5000/tokens.txt \
--st-tokens data/lang_st_bpe_4000/tokens.txt \
--num-encoder-layers 2,2,2,2,2 \
--feedforward-dim 512,768,1024,1024,1024 \
--encoder-dim 192,256,384,512,384 \
--encoder-unmasked-dim 192,192,256,256,256 \
--downsampling-factor 1,2,4,8,4 \
--cnn-module-kernel 31,31,15,15,15 \
--num-heads 4,4,4,8,8 \
--st-num-encoder-layers 2,2,2,2,2 \
--st-feedforward-dim 512,512,256,256,256 \
--st-encoder-dim 384,512,256,256,256 \
--st-encoder-unmasked-dim 256,256,256,256,192 \
--st-downsampling-factor 1,2,4,4,4 \
--st-cnn-module-kernel 15,31,31,15,15 \
--st-num-heads 8,8,8,8,8 \
--epoch 20 \
--avg 13 \
--jit 0 \
--output-downsampling-factor 2 \
--st-output-downsampling-factor 1 \
--use-st-joiner True \
--use-hat False \
--use-ctc 1 \
--use-st-ctc 1
- For streaming model:
./hent_srt/export.py \
--exp-dir ./hent_srt/exp-st_causal \
--causal 1 \
--use-averaged-model 1 \
--tokens data/lang_bpe_5000/tokens.txt \
--st-tokens data/lang_st_bpe_4000/tokens.txt \
--num-encoder-layers 2,2,2,2,2 \
--feedforward-dim 512,768,1024,1024,1024 \
--encoder-dim 192,256,384,512,384 \
--encoder-unmasked-dim 192,192,256,256,256 \
--downsampling-factor 1,2,4,8,4 \
--cnn-module-kernel 31,31,15,15,15 \
--num-heads 4,4,4,8,8 \
--st-num-encoder-layers 2,2,2,2,2 \
--st-feedforward-dim 512,512,256,256,256 \
--st-encoder-dim 384,512,256,256,256 \
--st-encoder-unmasked-dim 256,256,256,256,192 \
--st-downsampling-factor 1,2,4,4,4 \
--st-cnn-module-kernel 15,31,31,15,15 \
--st-num-heads 8,8,8,8,8 \
--epoch 20 \
--avg 13 \
--jit 0 \
--output-downsampling-factor 2 \
--st-output-downsampling-factor 1 \
--use-st-joiner True \
--use-hat False \
--use-ctc 1 \
--use-st-ctc 1
It will generate a file `pretrained.pt` in the given `exp_dir`. You can later
load it by `icefall.checkpoint.load_checkpoint()`.
- For non-streaming model:
To use the generated file with `hent_srt/decode.py`,
you can do:
cd /path/to/exp_dir
ln -s pretrained.pt epoch-9999.pt
cd /path/to/egs/multi_conv_zh_es_ta/ST
./hent_srt/decode.py \
--epoch 9999 --avg 1 --use-averaged-model 0 \
--beam-size 20 \
--causal 0 \
--exp-dir hent_srt/exp-st \
--bpe-model data/lang_bpe_5000/bpe.model \
--bpe-st-model data/lang_st_bpe_4000/bpe.model \
--output-downsampling-factor 2 \
--st-output-downsampling-factor 1 \
--max-duration 800 \
--num-encoder-layers 2,2,2,2,2 \
--feedforward-dim 512,768,1024,1024,1024 \
--encoder-dim 192,256,384,512,384 \
--encoder-unmasked-dim 192,192,256,256,256 \
--downsampling-factor 1,2,4,8,4 \
--cnn-module-kernel 31,31,15,15,15 \
--num-heads 4,4,4,8,8 \
--st-num-encoder-layers 2,2,2,2,2 \
--st-feedforward-dim 512,512,256,256,256 \
--st-encoder-dim 384,512,256,256,256 \
--st-encoder-unmasked-dim 256,256,256,256,192 \
--st-downsampling-factor 1,2,4,4,4 \
--st-cnn-module-kernel 15,31,31,15,15 \
--st-num-heads 8,8,8,8,8 \
--decoding-method modified_beam_search \
--use-st-joiner True \
--use-hat-decode False \
--use-ctc 1 \
--use-st-ctc 1 \
--st-blank-penalty 1
- For streaming model:
To use the generated file with `hent_srt/decode.py` and `hent_srt/streaming_decode.py`, you can do:
cd /path/to/exp_dir
ln -s pretrained.pt epoch-9999.pt
cd /path/to/egs/multi_conv_zh_es_ta/ST
./hent_srt/decode.py \
--epoch 9999 --avg 1 --use-averaged-model 0 \
--causal 1 \
--exp-dir hent_srt/exp-st_causal \
--bpe-model data/lang_bpe_5000/bpe.model \
--bpe-st-model data/lang_st_bpe_4000/bpe.model \
--output-downsampling-factor 2 \
--st-output-downsampling-factor 1 \
--max-duration 800 \
--num-encoder-layers 2,2,2,2,2 \
--feedforward-dim 512,768,1024,1024,1024 \
--encoder-dim 192,256,384,512,384 \
--encoder-unmasked-dim 192,192,256,256,256 \
--downsampling-factor 1,2,4,8,4 \
--cnn-module-kernel 31,31,15,15,15 \
--num-heads 4,4,4,8,8 \
--st-num-encoder-layers 2,2,2,2,2 \
--st-feedforward-dim 512,512,256,256,256 \
--st-encoder-dim 384,512,256,256,256 \
--st-encoder-unmasked-dim 256,256,256,256,192 \
--st-downsampling-factor 1,2,4,4,4 \
--st-cnn-module-kernel 15,31,31,15,15 \
--st-num-heads 8,8,8,8,8 \
--decoding-method greedy_search \
--use-st-joiner True \
--use-hat-decode False \
--use-ctc 1 \
--use-st-ctc 1 \
--st-blank-penalty 2 \
--chunk-size 64 \
--left-context-frames 128 \
--use-hat False --max-sym-per-frame 20
Note: If you don't want to train a model from scratch, we have
provided one for you. You can get it at
- non-streaming model:
https://huggingface.co/Zengwei/icefall-asr-librispeech-zipformer-2023-05-15
- streaming model:
https://huggingface.co/Zengwei/icefall-asr-librispeech-streaming-zipformer-2023-05-17
with the following commands:
sudo apt-get install git-lfs
git lfs install
git clone https://huggingface.co/Zengwei/icefall-asr-librispeech-zipformer-2023-05-15
git clone https://huggingface.co/Zengwei/icefall-asr-librispeech-streaming-zipformer-2023-05-17
# You will find the pre-trained models in exp dir
"""
import argparse
import logging
from pathlib import Path
from typing import List, Tuple
import k2
import torch
from scaling_converter import convert_scaled_to_non_scaled
from torch import Tensor, nn
from train import add_model_arguments, get_model, get_params
from icefall.checkpoint import (
average_checkpoints,
average_checkpoints_with_averaged_model,
find_checkpoints,
load_checkpoint,
)
from icefall.utils import make_pad_mask, num_tokens, str2bool
def get_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--epoch",
type=int,
default=30,
help="""It specifies the checkpoint to use for decoding.
Note: Epoch counts from 1.
You can specify --avg to use more checkpoints for model averaging.""",
)
parser.add_argument(
"--iter",
type=int,
default=0,
help="""If positive, --epoch is ignored and it
will use the checkpoint exp_dir/checkpoint-iter.pt.
You can specify --avg to use more checkpoints for model averaging.
""",
)
parser.add_argument(
"--avg",
type=int,
default=9,
help="Number of checkpoints to average. Automatically select "
"consecutive checkpoints before the checkpoint specified by "
"'--epoch' and '--iter'",
)
parser.add_argument(
"--use-averaged-model",
type=str2bool,
default=True,
help="Whether to load averaged model. Currently it only supports "
"using --epoch. If True, it would decode with the averaged model "
"over the epoch range from `epoch-avg` (excluded) to `epoch`."
"Actually only the models with epoch number of `epoch-avg` and "
"`epoch` are loaded for averaging. ",
)
parser.add_argument(
"--exp-dir",
type=str,
default="zipformer/exp",
help="""It specifies the directory where all training related
files, e.g., checkpoints, log, etc, are saved
""",
)
parser.add_argument(
"--tokens",
type=str,
default="data/lang_bpe_5000/tokens.txt",
help="Path to the tokens.txt",
)
parser.add_argument(
"--st-tokens",
type=str,
default="data/lang_st_bpe_4000/tokens.txt",
help="Path to the ST tokens.txt",
)
parser.add_argument(
"--jit",
type=str2bool,
default=False,
help="""True to save a model after applying torch.jit.script.
It will generate a file named jit_script.pt.
Check ./jit_pretrained.py for how to use it.
""",
)
parser.add_argument(
"--context-size",
type=int,
default=2,
help="The context size in the decoder. 1 means bigram; 2 means tri-gram",
)
parser.add_argument(
"--st-context-size",
type=int,
default=2,
help="The context size in the ST decoder. 1 means bigram; 2 means tri-gram",
)
add_model_arguments(parser)
return parser
class EncoderModel(nn.Module):
"""A wrapper for encoder and encoder_embed"""
def __init__(self, model: nn.Module) -> None:
super().__init__()
self.model = model
def forward(
self, features: Tensor, feature_lengths: Tensor
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""
Args:
features: (N, T, C)
feature_lengths: (N,)
Returns:
The ASR and ST encoder outputs and their lengths.
"""
(
encoder_out,
encoder_out_lens,
st_encoder_out,
st_encoder_out_lens,
) = self.model.forward_encoder(features, feature_lengths)
# forward_encoder() already returns batch-first outputs of shape (N, T, C),
# so no further permute is needed here.
return encoder_out, encoder_out_lens, st_encoder_out, st_encoder_out_lens
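# Loading sketch for the non-streaming torchscript export (the file name is the
# one produced in main() below; the feature shapes are illustrative):
#   m = torch.jit.load("jit_script.pt")
#   features = torch.randn(1, 200, 80)          # (N, T, C) fbank features
#   feature_lengths = torch.tensor([200])
#   asr_out, asr_lens, st_out, st_lens = m.encoder(features, feature_lengths)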
class StreamingEncoderModel(nn.Module):
"""A wrapper for encoder and encoder_embed"""
def __init__(self, encoder: nn.Module, encoder_embed: nn.Module) -> None:
super().__init__()
assert len(encoder.chunk_size) == 1, encoder.chunk_size
assert len(encoder.left_context_frames) == 1, encoder.left_context_frames
self.chunk_size = encoder.chunk_size[0]
self.left_context_len = encoder.left_context_frames[0]
# The encoder_embed subsamples the features: T -> (T - 7) // 2
# The ConvNeXt module needs (7 - 1) // 2 = 3 frames of right padding after subsampling
self.pad_length = 7 + 2 * 3
self.encoder = encoder
self.encoder_embed = encoder_embed
def forward(
self, features: Tensor, feature_lengths: Tensor, states: List[Tensor]
) -> Tuple[Tensor, Tensor, List[Tensor]]:
"""Streaming forward for encoder_embed and encoder.
Args:
features: (N, T, C)
feature_lengths: (N,)
states: a list of Tensors
Returns encoder outputs, output lengths, and updated states.
"""
chunk_size = self.chunk_size
left_context_len = self.left_context_len
cached_embed_left_pad = states[-2]
x, x_lens, new_cached_embed_left_pad = self.encoder_embed.streaming_forward(
x=features,
x_lens=feature_lengths,
cached_left_pad=cached_embed_left_pad,
)
assert x.size(1) == chunk_size, (x.size(1), chunk_size)
src_key_padding_mask = make_pad_mask(x_lens)
# processed_mask is used to mask out initial states
processed_mask = torch.arange(left_context_len, device=x.device).expand(
x.size(0), left_context_len
)
processed_lens = states[-1] # (batch,)
# (batch, left_context_size)
processed_mask = (processed_lens.unsqueeze(1) <= processed_mask).flip(1)
# Update processed lengths
new_processed_lens = processed_lens + x_lens
# (batch, left_context_size + chunk_size)
src_key_padding_mask = torch.cat([processed_mask, src_key_padding_mask], dim=1)
x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C)
encoder_states = states[:-2]
(
encoder_out,
encoder_out_lens,
new_encoder_states,
) = self.encoder.streaming_forward(
x=x,
x_lens=x_lens,
states=encoder_states,
src_key_padding_mask=src_key_padding_mask,
)
encoder_out = encoder_out.permute(1, 0, 2) # (T, N, C) ->(N, T, C)
new_states = new_encoder_states + [
new_cached_embed_left_pad,
new_processed_lens,
]
return encoder_out, encoder_out_lens, new_states
@torch.jit.export
def get_init_states(
self,
batch_size: int = 1,
device: torch.device = torch.device("cpu"),
) -> List[torch.Tensor]:
"""
Returns a list of cached tensors of all encoder layers. For layer-i, states[i*6:(i+1)*6]
is (cached_key, cached_nonlin_attn, cached_val1, cached_val2, cached_conv1, cached_conv2).
states[-2] is the cached left padding for ConvNeXt module,
of shape (batch_size, num_channels, left_pad, num_freqs)
states[-1] is processed_lens of shape (batch,), which records the number
of processed frames (at 50hz frame rate, after encoder_embed) for each sample in batch.
"""
states = self.encoder.get_init_states(batch_size, device)
embed_states = self.encoder_embed.get_init_states(batch_size, device)
states.append(embed_states)
processed_lens = torch.zeros(batch_size, dtype=torch.int32, device=device)
states.append(processed_lens)
return states
@torch.no_grad()
def main():
args = get_parser().parse_args()
args.exp_dir = Path(args.exp_dir)
params = get_params()
params.update(vars(args))
device = torch.device("cpu")
# if torch.cuda.is_available():
# device = torch.device("cuda", 0)
logging.info(f"device: {device}")
token_table = k2.SymbolTable.from_file(params.tokens)
st_token_table = k2.SymbolTable.from_file(params.st_tokens)
params.blank_id = token_table["<blk>"]
# params.unk_id = sp.piece_to_id("<unk>")
# params.st_unk_id = sp_st.piece_to_id("<unk>")
params.blank_st_id = st_token_table["<blk>"]
params.vocab_size = num_tokens(token_table) + 1
params.vocab_st_size = num_tokens(st_token_table) + 1
logging.info(params)
logging.info("About to create model")
model = get_model(params)
if not params.use_averaged_model:
if params.iter > 0:
filenames = find_checkpoints(params.exp_dir, iteration=-params.iter)[
: params.avg
]
if len(filenames) == 0:
raise ValueError(
f"No checkpoints found for"
f" --iter {params.iter}, --avg {params.avg}"
)
elif len(filenames) < params.avg:
raise ValueError(
f"Not enough checkpoints ({len(filenames)}) found for"
f" --iter {params.iter}, --avg {params.avg}"
)
logging.info(f"averaging {filenames}")
model.load_state_dict(average_checkpoints(filenames, device=device))
elif params.avg == 1:
load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
else:
start = params.epoch - params.avg + 1
filenames = []
for i in range(start, params.epoch + 1):
if i >= 1:
filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
logging.info(f"averaging {filenames}")
model.load_state_dict(average_checkpoints(filenames, device=device))
else:
if params.iter > 0:
filenames = find_checkpoints(params.exp_dir, iteration=-params.iter)[
: params.avg + 1
]
if len(filenames) == 0:
raise ValueError(
f"No checkpoints found for"
f" --iter {params.iter}, --avg {params.avg}"
)
elif len(filenames) < params.avg + 1:
raise ValueError(
f"Not enough checkpoints ({len(filenames)}) found for"
f" --iter {params.iter}, --avg {params.avg}"
)
filename_start = filenames[-1]
filename_end = filenames[0]
logging.info(
"Calculating the averaged model over iteration checkpoints"
f" from {filename_start} (excluded) to {filename_end}"
)
model.load_state_dict(
average_checkpoints_with_averaged_model(
filename_start=filename_start,
filename_end=filename_end,
device=device,
)
)
elif params.avg == 1:
load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
else:
assert params.avg > 0, params.avg
start = params.epoch - params.avg
assert start >= 1, start
filename_start = f"{params.exp_dir}/epoch-{start}.pt"
filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt"
logging.info(
f"Calculating the averaged model over epoch range from "
f"{start} (excluded) to {params.epoch}"
)
model.load_state_dict(
average_checkpoints_with_averaged_model(
filename_start=filename_start,
filename_end=filename_end,
device=device,
), strict=False
)
model.eval()
if params.jit is True:
convert_scaled_to_non_scaled(model, inplace=True)
# We won't use the forward() method of the model in C++, so just ignore
# it here.
# Otherwise, one of its arguments is a ragged tensor and is not
# torch scriptabe.
model.__class__.forward = torch.jit.ignore(model.__class__.forward)
# Wrap encoder and encoder_embed as a module
if params.causal:
model.encoder = StreamingEncoderModel(model.encoder, model.encoder_embed)
chunk_size = model.encoder.chunk_size
left_context_len = model.encoder.left_context_len
filename = f"jit_script_chunk_{chunk_size}_left_{left_context_len}.pt"
else:
model.encoder = EncoderModel(model)
filename = "jit_script.pt"
logging.info("Using torch.jit.script")
model = torch.jit.script(model)
model.save(str(params.exp_dir / filename))
logging.info(f"Saved to {filename}")
else:
logging.info("Not using torchscript. Export model.state_dict()")
# Save it using a format so that it can be loaded
# by :func:`load_checkpoint`
filename = params.exp_dir / "pretrained.pt"
torch.save({"model": model.state_dict()}, str(filename))
logging.info(f"Saved to {filename}")
if __name__ == "__main__":
formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
logging.basicConfig(format=formatter, level=logging.INFO)
main()

View File

@@ -0,0 +1 @@
../zipformer_multijoiner_st/joiner.py

View File

@@ -0,0 +1,117 @@
# Copyright 2025 Johns Hopkins University (author: Amir Hussein)
import logging
from typing import Any, Dict, Union
import torch
import torch.nn
import torch.optim
def filter_state_dict(
dst_state: Dict[str, Union[float, torch.Tensor]],
src_state: Dict[str, Union[float, torch.Tensor]],
):
"""Filter name, size mismatch instances between dicts.
Args:
dst_state: reference state dict for filtering
src_state: target state dict for filtering
"""
match_state = {}
for key, value in src_state.items():
if key in dst_state and (dst_state[key].size() == src_state[key].size()):
match_state[key] = value
else:
if key not in dst_state:
logging.warning(
f"Filter out {key} from pretrained dict"
+ " because of name not found in target dict"
)
else:
logging.warning(
f"Filter out {key} from pretrained dict"
+ " because of size mismatch"
+ f"({dst_state[key].size()}-{src_state[key].size()})"
)
return match_state
def load_pretrained_model(
init_param: str,
model: torch.nn.Module,
ignore_init_mismatch: bool,
map_location: str = "cpu",
):
"""Load a model state and set it to the model.
Args:
init_param: <file_path>:<src_key>:<dst_key>:<exclude_keys>
Examples:
>>> load_pretrained_model("somewhere/model.pth", model)
>>> load_pretrained_model("somewhere/model.pth:decoder:decoder", model)
>>> load_pretrained_model("somewhere/model.pth:decoder:decoder:", model)
>>> load_pretrained_model(
... "somewhere/model.pth:decoder:decoder:decoder.embed", model
... )
>>> load_pretrained_model("somewhere/decoder.pth::decoder", model)
"""
sps = init_param.split(":", 4)
if len(sps) == 4:
path, src_key, dst_key, excludes = sps
elif len(sps) == 3:
path, src_key, dst_key = sps
excludes = None
elif len(sps) == 2:
path, src_key = sps
dst_key, excludes = None, None
else:
(path,) = sps
src_key, dst_key, excludes = None, None, None
if src_key == "":
src_key = None
if dst_key == "":
dst_key = None
if dst_key is None:
obj = model
else:
def get_attr(obj: Any, key: str):
"""Get an nested attribute.
>>> class A(torch.nn.Module):
... def __init__(self):
... super().__init__()
... self.linear = torch.nn.Linear(10, 10)
>>> a = A()
>>> assert A.linear.weight is get_attr(A, 'linear.weight')
"""
if key.strip() == "":
return obj
for k in key.split("."):
obj = getattr(obj, k)
return obj
obj = get_attr(model, dst_key)
src_state = torch.load(path, map_location=map_location)
if excludes is not None:
for e in excludes.split(","):
src_state = {k: v for k, v in src_state.items() if not k.startswith(e)}
if src_key is not None:
src_state['model'] = {
k[len(src_key) + 1 :]: v
for k, v in src_state['model'].items()
if k.startswith(src_key)
}
dst_state = obj.state_dict()
if ignore_init_mismatch:
src_state = filter_state_dict(dst_state, src_state['model'])
dst_state.update(src_state)
obj.load_state_dict(dst_state)
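# Usage sketch (the checkpoint path and keys below are illustrative):
#   load_pretrained_model(
#       "exp_asr/pretrained.pt:encoder:encoder", model, ignore_init_mismatch=True
#   )
# This loads the "encoder.*" entries of the checkpoint's "model" dict into
# model.encoder, skipping any entry whose name or shape does not match.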

View File

@@ -0,0 +1,811 @@
# Copyright 2025 Johns Hopkins University (author: Amir Hussein)
# Copyright 2021-2023 Xiaomi Corp. (authors: Fangjun Kuang,
# Wei Kang,
# Zengwei Yao)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union
import k2
import torch
from torch import Tensor
from lhotse.dataset import SpecAugment
import torch.nn as nn
from encoder_interface import EncoderInterface
from scaling import ScaledLinear
from icefall.utils import add_sos, make_pad_mask, time_warp
class HENT_SRT(nn.Module):
def __init__(
self,
encoder_embed: nn.Module,
encoder: EncoderInterface,
decoder: Optional[nn.Module] = None,
joiner: Optional[nn.Module] = None,
st_joiner: Optional[nn.Module] = None,
st_decoder: Optional[nn.Module] = None,
st_encoder: Optional[nn.Module] = None,
encoder_dim: int = 384,
st_encoder_dim: int = 384,
decoder_dim: int = 512,
vocab_size: int = 500,
st_vocab_size: int = 500,
use_transducer: bool = True,
use_ctc: bool = False,
use_st_ctc: bool = False,
use_hat: bool = False,
use_lstm_pred:bool=False,
):
"""A multitask Transducer ASR-ST model with seperate joiners and predictors but shared acoustic encoder.
- Connectionist temporal classification: labelling unsegmented sequence data with recurrent neural networks (http://imagine.enpc.fr/~obozinsg/teaching/mva_gm/papers/ctc.pdf)
- Sequence Transduction with Recurrent Neural Networks (https://arxiv.org/pdf/1211.3711.pdf)
- Pruned RNN-T for fast, memory-efficient ASR training (https://arxiv.org/pdf/2206.13236.pdf)
Args:
encoder_embed:
It is a Convolutional 2D subsampling module. It converts
an input of shape (N, T, idim) to an output of shape
(N, T', odim), where T' = (T-3)//2-2 = (T-7)//2.
encoder:
It is the transcription network in the paper. It accepts
two inputs: `x` of (N, T, encoder_dim) and `x_lens` of shape (N,).
It returns two tensors: `logits` of shape (N, T, encoder_dim) and
`logit_lens` of shape (N,).
decoder:
It is the prediction network in the paper. Its input shape
is (N, U) and its output shape is (N, U, decoder_dim).
It should contain one attribute: `blank_id`.
It is used when use_transducer is True.
joiner:
It has two inputs with shapes: (N, T, encoder_dim) and (N, U, decoder_dim).
Its output shape is (N, T, U, vocab_size). Note that its output contains
unnormalized probs, i.e., not processed by log-softmax.
It is used when use_transducer is True.
use_transducer:
Whether use transducer head. Default: True.
use_ctc:
Whether use CTC head. Default: False.
"""
super().__init__()
assert (
use_transducer or use_ctc
), f"At least one of them should be True, but got use_transducer={use_transducer}, use_ctc={use_ctc}"
assert isinstance(encoder, EncoderInterface), type(encoder)
self.encoder_embed = encoder_embed
self.encoder = encoder
self.use_hat = use_hat
self.use_lstm_pred = use_lstm_pred
self.use_transducer = use_transducer
if use_transducer:
# Modules for Transducer head
assert decoder is not None
assert hasattr(decoder, "blank_id")
assert joiner is not None
self.decoder = decoder
self.joiner = joiner
self.st_joiner = st_joiner
self.st_decoder = st_decoder
self.st_encoder = st_encoder
self.simple_am_proj = ScaledLinear(
encoder_dim, vocab_size, initial_scale=0.25
)
self.simple_lm_proj = ScaledLinear(
decoder_dim, vocab_size, initial_scale=0.25
)
self.simple_st_am_proj = ScaledLinear(
st_encoder_dim, st_vocab_size, initial_scale=0.25
)
self.simple_st_lm_proj = ScaledLinear(
decoder_dim, st_vocab_size, initial_scale=0.25
)
else:
assert decoder is None
assert joiner is None
self.use_ctc = use_ctc
self.use_st_ctc = use_st_ctc
if self.use_ctc:
# Modules for CTC head
self.ctc_output = nn.Sequential(
nn.Dropout(p=0.1),
nn.Linear(encoder_dim, vocab_size),
nn.LogSoftmax(dim=-1),
)
if self.use_st_ctc:
# Modules for CTC head
self.st_ctc_output = nn.Sequential(
nn.Dropout(p=0.1),
nn.Linear(st_encoder_dim, st_vocab_size),
nn.LogSoftmax(dim=-1),
)
def forward_encoder(
self, x: torch.Tensor, x_lens: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]:
"""Compute the ASR and ST encoder outputs.
Args:
x:
A 3-D tensor of shape (N, T, C).
x_lens:
A 1-D tensor of shape (N,). It contains the number of frames in `x`
before padding.
Returns:
encoder_out:
ASR encoder output, of shape (N, T, C).
encoder_out_lens:
ASR encoder output lengths, of shape (N,).
st_encoder_out:
ST encoder output, of shape (N, T, C), or None if there is no ST encoder.
st_encoder_out_lens:
ST encoder output lengths, of shape (N,), or None if there is no ST encoder.
"""
# logging.info(f"Memory allocated at entry: {torch.cuda.memory_allocated() // 1000000}M")
x, x_lens = self.encoder_embed(x, x_lens)
# logging.info(f"Memory allocated after encoder_embed: {torch.cuda.memory_allocated() // 1000000}M")
src_key_padding_mask = make_pad_mask(x_lens)
x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C)
encoder_out, encoder_out_lens, st_input = self.encoder(x, x_lens, src_key_padding_mask)
if self.st_encoder is not None:
st_src_key_padding_mask = make_pad_mask(encoder_out_lens)
st_encoder_out, st_encoder_out_lens = self.st_encoder(
st_input, x_lens, src_key_padding_mask
)
st_encoder_out = st_encoder_out.permute(1, 0, 2) # (T, N, C) ->(N, T, C)
else:
st_encoder_out_lens = None
st_encoder_out = None
encoder_out = encoder_out.permute(1, 0, 2) # (T, N, C) ->(N, T, C)
assert torch.all(encoder_out_lens > 0), (x_lens, encoder_out_lens)
return encoder_out, encoder_out_lens, st_encoder_out, st_encoder_out_lens
def forward_st_ctc(
self,
st_encoder_out: torch.Tensor,
st_encoder_out_lens: torch.Tensor,
targets: torch.Tensor,
target_lengths: torch.Tensor,
) -> torch.Tensor:
"""Compute CTC loss.
Args:
encoder_out:
Encoder output, of shape (N, T, C).
encoder_out_lens:
Encoder output lengths, of shape (N,).
targets:
Target Tensor of shape (sum(target_lengths)). The targets are assumed
to be un-padded and concatenated within 1 dimension.
"""
# Compute CTC log-prob
ctc_output = self.st_ctc_output(st_encoder_out) # (N, T, C)
ctc_loss = torch.nn.functional.ctc_loss(
log_probs=ctc_output.permute(1, 0, 2), # (T, N, C)
targets=targets,
input_lengths=st_encoder_out_lens,
target_lengths=target_lengths,
reduction="sum",
)
return ctc_loss
def forward_ctc(
self,
encoder_out: torch.Tensor,
encoder_out_lens: torch.Tensor,
targets: torch.Tensor,
target_lengths: torch.Tensor,
) -> torch.Tensor:
"""Compute CTC loss.
Args:
encoder_out:
Encoder output, of shape (N, T, C).
encoder_out_lens:
Encoder output lengths, of shape (N,).
targets:
Target Tensor of shape (sum(target_lengths)). The targets are assumed
to be un-padded and concatenated within 1 dimension.
"""
# Compute CTC log-prob
ctc_output = self.ctc_output(encoder_out) # (N, T, C)
ctc_loss = torch.nn.functional.ctc_loss(
log_probs=ctc_output.permute(1, 0, 2), # (T, N, C)
targets=targets,
input_lengths=encoder_out_lens,
target_lengths=target_lengths,
reduction="sum",
)
return ctc_loss
def forward_cr_ctc(
self,
encoder_out: torch.Tensor,
encoder_out_lens: torch.Tensor,
targets: torch.Tensor,
target_lengths: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute CTC loss with consistency regularization loss.
Args:
encoder_out:
Encoder output, of shape (2 * N, T, C).
encoder_out_lens:
Encoder output lengths, of shape (2 * N,).
targets:
Target Tensor of shape (2 * sum(target_lengths)). The targets are assumed
to be un-padded and concatenated within 1 dimension.
"""
# Compute CTC loss
ctc_output = self.ctc_output(encoder_out) # (2 * N, T, C)
ctc_loss = torch.nn.functional.ctc_loss(
log_probs=ctc_output.permute(1, 0, 2), # (T, 2 * N, C)
targets=targets.cpu(),
input_lengths=encoder_out_lens.cpu(),
target_lengths=target_lengths.cpu(),
reduction="none",
)
ctc_loss_is_finite = torch.isfinite(ctc_loss)
ctc_loss = ctc_loss[ctc_loss_is_finite]
ctc_loss = ctc_loss.sum()
# Compute consistency regularization loss
exchanged_targets = ctc_output.detach().chunk(2, dim=0)
exchanged_targets = torch.cat(
[exchanged_targets[1], exchanged_targets[0]], dim=0
) # exchange: [x1, x2] -> [x2, x1]
cr_loss = nn.functional.kl_div(
input=ctc_output,
target=exchanged_targets,
reduction="none",
log_target=True,
) # (2 * N, T, C)
length_mask = make_pad_mask(encoder_out_lens).unsqueeze(-1)
cr_loss = cr_loss.masked_fill(length_mask, 0.0).sum()
return ctc_loss, cr_loss
def forward_st_cr_ctc(
self,
st_encoder_out: torch.Tensor,
st_encoder_out_lens: torch.Tensor,
st_targets: torch.Tensor,
st_target_lengths: torch.Tensor,
# encoder_out: torch.Tensor,
# encoder_out_lens: torch.Tensor,
# targets: torch.Tensor,
# target_lengths: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute CTC loss with consistency regularization loss.
Args:
encoder_out:
Encoder output, of shape (2 * N, T, C).
encoder_out_lens:
Encoder output lengths, of shape (2 * N,).
targets:
Target Tensor of shape (2 * sum(target_lengths)). The targets are assumed
to be un-padded and concatenated within 1 dimension.
"""
# Compute CTC loss
st_ctc_output = self.st_ctc_output(st_encoder_out) # (2 * N, T, C)
st_ctc_loss = torch.nn.functional.ctc_loss(
log_probs=st_ctc_output.permute(1, 0, 2), # (T, 2 * N, C)
targets=st_targets.cpu(),
input_lengths=st_encoder_out_lens.cpu(),
target_lengths=st_target_lengths.cpu(),
reduction="none",
)
st_ctc_loss_is_finite = torch.isfinite(st_ctc_loss)
st_ctc_loss = st_ctc_loss[st_ctc_loss_is_finite]
st_ctc_loss = st_ctc_loss.sum()
# ctc_output = self.ctc_output(encoder_out) # (2 * N, T, C)
# ctc_loss = torch.nn.functional.ctc_loss(
# log_probs=ctc_output.permute(1, 0, 2), # (T, 2 * N, C)
# targets=targets.cpu(),
# input_lengths=encoder_out_lens.cpu(),
# target_lengths=target_lengths.cpu(),
# reduction="sum",
# )
# if not torch.isfinite(st_ctc_loss):
# breakpoint()
# Compute consistency regularization loss
exchanged_targets = st_ctc_output.detach().chunk(2, dim=0)
exchanged_targets = torch.cat(
[exchanged_targets[1], exchanged_targets[0]], dim=0
) # exchange: [x1, x2] -> [x2, x1]
cr_loss = nn.functional.kl_div(
input=st_ctc_output,
target=exchanged_targets,
reduction="none",
log_target=True,
) # (2 * N, T, C)
length_mask = make_pad_mask(st_encoder_out_lens).unsqueeze(-1)
cr_loss = cr_loss.masked_fill(length_mask, 0.0).sum()
return st_ctc_loss, cr_loss
def forward_st_transducer(
self,
encoder_out: torch.Tensor,
encoder_out_lens: torch.Tensor,
st_encoder_out: torch.Tensor,
st_encoder_out_lens: torch.Tensor,
y: k2.RaggedTensor,
y_lens: torch.Tensor,
st_y: k2.RaggedTensor,
st_y_lens: torch.Tensor,
prune_range: int = 5,
st_prune_range: int = 10,
am_scale: float = 0.0,
lm_scale: float = 0.0,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Compute the ASR and ST Transducer losses.
Args:
encoder_out:
Encoder output, of shape (N, T, C).
encoder_out_lens:
Encoder output lengths, of shape (N,).
y:
A ragged tensor with 2 axes [utt][label]. It contains labels of each
utterance.
prune_range:
The prune range for rnnt loss, it means how many symbols(context)
we are considering for each frame to compute the loss.
am_scale:
The scale to smooth the loss with am (output of encoder network)
part
lm_scale:
The scale to smooth the loss with lm (output of predictor network)
part
"""
# Now for the decoder, i.e., the prediction network
blank_id = self.decoder.blank_id
st_blank_id = self.st_decoder.blank_id
sos_y = add_sos(y, sos_id=blank_id)
st_sos_y = add_sos(st_y, sos_id=st_blank_id)
# sos_y_padded: [B, S + 1], start with SOS.
sos_y_padded = sos_y.pad(mode="constant", padding_value=blank_id)
st_sos_y_padded = st_sos_y.pad(mode="constant", padding_value=st_blank_id)
# decoder_out: [B, S + 1, decoder_dim]
decoder_out = self.decoder(sos_y_padded)
if self.use_lstm_pred:
st_decoder_out, _ = self.st_decoder(st_sos_y_padded)
else:
st_decoder_out = self.st_decoder(st_sos_y_padded)
# Note: y does not start with SOS
# y_padded : [B, S]
y_padded = y.pad(mode="constant", padding_value=0)
y_padded = y_padded.to(torch.int64)
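# The boundary tensor required by k2 has one row per utterance with columns
# [begin_symbol, begin_frame, end_symbol, end_frame]; only the last two entries
# (the number of symbols and the number of acoustic frames) are non-zero here.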
boundary = torch.zeros(
(encoder_out.size(0), 4),
dtype=torch.int64,
device=encoder_out.device,
)
boundary[:, 2] = y_lens
boundary[:, 3] = encoder_out_lens
st_y_padded = st_y.pad(mode="constant", padding_value=0)
st_y_padded = st_y_padded.to(torch.int64)
st_boundary = torch.zeros(
(encoder_out.size(0), 4),
dtype=torch.int64,
device=encoder_out.device,
)
st_boundary[:, 2] = st_y_lens
st_boundary[:, 3] = st_encoder_out_lens
lm = self.simple_lm_proj(decoder_out)
am = self.simple_am_proj(encoder_out)
st_lm = self.simple_st_lm_proj(st_decoder_out)
st_am = self.simple_st_am_proj(st_encoder_out)
# if self.training and random.random() < 0.25:
# lm = penalize_abs_values_gt(lm, 100.0, 1.0e-04)
# if self.training and random.random() < 0.25:
# am = penalize_abs_values_gt(am, 30.0, 1.0e-04)
with torch.cuda.amp.autocast(enabled=False):
simple_loss, (px_grad, py_grad) = k2.rnnt_loss_smoothed(
lm=lm.float(),
am=am.float(),
symbols=y_padded,
termination_symbol=blank_id,
lm_only_scale=lm_scale,
am_only_scale=am_scale,
boundary=boundary,
reduction="sum",
return_grad=True,
)
st_simple_loss, (st_px_grad, st_py_grad) = k2.rnnt_loss_smoothed(
lm=st_lm.float(),
am=st_am.float(),
symbols=st_y_padded,
termination_symbol=st_blank_id,
lm_only_scale=lm_scale,
am_only_scale=am_scale,
boundary=st_boundary,
reduction="sum",
return_grad=True,
)
# am_pruned : [B, T, prune_range, encoder_dim]
# lm_pruned : [B, T, prune_range, decoder_dim]
# ranges : [B, T, prune_range]
ranges = k2.get_rnnt_prune_ranges(
px_grad=px_grad,
py_grad=py_grad,
boundary=boundary,
s_range=prune_range,
)
am_pruned, lm_pruned = k2.do_rnnt_pruning(
am=self.joiner.encoder_proj(encoder_out),
lm=self.joiner.decoder_proj(decoder_out),
ranges=ranges,
)
# project_input=False since we applied the decoder's input projections
# prior to do_rnnt_pruning (this is an optimization for speed).
logits = self.joiner(am_pruned, lm_pruned, project_input=False)
with torch.cuda.amp.autocast(enabled=False):
pruned_loss = k2.rnnt_loss_pruned(
logits=logits.float(),
symbols=y_padded,
ranges=ranges,
termination_symbol=blank_id,
boundary=boundary,
reduction="sum",
use_hat_loss=self.use_hat,
)
# logits : [B, T, prune_range, vocab_size]
st_ranges = k2.get_rnnt_prune_ranges(
px_grad=st_px_grad,
py_grad=st_py_grad,
boundary=st_boundary,
s_range=st_prune_range,
)
st_am_pruned, st_lm_pruned = k2.do_rnnt_pruning(
am=self.st_joiner.encoder_proj(st_encoder_out),
lm=self.st_joiner.decoder_proj(st_decoder_out),
ranges=st_ranges,
)
st_logits = self.st_joiner(st_am_pruned, st_lm_pruned, project_input=False)
# Compute HAT loss for st
with torch.cuda.amp.autocast(enabled=False):
pruned_st_loss = k2.rnnt_loss_pruned(
logits=st_logits.float(),
symbols=st_y.pad(mode="constant", padding_value=blank_id).to(torch.int64),
ranges=st_ranges,
termination_symbol=st_blank_id,
boundary=st_boundary,
reduction="sum",
use_hat_loss=self.use_hat,
)
return simple_loss, st_simple_loss, pruned_loss, pruned_st_loss
def forward_transducer(
self,
encoder_out: torch.Tensor,
encoder_out_lens: torch.Tensor,
y: k2.RaggedTensor,
y_lens: torch.Tensor,
prune_range: int = 5,
am_scale: float = 0.0,
lm_scale: float = 0.0,
) -> Union[Tuple[Tensor, Tensor], Tuple[Tensor, Tensor, Tensor]]:
"""Compute Transducer loss.
Args:
encoder_out:
Encoder output, of shape (N, T, C).
encoder_out_lens:
Encoder output lengths, of shape (N,).
y:
A ragged tensor with 2 axes [utt][label]. It contains labels of each
utterance.
prune_range:
The prune range for rnnt loss, it means how many symbols(context)
we are considering for each frame to compute the loss.
am_scale:
The scale to smooth the loss with am (output of encoder network)
part
lm_scale:
The scale to smooth the loss with lm (output of predictor network)
part
"""
# Now for the decoder, i.e., the prediction network
blank_id = self.decoder.blank_id
sos_y = add_sos(y, sos_id=blank_id)
# sos_y_padded: [B, S + 1], start with SOS.
sos_y_padded = sos_y.pad(mode="constant", padding_value=blank_id)
# decoder_out: [B, S + 1, decoder_dim]
decoder_out = self.decoder(sos_y_padded)
# Note: y does not start with SOS
# y_padded : [B, S]
y_padded = y.pad(mode="constant", padding_value=0)
y_padded = y_padded.to(torch.int64)
boundary = torch.zeros(
(encoder_out.size(0), 4),
dtype=torch.int64,
device=encoder_out.device,
)
boundary[:, 2] = y_lens
boundary[:, 3] = encoder_out_lens
lm = self.simple_lm_proj(decoder_out)
am = self.simple_am_proj(encoder_out)
# if self.training and random.random() < 0.25:
# lm = penalize_abs_values_gt(lm, 100.0, 1.0e-04)
# if self.training and random.random() < 0.25:
# am = penalize_abs_values_gt(am, 30.0, 1.0e-04)
with torch.cuda.amp.autocast(enabled=False):
simple_loss, (px_grad, py_grad) = k2.rnnt_loss_smoothed(
lm=lm.float(),
am=am.float(),
symbols=y_padded,
termination_symbol=blank_id,
lm_only_scale=lm_scale,
am_only_scale=am_scale,
boundary=boundary,
reduction="sum",
return_grad=True,
)
# am_pruned : [B, T, prune_range, encoder_dim]
# lm_pruned : [B, T, prune_range, decoder_dim]
# ranges : [B, T, prune_range]
ranges = k2.get_rnnt_prune_ranges(
px_grad=px_grad,
py_grad=py_grad,
boundary=boundary,
s_range=prune_range,
)
am_pruned, lm_pruned = k2.do_rnnt_pruning(
am=self.joiner.encoder_proj(encoder_out),
lm=self.joiner.decoder_proj(decoder_out),
ranges=ranges,
)
# project_input=False since we applied the decoder's input projections
# prior to do_rnnt_pruning (this is an optimization for speed).
logits = self.joiner(am_pruned, lm_pruned, project_input=False)
with torch.cuda.amp.autocast(enabled=False):
pruned_loss = k2.rnnt_loss_pruned(
logits=logits.float(),
symbols=y_padded,
ranges=ranges,
termination_symbol=blank_id,
boundary=boundary,
reduction="sum",
use_hat_loss=self.use_hat,
)
# logits : [B, T, prune_range, vocab_size]
return simple_loss, pruned_loss
def forward(
self,
x: torch.Tensor,
x_lens: torch.Tensor,
y: k2.RaggedTensor,
st_y: k2.RaggedTensor,
prune_range: int = 5,
st_prune_range: int =10,
am_scale: float = 0.0,
lm_scale: float = 0.0,
use_st_cr_ctc: bool = False,
use_asr_cr_ctc: bool = False,
use_spec_aug: bool = False,
spec_augment: Optional[SpecAugment] = None,
supervision_segments: Optional[torch.Tensor] = None,
time_warp_factor: Optional[int] = 80,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor,
torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Args:
x:
A 3-D tensor of shape (N, T, C).
x_lens:
A 1-D tensor of shape (N,). It contains the number of frames in `x`
before padding.
y:
A ragged tensor with 2 axes [utt][label]. It contains the ASR labels of
each utterance.
st_y:
A ragged tensor with 2 axes [utt][label]. It contains the ST (translation)
labels of each utterance.
prune_range:
The prune range for the ASR rnnt loss; it means how many symbols (context)
we consider for each frame to compute the loss.
st_prune_range:
The prune range for the ST rnnt loss.
am_scale:
The scale to smooth the loss with am (output of encoder network)
part.
lm_scale:
The scale to smooth the loss with lm (output of predictor network)
part.
use_st_cr_ctc:
Whether to use consistency-regularized CTC for the ST head.
use_asr_cr_ctc:
Whether to use consistency-regularized CTC for the ASR head.
use_spec_aug:
Whether apply spec-augment manually, used only if use_cr_ctc is True.
spec_augment:
The SpecAugment instance that returns time masks,
used only if use_cr_ctc is True.
supervision_segments:
An int tensor of shape ``(S, 3)``. ``S`` is the number of
supervision segments that exist in ``features``.
Used only if use_cr_ctc is True.
time_warp_factor:
Parameter for the time warping; larger values mean more warping.
Set to ``None``, or less than ``1``, to disable.
Used only if use_cr_ctc is True.
Returns:
Return the transducer, CTC and consistency-regularization losses for both
the ASR and ST heads, in the form of (simple_loss, st_simple_loss,
pruned_loss, st_pruned_loss, ctc_loss, st_ctc_loss, cr_loss, st_cr_loss).
Note:
Regarding am_scale & lm_scale, it will make the loss-function one of
the form:
lm_scale * lm_probs + am_scale * am_probs +
(1-lm_scale-am_scale) * combined_probs
"""
assert x.ndim == 3, x.shape
assert x_lens.ndim == 1, x_lens.shape
assert y.num_axes == 2, y.num_axes
assert st_y.num_axes == 2, st_y.num_axes
assert x.size(0) == x_lens.size(0) == y.dim0, (x.shape, x_lens.shape, y.dim0)
device = x.device
if use_st_cr_ctc or use_asr_cr_ctc:
assert self.use_ctc or self.use_st_ctc
if use_spec_aug:
assert spec_augment is not None and spec_augment.time_warp_factor < 1
# Apply time warping before input duplicating
assert supervision_segments is not None
x = time_warp(
x,
time_warp_factor=time_warp_factor,
supervision_segments=supervision_segments,
)
# Independently apply frequency masking and time masking to the two copies
x = spec_augment(x.repeat(2, 1, 1))
else:
x = x.repeat(2, 1, 1)
x_lens = x_lens.repeat(2)
y = k2.ragged.cat([y, y], axis=0)
if self.st_joiner is not None and self.use_st_ctc:
st_y = k2.ragged.cat([st_y, st_y], axis=0)
# Compute encoder outputs
encoder_out, encoder_out_lens, st_encoder_out, st_encoder_out_lens = self.forward_encoder(x, x_lens)
row_splits = y.shape.row_splits(1)
y_lens = row_splits[1:] - row_splits[:-1]
st_row_splits = st_y.shape.row_splits(1)
st_y_lens = st_row_splits[1:] - st_row_splits[:-1]
if self.use_transducer:
# Compute transducer loss
if self.st_joiner is not None:
simple_loss, st_simple_loss, pruned_loss, st_pruned_loss = self.forward_st_transducer(
st_encoder_out=st_encoder_out,
st_encoder_out_lens=st_encoder_out_lens,
encoder_out=encoder_out,
encoder_out_lens=encoder_out_lens,
y=y.to(x.device),
y_lens=y_lens,
st_y=st_y.to(x.device),
st_y_lens=st_y_lens,
prune_range=prune_range,
st_prune_range=st_prune_range,
am_scale=am_scale,
lm_scale=lm_scale,
)
if use_asr_cr_ctc:
simple_loss = simple_loss * 0.5
pruned_loss = pruned_loss * 0.5
if use_st_cr_ctc:
st_simple_loss = st_simple_loss * 0.5
st_pruned_loss = st_pruned_loss * 0.5
else:
simple_loss, pruned_loss = self.forward_transducer(
encoder_out=encoder_out,
encoder_out_lens=encoder_out_lens,
y=y.to(x.device),
y_lens=y_lens,
prune_range=prune_range,
am_scale=am_scale,
lm_scale=lm_scale,
)
if use_asr_cr_ctc:
simple_loss = simple_loss * 0.5
pruned_loss = pruned_loss * 0.5
st_simple_loss, st_pruned_loss = torch.empty(0), torch.empty(0)
else:
simple_loss = torch.empty(0)
pruned_loss = torch.empty(0)
if self.use_ctc:
# Compute CTC loss
targets = y.values
if not use_asr_cr_ctc:
ctc_loss = self.forward_ctc(
encoder_out=encoder_out,
encoder_out_lens=encoder_out_lens,
targets=targets,
target_lengths=y_lens,
)
cr_loss = torch.empty(0)
else:
ctc_loss, cr_loss = self.forward_cr_ctc(
encoder_out=encoder_out,
encoder_out_lens=encoder_out_lens,
targets=targets,
target_lengths=y_lens,
)
ctc_loss = ctc_loss * 0.5
cr_loss = cr_loss * 0.5
else:
ctc_loss = torch.empty(0)
cr_loss = torch.empty(0)
if self.use_st_ctc:
# Compute CTC loss
st_targets = st_y.values
if not use_st_cr_ctc:
st_ctc_loss = self.forward_st_ctc(
st_encoder_out=st_encoder_out,
st_encoder_out_lens=st_encoder_out_lens,
targets=st_targets,
target_lengths=st_y_lens,
)
st_cr_loss = torch.empty(0)
else:
st_ctc_loss, st_cr_loss = self.forward_st_cr_ctc(
st_encoder_out=st_encoder_out,
st_encoder_out_lens=st_encoder_out_lens,
st_targets=st_targets,
st_target_lengths=st_y_lens,
# encoder_out=encoder_out,
# encoder_out_lens=encoder_out_lens,
# targets=targets,
# target_lengths=y_lens,
)
st_ctc_loss = st_ctc_loss * 0.5
st_cr_loss = st_cr_loss * 0.5
else:
st_ctc_loss = torch.empty(0)
st_cr_loss = torch.empty(0)
return simple_loss, st_simple_loss, pruned_loss, st_pruned_loss, ctc_loss, st_ctc_loss, cr_loss, st_cr_loss
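# Training-step sketch (shapes are illustrative; get_model() is the factory
# defined in this recipe's train.py, and asr_labels / st_labels are
# k2.RaggedTensor label sequences for ASR and ST respectively):
#   model = get_model(params)
#   feats = torch.randn(8, 500, 80)             # (N, T, C) fbank features
#   feat_lens = torch.full((8,), 500)
#   (simple_loss, st_simple_loss, pruned_loss, st_pruned_loss,
#    ctc_loss, st_ctc_loss, cr_loss, st_cr_loss) = model(
#       feats, feat_lens, y=asr_labels, st_y=st_labels
#   )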

View File

@@ -0,0 +1 @@
../zipformer_multijoiner_st/optim.py

View File

@@ -0,0 +1 @@
../zipformer_multijoiner_st/profile.py

View File

@@ -0,0 +1 @@
../zipformer_multijoiner_st/scaling.py

View File

@@ -0,0 +1 @@
../zipformer_multijoiner_st/scaling_converter.py

View File

@@ -0,0 +1,244 @@
# Copyright 2025 Johns Hopkins University (author: Amir Hussein)
# Copyright 2022 Xiaomi Corp. (authors: Wei Kang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
import k2
import torch
import torch.nn as nn
from beam_search import Hypothesis, HypothesisList, get_hyps_shape
from decode_stream import DecodeStream
from icefall.decode import one_best_decoding
from icefall.utils import get_texts
def greedy_search_st(
model: nn.Module,
encoder_out: torch.Tensor,
encoder_out_st: torch.Tensor,
max_sym_per_frame: int,
streams: List[DecodeStream],
st_blank_penalty: float = 0.0,
) -> None:
"""Greedy search in batch mode. It hardcodes --max-sym-per-frame=1.
Args:
model:
The transducer model.
encoder_out:
Output from the encoder. Its shape is (N, T, C), where N >= 1.
streams:
A list of Stream objects.
"""
assert len(streams) == encoder_out_st.size(0)
assert encoder_out_st.ndim == 3
# ST
blank_id_st = model.st_decoder.blank_id
context_size_st = model.st_decoder.context_size
unk_id_st = getattr(model, "unk_id", blank_id_st)
device = model.device
T = encoder_out_st.size(1)
# ASR
blank_id = model.decoder.blank_id
context_size = model.decoder.context_size
unk_id = getattr(model, "unk_id", blank_id_st)
#ST
decoder_input_st = torch.tensor(
[stream.hyp_st[-context_size_st:] for stream in streams],
device=device,
dtype=torch.int64,
)
# decoder_out is of shape (N, 1, decoder_out_dim)
decoder_out_st = model.st_decoder(decoder_input_st, need_pad=False)
decoder_out_st = model.st_joiner.decoder_proj(decoder_out_st)
# ASR
decoder_input = torch.tensor(
[stream.hyp_asr[-context_size:] for stream in streams],
device=device,
dtype=torch.int64,
)
decoder_out = model.decoder(decoder_input, need_pad=False)
decoder_out = model.joiner.decoder_proj(decoder_out)
# Maximum symbols per utterance.
max_sym_per_utt = 10000
# symbols per frame
sym_per_frame = 0
# symbols per utterance decoded so far
sym_per_utt = 0
t = 0
# for t in range(T):
while t < T and sym_per_utt < max_sym_per_utt:
if sym_per_frame >= max_sym_per_frame:
sym_per_frame = 0
t += 1
continue
# current_encoder_out's shape: (batch_size, 1, encoder_out_dim)
# current_encoder_out_st = encoder_out_st[:, t : t + 1, :] # noqa
current_encoder_out_st = encoder_out_st[:, t : t + 1, :].unsqueeze(2)
st_logits = model.st_joiner(
current_encoder_out_st,
decoder_out_st.unsqueeze(1),
project_input=False,
)
# logits'shape (batch_size, vocab_size)
st_logits = st_logits.squeeze(1).squeeze(1)
if st_blank_penalty != 0.0:
st_logits[:, 0] -= st_blank_penalty
assert st_logits.ndim == 2, st_logits.shape
y_st = st_logits.argmax(dim=1).tolist()
for i, v in enumerate(y_st):
if v not in (blank_id_st, unk_id_st):
streams[i].hyp_st.append(v)
# update decoder output
# decoder_input_st = torch.tensor(
# [stream.hyp_st[-context_size_st:].reshape(
# 1, context_size_st) for stream in streams],
# device=device,
# dtype=torch.int64,
# )
decoder_input_st = torch.stack([
torch.tensor(stream.hyp_st[-context_size_st:], device=device, dtype=torch.int64)
for stream in streams]).reshape(len(streams), context_size_st)
decoder_out_st = model.st_decoder(
decoder_input_st,
need_pad=False,
)
decoder_out_st = model.st_joiner.decoder_proj(decoder_out_st)
sym_per_utt += 1
sym_per_frame += 1
else:
sym_per_frame = 0
t += 1
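# After greedy_search_st returns, each stream carries its running translation in
# stream.hyp_st; assuming hyp_st was initialised with context_size_st blank ids
# (as in the standard icefall streaming recipes), the emitted tokens are:
#   st_tokens = stream.hyp_st[context_size_st:]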
def modified_beam_search(
model: nn.Module,
encoder_out: torch.Tensor,
streams: List[DecodeStream],
num_active_paths: int = 4,
blank_penalty: float = 0.0,
) -> None:
"""Beam search in batch mode with --max-sym-per-frame=1 being hardcoded.
Args:
model:
The RNN-T model.
encoder_out:
A 3-D tensor of shape (N, T, encoder_out_dim) containing the output of
the encoder model.
streams:
A list of stream objects.
num_active_paths:
Number of active paths during the beam search.
"""
assert encoder_out.ndim == 3, encoder_out.shape
assert len(streams) == encoder_out.size(0)
blank_id = model.decoder.blank_id
context_size = model.decoder.context_size
device = next(model.parameters()).device
batch_size = len(streams)
T = encoder_out.size(1)
B = [stream.hyps for stream in streams]
for t in range(T):
current_encoder_out = encoder_out[:, t].unsqueeze(1).unsqueeze(1)
# current_encoder_out's shape: (batch_size, 1, 1, encoder_out_dim)
hyps_shape = get_hyps_shape(B).to(device)
A = [list(b) for b in B]
B = [HypothesisList() for _ in range(batch_size)]
ys_log_probs = torch.stack(
[hyp.log_prob.reshape(1) for hyps in A for hyp in hyps], dim=0
) # (num_hyps, 1)
decoder_input = torch.tensor(
[hyp.ys[-context_size:] for hyps in A for hyp in hyps],
device=device,
dtype=torch.int64,
) # (num_hyps, context_size)
decoder_out = model.decoder(decoder_input, need_pad=False).unsqueeze(1)
decoder_out = model.joiner.decoder_proj(decoder_out)
# decoder_out is of shape (num_hyps, 1, 1, decoder_output_dim)
# Note: For torch 1.7.1 and below, it requires a torch.int64 tensor
# as index, so we use `to(torch.int64)` below.
current_encoder_out = torch.index_select(
current_encoder_out,
dim=0,
index=hyps_shape.row_ids(1).to(torch.int64),
) # (num_hyps, encoder_out_dim)
logits = model.joiner(current_encoder_out, decoder_out, project_input=False)
# logits is of shape (num_hyps, 1, 1, vocab_size)
logits = logits.squeeze(1).squeeze(1)
if blank_penalty != 0.0:
logits[:, 0] -= blank_penalty
log_probs = logits.log_softmax(dim=-1) # (num_hyps, vocab_size)
log_probs.add_(ys_log_probs)
vocab_size = log_probs.size(-1)
log_probs = log_probs.reshape(-1)
row_splits = hyps_shape.row_splits(1) * vocab_size
log_probs_shape = k2.ragged.create_ragged_shape2(
row_splits=row_splits, cached_tot_size=log_probs.numel()
)
ragged_log_probs = k2.RaggedTensor(shape=log_probs_shape, value=log_probs)
for i in range(batch_size):
topk_log_probs, topk_indexes = ragged_log_probs[i].topk(num_active_paths)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
topk_hyp_indexes = (topk_indexes // vocab_size).tolist()
topk_token_indexes = (topk_indexes % vocab_size).tolist()
for k in range(len(topk_hyp_indexes)):
hyp_idx = topk_hyp_indexes[k]
hyp = A[i][hyp_idx]
new_ys = hyp.ys[:]
new_token = topk_token_indexes[k]
if new_token != blank_id:
new_ys.append(new_token)
new_log_prob = topk_log_probs[k]
new_hyp = Hypothesis(ys=new_ys, log_prob=new_log_prob)
B[i].add(new_hyp)
for i in range(batch_size):
streams[i].hyps = B[i]
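# Result-extraction sketch: the best hypothesis of each stream can be read back
# with the HypothesisList API from beam_search.py, e.g.
#   best = streams[0].hyps.get_most_probable(length_norm=True)
#   tokens = best.ys[context_size:]   # drop the initial blank context, as done
#                                     # in the standard icefall streaming_decode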

View File

@@ -0,0 +1 @@
../zipformer_multijoiner_st/subsampling.py

File diff suppressed because it is too large

File diff suppressed because it is too large