Yifan Yang 2024-07-11 14:45:35 +08:00 committed by GitHub
parent 785f3f0bcf
commit d65187ec52
2 changed files with 6 additions and 5 deletions


@@ -636,8 +636,9 @@ class ChunkCausalDepthwiseConv1d(torch.nn.Module):
)
def forward(self, x: Tensor, chunk_size: int = -1) -> Tensor:
"""
Forward function. Args:
"""Forward function.
Args:
x: a Tensor of shape (batch_size, channels, seq_len)
chunk_size: the chunk size, in frames; does not have to divide seq_len exactly.
"""


@@ -406,7 +406,7 @@ def get_parser():
"--context-size",
type=int,
default=2,
help="The context size in the decoder. 1 means bigram; " "2 means tri-gram",
help="The context size in the decoder. 1 means bigram; 2 means tri-gram",
)
parser.add_argument(
@@ -429,7 +429,7 @@ def get_parser():
"--am-scale",
type=float,
default=0.0,
help="The scale to smooth the loss with am (output of encoder network)" "part.",
help="The scale to smooth the loss with am (output of encoder network) part.",
)
parser.add_argument(
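
Both help-string changes above replace a pair of adjacent string literals with a single literal. Python concatenates adjacent literals with no separator, so the old split form of the --am-scale help rendered as "...(output of encoder network)part." with the space missing; merging the literals (or keeping the space inside one of them) fixes that. A minimal demonstration:

old = "The scale to smooth the loss with am (output of encoder network)" "part."
new = "The scale to smooth the loss with am (output of encoder network) part."
print(old)  # ...encoder network)part.  <- implicit concatenation dropped the space
print(new)  # ...encoder network) part.
assert old != new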
@@ -848,7 +848,7 @@ def compute_loss(
True for training. False for validation. When it is True, this
function enables autograd during computation; when it is False, it
disables autograd.
warmup: a floating point value which increases throughout training;
values >= 1.0 are fully warmed up and have all modules present.
"""
device = model.device if isinstance(model, DDP) else next(model.parameters()).device
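
The is_training flag described in the docstring above toggles autograd. A minimal sketch of that behaviour, not compute_loss itself (the model, tensors, and function name here are illustrative), using torch.set_grad_enabled:

import torch

def forward_with_mode(model: torch.nn.Module, x: torch.Tensor, is_training: bool) -> torch.Tensor:
    # Autograd is enabled for training and disabled for validation.
    with torch.set_grad_enabled(is_training):
        return model(x)

model = torch.nn.Linear(4, 2)
y_train = forward_with_mode(model, torch.randn(3, 4), is_training=True)
y_valid = forward_with_mode(model, torch.randn(3, 4), is_training=False)
print(y_train.requires_grad, y_valid.requires_grad)  # True False

The warmup value described alongside it is typically computed as something like min(1.0, step / warmup_steps), reaching 1.0 once warm-up finishes; the exact schedule is not shown in this hunk.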