add stats for other recipes

Author: yaozengwei
Date:   2022-07-24 19:41:09 +08:00
parent 2daf4fec4c
commit fdb8371c8c
6 changed files with 48 additions and 0 deletions


@@ -686,6 +686,14 @@ def compute_loss(
            (feature_lens // params.subsampling_factor).sum().item()
        )
        info["utterances"] = feature.size(0)
        # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances`  # noqa
        info["utt_duration"] = feature_lens.sum().item()
        # padding proportion of each utterance
        info["utt_pad_proportion"] = (
            ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
        )

    # Note: We use reduction=sum while computing the loss.
    info["loss"] = loss.detach().cpu().item()
    info["simple_loss"] = simple_loss.detach().cpu().item()

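The hunk above adds two summed batch statistics; the same block is repeated in each of the remaining files. Below is a minimal sketch, not part of the commit, of how the two stats behave on a hypothetical padded batch; the (N, T, C) shape for `feature` and the per-utterance valid-frame counts in `feature_lens` are assumptions matching the surrounding compute_loss code:

import torch

# Hypothetical batch: N=3 utterances padded to T=100 frames of 80-dim features.
feature = torch.zeros(3, 100, 80)
feature_lens = torch.tensor([100, 80, 50])  # valid frames per utterance

info = {}
info["utterances"] = feature.size(0)              # 3
# Summed input duration in frames (a sum, not yet an average).
info["utt_duration"] = feature_lens.sum().item()  # 230
# Summed padding proportion: 0/100 + 20/100 + 50/100.
info["utt_pad_proportion"] = (
    ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
)  # 0.7

# Per the in-diff comment, dividing by `utterances` later gives averages:
print(info["utt_duration"] / info["utterances"])        # ~76.7 frames
print(info["utt_pad_proportion"] / info["utterances"])  # ~0.233

Both values are accumulated as sums, consistent with the reduction=sum note in the diff; the per-utterance averaging happens downstream.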

@@ -504,6 +504,14 @@ def compute_loss(
            (feature_lens // params.subsampling_factor).sum().item()
        )
        info["utterances"] = feature.size(0)
        # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances`  # noqa
        info["utt_duration"] = feature_lens.sum().item()
        # padding proportion of each utterance
        info["utt_pad_proportion"] = (
            ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
        )

    # Note: We use reduction=sum while computing the loss.
    info["loss"] = loss.detach().cpu().item()
    info["simple_loss"] = simple_loss.detach().cpu().item()


@@ -573,6 +573,14 @@ def compute_loss(
            (feature_lens // params.subsampling_factor).sum().item()
        )
        info["utterances"] = feature.size(0)
        # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances`  # noqa
        info["utt_duration"] = feature_lens.sum().item()
        # padding proportion of each utterance
        info["utt_pad_proportion"] = (
            ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
        )

    # Note: We use reduction=sum while computing the loss.
    info["loss"] = loss.detach().cpu().item()
    info["simple_loss"] = simple_loss.detach().cpu().item()


@@ -612,6 +612,14 @@ def compute_loss(
            (feature_lens // params.subsampling_factor).sum().item()
        )
        info["utterances"] = feature.size(0)
        # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances`  # noqa
        info["utt_duration"] = feature_lens.sum().item()
        # padding proportion of each utterance
        info["utt_pad_proportion"] = (
            ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
        )

    # Note: We use reduction=sum while computing the loss.
    info["loss"] = loss.detach().cpu().item()
    info["simple_loss"] = simple_loss.detach().cpu().item()


@@ -644,6 +644,14 @@ def compute_loss(
            (feature_lens // params.subsampling_factor).sum().item()
        )
        info["utterances"] = feature.size(0)
        # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances`  # noqa
        info["utt_duration"] = feature_lens.sum().item()
        # padding proportion of each utterance
        info["utt_pad_proportion"] = (
            ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
        )

    # Note: We use reduction=sum while computing the loss.
    info["loss"] = loss.detach().cpu().item()
    info["simple_loss"] = simple_loss.detach().cpu().item()


@@ -657,6 +657,14 @@ def compute_loss(
            (feature_lens // params.subsampling_factor).sum().item()
        )
        info["utterances"] = feature.size(0)
        # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances`  # noqa
        info["utt_duration"] = feature_lens.sum().item()
        # padding proportion of each utterance
        info["utt_pad_proportion"] = (
            ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
        )

    # Note: We use reduction=sum while computing the loss.
    info["loss"] = loss.detach().cpu().item()
    info["simple_loss"] = simple_loss.detach().cpu().item()