fix tensorboard logging (#516)

* fix metricstracker

* fix style
Zengwei Yao 2022-08-04 19:57:12 +08:00 committed by GitHub
parent e538232485
commit a4dd273776
10 changed files with 91 additions and 3 deletions


@@ -447,6 +447,17 @@ def compute_loss(
         info["loss"] = loss.detach().cpu().item()
+        # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa
+        info["utterances"] = feature.size(0)
+        # averaged input duration in frames over utterances
+        info["utt_duration"] = supervisions["num_frames"].sum().item()
+        # averaged padding proportion over utterances
+        info["utt_pad_proportion"] = (
+            ((feature.size(1) - supervisions["num_frames"]) / feature.size(1))
+            .sum()
+            .item()
+        )
         return loss, info
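
For orientation, a small self-contained sketch (not the recipe code; tensor sizes are made up) of what the two new entries accumulate. As the comment in the diff notes, MetricsTracker later divides both sums by `utterances`, so they show up as per-utterance averages in the log lines and TensorBoard summaries.

import torch

# Toy batch: N=3 utterances padded to T=10 frames, C=80 features (illustrative values).
feature = torch.zeros(3, 10, 80)        # (N, T, C), padded along T
num_frames = torch.tensor([10, 7, 4])   # true length of each utterance

info = {}
info["utterances"] = feature.size(0)              # 3
# Sum of true durations; becomes an average once divided by "utterances".
info["utt_duration"] = num_frames.sum().item()    # 21
# Sum of per-utterance padding fractions; also divided by "utterances" later.
info["utt_pad_proportion"] = (
    ((feature.size(1) - num_frames) / feature.size(1)).sum().item()
)                                                 # 0.0 + 0.3 + 0.6 = 0.9

print(info["utt_duration"] / info["utterances"])        # 7.0 frames per utterance
print(info["utt_pad_proportion"] / info["utterances"])  # 0.3 average padding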


@@ -605,6 +605,15 @@ def compute_loss(
         # Note: We use reduction=sum while computing the loss.
         info["loss"] = loss.detach().cpu().item()
+        # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa
+        info["utterances"] = feature.size(0)
+        # averaged input duration in frames over utterances
+        info["utt_duration"] = feature_lens.sum().item()
+        # averaged padding proportion over utterances
+        info["utt_pad_proportion"] = (
+            ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
+        )
         return loss, info


@@ -430,6 +430,17 @@ def compute_loss(
         info["loss"] = loss.detach().cpu().item()
+        # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa
+        info["utterances"] = feature.size(0)
+        # averaged input duration in frames over utterances
+        info["utt_duration"] = supervisions["num_frames"].sum().item()
+        # averaged padding proportion over utterances
+        info["utt_pad_proportion"] = (
+            ((feature.size(1) - supervisions["num_frames"]) / feature.size(1))
+            .sum()
+            .item()
+        )
         return loss, info


@@ -349,6 +349,17 @@ def compute_loss(
         info["frames"] = supervision_segments[:, 2].sum().item()
         info["loss"] = loss.detach().cpu().item()
+        # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa
+        info["utterances"] = feature.size(0)
+        # averaged input duration in frames over utterances
+        info["utt_duration"] = supervisions["num_frames"].sum().item()
+        # averaged padding proportion over utterances
+        info["utt_pad_proportion"] = (
+            ((feature.size(2) - supervisions["num_frames"]) / feature.size(2))
+            .sum()
+            .item()
+        )
         return loss, info


@@ -403,6 +403,15 @@ def compute_loss(
         # Note: We use reduction=sum while computing the loss.
         info["loss"] = loss.detach().cpu().item()
+        # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa
+        info["utterances"] = feature.size(0)
+        # averaged input duration in frames over utterances
+        info["utt_duration"] = feature_lens.sum().item()
+        # averaged padding proportion over utterances
+        info["utt_pad_proportion"] = (
+            ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
+        )
         return loss, info


@@ -407,6 +407,15 @@ def compute_loss(
         # Note: We use reduction=sum while computing the loss.
         info["loss"] = loss.detach().cpu().item()
+        # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa
+        info["utterances"] = feature.size(0)
+        # averaged input duration in frames over utterances
+        info["utt_duration"] = feature_lens.sum().item()
+        # averaged padding proportion over utterances
+        info["utt_pad_proportion"] = (
+            ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
+        )
         return loss, info


@@ -429,6 +429,15 @@ def compute_loss(
         # Note: We use reduction=sum while computing the loss.
         info["loss"] = loss.detach().cpu().item()
+        # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa
+        info["utterances"] = feature.size(0)
+        # averaged input duration in frames over utterances
+        info["utt_duration"] = feature_lens.sum().item()
+        # averaged padding proportion over utterances
+        info["utt_pad_proportion"] = (
+            ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
+        )
         return loss, info


@@ -417,6 +417,15 @@ def compute_loss(
         # Note: We use reduction=sum while computing the loss.
         info["loss"] = loss.detach().cpu().item()
+        # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa
+        info["utterances"] = feature.size(0)
+        # averaged input duration in frames over utterances
+        info["utt_duration"] = feature_lens.sum().item()
+        # averaged padding proportion over utterances
+        info["utt_pad_proportion"] = (
+            ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
+        )
         return loss, info


@@ -476,6 +476,15 @@ def compute_loss(
         # Note: We use reduction=sum while computing the loss.
         info["loss"] = loss.detach().cpu().item()
+        # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa
+        info["utterances"] = feature.size(0)
+        # averaged input duration in frames over utterances
+        info["utt_duration"] = feature_lens.sum().item()
+        # averaged padding proportion over utterances
+        info["utt_pad_proportion"] = (
+            ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
+        )
         return loss, info


@@ -544,9 +544,10 @@ class MetricsTracker(collections.defaultdict):
             else:
                 raise ValueError(f"Unexpected key: {k}")
         frames = "%.2f" % self["frames"]
-        ans_frames += "over " + str(frames) + " frames; "
-        utterances = "%.2f" % self["utterances"]
-        ans_utterances += "over " + str(utterances) + " utterances."
+        ans_frames += "over " + str(frames) + " frames. "
+        if ans_utterances != "":
+            utterances = "%.2f" % self["utterances"]
+            ans_utterances += "over " + str(utterances) + " utterances."
         return ans_frames + ans_utterances
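
To make the effect of this last change concrete, here is a minimal stand-in for the tail of MetricsTracker.__str__ with made-up values (summarize is illustrative, not part of icefall). Before the fix, the utterance-level summary was appended unconditionally, so a recipe that tracks no utt_* statistics would log a dangling "over 0.00 utterances."; after the fix it only appears when something was actually accumulated.

def summarize(ans_frames: str, ans_utterances: str, frames: float, utterances: float) -> str:
    # Mirrors the patched logic: emit the utterance-level summary only when
    # some utt_* statistic was tracked for this batch.
    ans_frames += "over " + ("%.2f" % frames) + " frames. "
    if ans_utterances != "":
        ans_utterances += "over " + ("%.2f" % utterances) + " utterances."
    return ans_frames + ans_utterances

# A recipe that tracks the new utt_* stats:
print(summarize("loss=0.25, ", "utt_duration=712.4, utt_pad_proportion=0.051, ", 25000.0, 32.0))
# loss=0.25, over 25000.00 frames. utt_duration=712.4, utt_pad_proportion=0.051, over 32.00 utterances.

# A recipe that does not:
print(summarize("loss=0.25, ", "", 25000.0, 0.0))
# loss=0.25, over 25000.00 frames.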