Mirror of https://github.com/k2-fsa/icefall.git, synced 2025-08-16 04:32:19 +00:00
Replace `with autocast(...)` with `with autocast("cuda", ...)`
parent 2d9825aa29
commit 30ba83a7b2
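The change is mechanical: every autocast(...) context manager gains an explicit "cuda" device-type argument. This follows recent PyTorch releases, which deprecate the CUDA-specific torch.cuda.amp.autocast(...) spelling in favor of the device-agnostic torch.amp.autocast(device_type, ...). A minimal before/after sketch; fully qualified names are used here for clarity, since the actual imports in the touched files are not visible in this diff:

import torch

x = torch.randn(8, 8)
w = torch.randn(8, 8)
use_fp16 = torch.cuda.is_available()  # stand-in for params.use_fp16

# Old style: the CUDA-specific context manager, no device argument.
# Recent PyTorch releases deprecate this spelling.
with torch.cuda.amp.autocast(enabled=use_fp16):
    y_old = x @ w

# New style: the device-agnostic context manager with an explicit device
# type, matching the autocast("cuda", ...) calls introduced by this commit.
with torch.amp.autocast("cuda", enabled=use_fp16):
    y_new = x @ w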
@@ -148,7 +148,7 @@ class Encodec(nn.Module):
         )

         # calculate losses
-        with autocast(enabled=False):
+        with autocast("cuda", enabled=False):
             gen_stft_adv_loss = self.generator_adversarial_loss(outputs=y_hat)

             if self.multi_period_discriminator is not None:
@@ -272,7 +272,7 @@ class Encodec(nn.Module):
             speech_hat.contiguous().detach(),
         )
         # calculate losses
-        with autocast(enabled=False):
+        with autocast("cuda", enabled=False):
             (
                 disc_stft_real_adv_loss,
                 disc_stft_fake_adv_loss,
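Both Encodec hunks above sit inside a nested autocast(enabled=False) region: the adversarial losses are computed in full precision even when the surrounding forward pass runs under fp16 autocast. A minimal sketch of that nesting pattern, with a placeholder MSE loss standing in for the real adversarial losses:

import torch
from torch.amp import autocast


def fp32_loss(pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    """Compute a loss in fp32 even inside an outer autocast region."""
    # Disabling autocast stops new ops from running in reduced precision;
    # the explicit .float() casts handle inputs already produced in fp16.
    with autocast("cuda", enabled=False):
        return torch.nn.functional.mse_loss(pred.float(), target.float())


pred = torch.randn(4, 100)
target = torch.randn(4, 100)
loss = fp32_loss(pred, target)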
|
@@ -466,7 +466,7 @@ def train_one_epoch(
         loss_info["samples"] = batch_size

         try:
-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 d_weight = train_discriminator(
                     params.lambda_adv,
                     params.cur_epoch,
@@ -502,7 +502,7 @@ def train_one_epoch(
             scaler.scale(disc_loss).backward()
             scaler.step(optimizer_d)

-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 g_weight = train_discriminator(
                     params.lambda_adv,
                     params.cur_epoch,
@@ -846,7 +846,7 @@ def scan_pessimistic_batches_for_oom(
         ) = prepare_input(params, batch, device)
         try:
             # for discriminator
-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 (
                     disc_stft_real_adv_loss,
                     disc_stft_fake_adv_loss,
@@ -876,7 +876,7 @@ def scan_pessimistic_batches_for_oom(
             optimizer_d.zero_grad()
             loss_d.backward()
             # for generator
-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 (
                     commit_loss,
                     gen_stft_adv_loss,
|
@@ -456,7 +456,7 @@ def train_one_epoch(
         loss_info["samples"] = batch_size

         try:
-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 # forward discriminator
                 loss_d, stats_d = model(
                     text=tokens,
@@ -475,7 +475,7 @@ def train_one_epoch(
             scaler.scale(loss_d).backward()
             scaler.step(optimizer_d)

-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 # forward generator
                 loss_g, stats_g = model(
                     text=tokens,
@@ -748,7 +748,7 @@ def scan_pessimistic_batches_for_oom(
         ) = prepare_input(batch, tokenizer, device, train_speaker_map)
         try:
             # for discriminator
-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 loss_d, stats_d = model(
                     text=tokens,
                     text_lengths=tokens_lens,
@@ -762,7 +762,7 @@ def scan_pessimistic_batches_for_oom(
             optimizer_d.zero_grad()
             loss_d.backward()
             # for generator
-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 loss_g, stats_g = model(
                     text=tokens,
                     text_lengths=tokens_lens,
|
@@ -479,7 +479,7 @@ def train_one_epoch(
             tokens_lens,
         ) = prepare_input(batch, tokenizer, device, params)
         try:
-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 losses = get_losses(
                     {
                         "x": tokens,
|
@@ -396,7 +396,7 @@ def train_one_epoch(
         loss_info["samples"] = batch_size

         try:
-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 # forward discriminator
                 loss_d, stats_d = model(
                     text=tokens,
@@ -414,7 +414,7 @@ def train_one_epoch(
             scaler.scale(loss_d).backward()
             scaler.step(optimizer_d)

-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 # forward generator
                 loss_g, stats_g = model(
                     text=tokens,
@@ -673,7 +673,7 @@ def scan_pessimistic_batches_for_oom(
         )
         try:
             # for discriminator
-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 loss_d, stats_d = model(
                     text=tokens,
                     text_lengths=tokens_lens,
@@ -686,7 +686,7 @@ def scan_pessimistic_batches_for_oom(
             optimizer_d.zero_grad()
             loss_d.backward()
             # for generator
-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 loss_g, stats_g = model(
                     text=tokens,
                     text_lengths=tokens_lens,
|
@@ -410,7 +410,7 @@ class VITS(nn.Module):
         p = self.discriminator(speech_)

         # calculate losses
-        with autocast(enabled=False):
+        with autocast("cuda", enabled=False):
             if not return_sample:
                 mel_loss = self.mel_loss(speech_hat_, speech_)
             else:
@@ -518,7 +518,7 @@ class VITS(nn.Module):
         p = self.discriminator(speech_)

         # calculate losses
-        with autocast(enabled=False):
+        with autocast("cuda", enabled=False):
             real_loss, fake_loss = self.discriminator_adv_loss(p_hat, p)
             loss = real_loss + fake_loss

|
@@ -448,7 +448,7 @@ def train_one_epoch(
         loss_info["samples"] = batch_size

         try:
-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 # forward discriminator
                 loss_d, stats_d = model(
                     text=tokens,
@@ -467,7 +467,7 @@ def train_one_epoch(
             scaler.scale(loss_d).backward()
             scaler.step(optimizer_d)

-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 # forward generator
                 loss_g, stats_g = model(
                     text=tokens,
@@ -740,7 +740,7 @@ def scan_pessimistic_batches_for_oom(
         ) = prepare_input(batch, tokenizer, device, speaker_map)
         try:
             # for discriminator
-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 loss_d, stats_d = model(
                     text=tokens,
                     text_lengths=tokens_lens,
@@ -754,7 +754,7 @@ def scan_pessimistic_batches_for_oom(
             optimizer_d.zero_grad()
             loss_d.backward()
             # for generator
-            with autocast(enabled=params.use_fp16):
+            with autocast("cuda", enabled=params.use_fp16):
                 loss_g, stats_g = model(
                     text=tokens,
                     text_lengths=tokens_lens,
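The train_one_epoch and scan_pessimistic_batches_for_oom hunks all touch the same two-pass GAN-style step: a discriminator forward under autocast, a scaled backward and optimizer step, then a generator forward under autocast. A condensed sketch of that loop body with the updated call; the model signature (including the forward_generator flag), the batch layout, and the optimizer wiring are placeholders here rather than icefall's exact API, and GradScaler is assumed to be importable from torch.amp (PyTorch 2.3+):

import torch
from torch.amp import GradScaler, autocast


def gan_step(model, batch, optimizer_d, optimizer_g, scaler, use_fp16=True):
    """One discriminator + generator update, mirroring the pattern above."""
    # Discriminator pass under (possibly disabled) mixed precision.
    with autocast("cuda", enabled=use_fp16):
        loss_d, stats_d = model(**batch, forward_generator=False)  # placeholder signature
    optimizer_d.zero_grad()
    scaler.scale(loss_d).backward()
    scaler.step(optimizer_d)

    # Generator pass under the same autocast setting.
    with autocast("cuda", enabled=use_fp16):
        loss_g, stats_g = model(**batch, forward_generator=True)  # placeholder signature
    optimizer_g.zero_grad()
    scaler.scale(loss_g).backward()
    scaler.step(optimizer_g)

    # A single scaler update per step keeps the loss scale consistent
    # across both optimizers.
    scaler.update()
    return stats_d, stats_g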
|