diff --git a/egs/ljspeech/TTS/matcha/models/components/decoder.py b/egs/ljspeech/TTS/matcha/models/components/decoder.py
index 14d19f5d4..102d87713 100644
--- a/egs/ljspeech/TTS/matcha/models/components/decoder.py
+++ b/egs/ljspeech/TTS/matcha/models/components/decoder.py
@@ -7,7 +7,7 @@ import torch.nn.functional as F
 from conformer import ConformerBlock
 from diffusers.models.activations import get_activation
 from einops import pack, rearrange, repeat
-from matcha.models.components.transformer import BasicTransformerBlock
+from models.components.transformer import BasicTransformerBlock
 
 
 class SinusoidalPosEmb(torch.nn.Module):
diff --git a/egs/ljspeech/TTS/matcha/models/components/flow_matching.py b/egs/ljspeech/TTS/matcha/models/components/flow_matching.py
index 997689b1c..eb795ef32 100644
--- a/egs/ljspeech/TTS/matcha/models/components/flow_matching.py
+++ b/egs/ljspeech/TTS/matcha/models/components/flow_matching.py
@@ -2,7 +2,7 @@ from abc import ABC
 
 import torch
 import torch.nn.functional as F
-from matcha.models.components.decoder import Decoder
+from models.components.decoder import Decoder
 
 
 class BASECFM(torch.nn.Module, ABC):
diff --git a/egs/ljspeech/TTS/matcha/models/components/text_encoder.py b/egs/ljspeech/TTS/matcha/models/components/text_encoder.py
index ca77cba51..364ff1938 100644
--- a/egs/ljspeech/TTS/matcha/models/components/text_encoder.py
+++ b/egs/ljspeech/TTS/matcha/models/components/text_encoder.py
@@ -5,7 +5,7 @@ import math
 import torch
 import torch.nn as nn
 from einops import rearrange
-from matcha.model import sequence_mask
+from model import sequence_mask
 
 
 class LayerNorm(nn.Module):