Mirror of https://github.com/k2-fsa/icefall.git, synced 2025-09-06 23:54:17 +00:00
fix style

parent 0030d1b766
commit 70343a81f4
.flake8 (2 changed lines)

@@ -15,7 +15,7 @@ per-file-ignores =
     egs/librispeech/ASR/zipformer_mmi/*.py: E501, E203
     egs/librispeech/ASR/zipformer/*.py: E501, E203
     egs/librispeech/ASR/RESULTS.md: E999,
+    egs/ljspeech/TTS/vits/*.py: E501, E203
 
     # invalid escape sequence (cause by tex formular), W605
     icefall/utils.py: E501, W605
 
@@ -103,4 +103,10 @@ You can test the exported ONNX model with:
     --model-filename vits/exp/vits-epoch-1000.onnx \
     --tokens data/tokens.txt
 
+Download pretrained models
+--------------------------
+
+If you don't want to train from scratch, you can download the pretrained models
+by visiting the following link:
+
+- `<https://huggingface.co/Zengwei/icefall-tts-ljspeech-vits-2023-11-29>`_
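The hunk above extends the documentation that follows the ONNX export and test instructions. As a side note, a quick way to sanity-check an exported ONNX file, independent of the recipe's own test script, is to open it with onnxruntime and list its input/output signatures; the path below simply reuses the filename shown in the docs.

# Generic ONNX sanity check (not the recipe's test script): load the exported
# model with onnxruntime and print its input/output names, shapes, and types.
import onnxruntime as ort

session = ort.InferenceSession(
    "vits/exp/vits-epoch-1000.onnx",
    providers=["CPUExecutionProvider"],
)
for inp in session.get_inputs():
    print("input:", inp.name, inp.shape, inp.type)
for out in session.get_outputs():
    print("output:", out.name, out.shape, out.type)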
@@ -29,7 +29,13 @@ import os
 from pathlib import Path
 
 import torch
-from lhotse import CutSet, Spectrogram, SpectrogramConfig, LilcomChunkyWriter, load_manifest
+from lhotse import (
+    CutSet,
+    LilcomChunkyWriter,
+    Spectrogram,
+    SpectrogramConfig,
+    load_manifest,
+)
 from lhotse.audio import RecordingSet
 from lhotse.supervision import SupervisionSet
 
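The reorganized import block above comes from what appears to be the LJSpeech spectrogram-computation script. For context, here is a minimal sketch of how these lhotse classes typically fit together; the manifest paths, frame settings, and output locations are illustrative assumptions, not the recipe's actual values.

from pathlib import Path

from lhotse import CutSet, LilcomChunkyWriter, Spectrogram, SpectrogramConfig
from lhotse.audio import RecordingSet
from lhotse.supervision import SupervisionSet

# Illustrative paths; the recipe's real manifests may live elsewhere.
src_dir = Path("data/manifests")
output_dir = Path("data/spectrogram")

# Build a CutSet from recording and supervision manifests.
recordings = RecordingSet.from_file(src_dir / "ljspeech_recordings_all.jsonl.gz")
supervisions = SupervisionSet.from_file(src_dir / "ljspeech_supervisions_all.jsonl.gz")
cut_set = CutSet.from_manifests(recordings=recordings, supervisions=supervisions)

# Example frame settings (22.05 kHz audio, 1024-sample window, 256-sample hop).
sampling_rate = 22050
config = SpectrogramConfig(
    sampling_rate=sampling_rate,
    frame_length=1024 / sampling_rate,  # in seconds
    frame_shift=256 / sampling_rate,  # in seconds
)
extractor = Spectrogram(config)

# Compute spectrograms and store them compressed via LilcomChunkyWriter.
cut_set = cut_set.compute_and_store_features(
    extractor=extractor,
    storage_path=str(output_dir / "ljspeech_feats_all"),
    num_jobs=4,
    storage_type=LilcomChunkyWriter,
)
cut_set.to_file(output_dir / "ljspeech_cuts_all.jsonl.gz")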
@@ -73,7 +73,7 @@ def get_token2id(manifest_file: Path) -> Dict[str, int]:
     extra_tokens = [
         "<blk>",  # 0 for blank
         "<sos/eos>",  # 1 for sos and eos symbols.
-        "<unk>"  # 2 for OOV
+        "<unk>",  # 2 for OOV
     ]
     all_tokens = set()
 
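The one-character change above (a trailing comma after "<unk>") sits in what looks like a token-file preparation helper. To make the surrounding context concrete, here is a rough sketch of how a get_token2id-style mapping could be built and written out; the helper names, the toy token source, and the one-"token id"-pair-per-line output format are assumptions, not code from this commit.

from pathlib import Path
from typing import Dict, Set


def build_token2id(tokens: Set[str]) -> Dict[str, int]:
    """Assign consecutive ids, reserving the special symbols first (sketch only)."""
    extra_tokens = [
        "<blk>",  # 0 for blank
        "<sos/eos>",  # 1 for sos and eos symbols.
        "<unk>",  # 2 for OOV
    ]
    all_tokens = extra_tokens + sorted(tokens - set(extra_tokens))
    return {token: i for i, token in enumerate(all_tokens)}


def write_tokens_file(token2id: Dict[str, int], path: Path) -> None:
    # One "<token> <id>" pair per line; assumed layout for data/tokens.txt.
    with open(path, "w", encoding="utf-8") as f:
        for token, idx in token2id.items():
            f.write(f"{token} {idx}\n")


token2id = build_token2id({"AH0", "B", "K", "T"})  # toy phoneme set
write_tokens_file(token2id, Path("data/tokens.txt"))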
@@ -34,9 +34,7 @@ def prepare_tokens_ljspeech():
     suffix = "jsonl.gz"
     partition = "all"
 
-    cut_set = load_manifest(
-        output_dir / f"{prefix}_cuts_{partition}.{suffix}"
-    )
+    cut_set = load_manifest(output_dir / f"{prefix}_cuts_{partition}.{suffix}")
     g2p = g2p_en.G2p()
 
     new_cuts = []
@@ -51,9 +49,7 @@ def prepare_tokens_ljspeech():
             new_cuts.append(cut)
 
     new_cut_set = CutSet.from_cuts(new_cuts)
-    new_cut_set.to_file(
-        output_dir / f"{prefix}_cuts_with_tokens_{partition}.{suffix}"
-    )
+    new_cut_set.to_file(output_dir / f"{prefix}_cuts_with_tokens_{partition}.{suffix}")
 
 
 if __name__ == "__main__":
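Both hunks above collapse multi-line calls into single lines in what is presumably the script that attaches phoneme tokens to each LJSpeech cut. A minimal sketch of that flow follows; the manifest locations, the supervision text field, and storing the phoneme list on cut.tokens are assumptions based on the surrounding context, not the script itself.

import g2p_en
from lhotse import CutSet, load_manifest

prefix = "ljspeech"
suffix = "jsonl.gz"
partition = "all"
output_dir = "data/spectrogram"  # illustrative location

cut_set = load_manifest(f"{output_dir}/{prefix}_cuts_{partition}.{suffix}")
g2p = g2p_en.G2p()

new_cuts = []
for cut in cut_set:
    assert len(cut.supervisions) == 1, len(cut.supervisions)
    text = cut.supervisions[0].text
    # g2p_en converts the transcript into a list of phoneme/punctuation symbols.
    cut.tokens = g2p(text)
    new_cuts.append(cut)

new_cut_set = CutSet.from_cuts(new_cuts)
new_cut_set.to_file(f"{output_dir}/{prefix}_cuts_with_tokens_{partition}.{suffix}")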
@@ -1 +1,3 @@
 See https://k2-fsa.github.io/icefall/recipes/TTS/ljspeech/vits.html for detailed tutorials.
+
+Training logs, Tensorboard logs, and checkpoints are uploaded to https://huggingface.co/Zengwei/icefall-tts-ljspeech-vits-2023-11-29.