pre-commit hooks

Kinan Martin 2025-04-16 08:05:05 +09:00
parent e69e1c04b2
commit bb6d672b54
7 changed files with 47 additions and 45 deletions
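The hunks below are pure reformatting of the kind an autoformatter applies: long signatures wrapped at the line limit, short call sites collapsed onto one line, magic trailing commas added, and imports regrouped. As a minimal sketch of what such a hook does to one of the snippets in this commit (assuming the hook runs black, which the diff's style suggests but the commit message does not state):

# Hedged demo: feed black one of the pre-change snippets from this diff
# and print the reformatted result. Requires `pip install black`.
import black

src = (
    "raise ImportError(\n"
    '    "Please install datasets package: pip install datasets"\n'
    ")\n"
)
print(black.format_str(src, mode=black.Mode(line_length=88)))
# -> raise ImportError("Please install datasets package: pip install datasets")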

View File

@@ -36,6 +36,7 @@ from torch.utils.data import DataLoader
 from icefall.utils import str2bool

 class MLSEnglishHFAsrDataModule:
     """
     DataModule for MLS English ASR experiments using HuggingFace dataset.
@@ -46,6 +47,7 @@ class MLSEnglishHFAsrDataModule:
     def __init__(self, args: argparse.Namespace):
         self.args = args
         self.dataset = None
         # self._validate_args()

     # def _validate_args(self) -> None:
@@ -123,7 +125,6 @@ class MLSEnglishHFAsrDataModule:
             help="Whether to drop last incomplete batch",
         )
         return parser

     def load_dataset(self, dataset_path: Optional[str] = None) -> None:
@@ -133,16 +134,17 @@ class MLSEnglishHFAsrDataModule:
         try:
             from datasets import load_dataset
             self.dataset = load_dataset(dataset_path)
             logging.info("Dataset loaded successfully")
         except ImportError:
-            raise ImportError(
-                "Please install datasets package: pip install datasets"
-            )
+            raise ImportError("Please install datasets package: pip install datasets")
         except Exception as e:
             raise RuntimeError(f"Failed to load dataset: {e}")

-    def _create_dataset(self, cuts: CutSet, is_train: bool = False) -> K2SpeechRecognitionDataset:
+    def _create_dataset(
+        self, cuts: CutSet, is_train: bool = False
+    ) -> K2SpeechRecognitionDataset:
         """Create appropriate dataset with transforms."""
         transforms = []
         input_transforms = []
@@ -160,9 +162,9 @@ class MLSEnglishHFAsrDataModule:
     def _create_spec_augment(self) -> SpecAugment:
         """Create SpecAugment transform based on config."""
         num_frame_masks = 10
-        num_frame_masks_parameter = inspect.signature(
-            SpecAugment.__init__
-        ).parameters["num_frame_masks"]
+        num_frame_masks_parameter = inspect.signature(SpecAugment.__init__).parameters[
+            "num_frame_masks"
+        ]
         if num_frame_masks_parameter.default == 1:
             num_frame_masks = 2
@@ -174,7 +176,9 @@ class MLSEnglishHFAsrDataModule:
             frames_mask_size=100,
         )

-    def _create_sampler(self, cuts: CutSet, shuffle: bool) -> Union[DynamicBucketingSampler, SimpleCutSampler]:
+    def _create_sampler(
+        self, cuts: CutSet, shuffle: bool
+    ) -> Union[DynamicBucketingSampler, SimpleCutSampler]:
         """Create appropriate sampler based on config."""
         if self.args.bucketing_sampler:
             return DynamicBucketingSampler(
@@ -190,7 +194,9 @@ class MLSEnglishHFAsrDataModule:
             shuffle=shuffle,
         )

-    def train_dataloader(self, sampler_state_dict: Optional[Dict[str, Any]] = None) -> DataLoader:
+    def train_dataloader(
+        self, sampler_state_dict: Optional[Dict[str, Any]] = None
+    ) -> DataLoader:
         """Create train dataloader."""
         cuts = self.train_cuts()
         dataset = self._create_dataset(cuts, is_train=True)
@@ -231,20 +237,17 @@ class MLSEnglishHFAsrDataModule:
     @lru_cache()
     def train_cuts(self) -> CutSet:
         return CutSet.from_huggingface_dataset(
-            self.dataset["train"],
-            text_key="transcript"
+            self.dataset["train"], text_key="transcript"
         )

     @lru_cache()
     def valid_cuts(self) -> CutSet:
         return CutSet.from_huggingface_dataset(
-            self.dataset["dev"],
-            text_key="transcript"
+            self.dataset["dev"], text_key="transcript"
         )

     @lru_cache()
     def test_cuts(self) -> CutSet:
         return CutSet.from_huggingface_dataset(
-            self.dataset["test"],
-            text_key="transcript"
+            self.dataset["test"], text_key="transcript"
         )
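Taken together, the methods visible in this file suggest a small driver. The sketch below is not from the commit: the parser-registration classmethod name add_arguments is assumed from icefall convention; only __init__, load_dataset, train_dataloader, and the *_cuts methods are confirmed by the diff.

# Hypothetical usage of MLSEnglishHFAsrDataModule; add_arguments is an
# assumed helper name, the rest mirrors methods shown in this diff.
import argparse

from asr_datamodule import MLSEnglishHFAsrDataModule

parser = argparse.ArgumentParser()
MLSEnglishHFAsrDataModule.add_arguments(parser)  # assumed classmethod name
args = parser.parse_args()

dm = MLSEnglishHFAsrDataModule(args)
dm.load_dataset("parler-tts/mls_eng")  # dataset id appears in this commit
train_dl = dm.train_dataloader()
batch = next(iter(train_dl))  # K2SpeechRecognitionDataset-style batch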

View File

@@ -49,7 +49,7 @@ concat_params = {"gap": 1.0, "maxlen": 10.0}

 def make_cutset_blueprints(
-    mls_eng_hf_dataset_path: str = "parler-tts/mls_eng"
+    mls_eng_hf_dataset_path: str = "parler-tts/mls_eng",
 ) -> List[Tuple[str, CutSet]]:
     cut_sets = []
@@ -67,17 +67,14 @@ def make_cutset_blueprints(
     cut_sets.append(
         (
             "test",
-            CutSet.from_huggingface_dataset(dataset["test"], text_key="transcript")
+            CutSet.from_huggingface_dataset(dataset["test"], text_key="transcript"),
         )
     )

     # Create dev dataset
     logging.info("Creating dev cuts.")
     cut_sets.append(
-        (
-            "dev",
-            CutSet.from_huggingface_dataset(dataset["dev"], text_key="transcript")
-        )
+        ("dev", CutSet.from_huggingface_dataset(dataset["dev"], text_key="transcript"))
     )

     # Create train dataset
@@ -85,7 +82,7 @@ def make_cutset_blueprints(
     cut_sets.append(
         (
             "train",
-            CutSet.from_huggingface_dataset(dataset["train"], text_key="transcript")
+            CutSet.from_huggingface_dataset(dataset["train"], text_key="transcript"),
         )
     )
     return cut_sets
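A hedged sketch of how the (name, CutSet) blueprints above might be consumed; the manifest paths are illustrative, not from this commit:

# Illustrative consumer of make_cutset_blueprints; output paths are made up.
cut_sets = make_cutset_blueprints("parler-tts/mls_eng")
for part, cuts in cut_sets:
    # Write each split's cuts to a Lhotse manifest for later feature extraction.
    cuts.to_file(f"data/manifests/mls_eng_cuts_{part}.jsonl.gz")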

View File

@@ -24,6 +24,7 @@ from typing import Optional
 from lhotse import CutSet
 from tqdm import tqdm

 def get_args():
     parser = argparse.ArgumentParser(
         description="Generate transcripts for BPE training from MLS English dataset",
@@ -53,6 +54,7 @@ def get_args():
     return parser.parse_args()

 def generate_transcript_from_cuts(cuts: CutSet, output_file: Path) -> None:
     """Generate transcript text file from Lhotse CutSet."""
     with open(output_file, "w") as f:
@@ -60,6 +62,7 @@ def generate_transcript_from_cuts(cuts: CutSet, output_file: Path) -> None:
         for sup in cut.supervisions:
             f.write(f"{sup.text}\n")

 def main():
     args = get_args()
     logging.basicConfig(
@@ -73,9 +76,7 @@ def main():
     logging.info(f"Loading {args.split} split from dataset: {args.dataset_path}")
     try:
         cuts = CutSet.from_huggingface_dataset(
-            args.dataset_path,
-            split=args.split,
-            text_key="transcript"
+            args.dataset_path, split=args.split, text_key="transcript"
         )
     except Exception as e:
         logging.error(f"Failed to load dataset: {e}")
@@ -85,5 +86,6 @@ def main():
     generate_transcript_from_cuts(cuts, output_file)
     logging.info("Transcript generation completed")

 if __name__ == "__main__":
     main()
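The same transcript generation can be driven directly from Python, mirroring the calls main() makes above (output path illustrative, and assuming the script's functions are importable):

from pathlib import Path

from lhotse import CutSet

# Mirrors the script above: load one split as cuts, then dump supervision
# texts to a plain-text file for BPE training.
cuts = CutSet.from_huggingface_dataset(
    "parler-tts/mls_eng", split="train", text_key="transcript"
)
generate_transcript_from_cuts(cuts, Path("data/lang/transcript_words.txt"))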

View File

@@ -103,9 +103,6 @@ from pathlib import Path
 from typing import Dict, List, Optional, Tuple

 import k2
-
-# import sentencepiece as spm
-from tokenizer import Tokenizer
 import torch
 import torch.nn as nn
 from asr_datamodule import MLSEnglishHFAsrDataModule
@@ -123,6 +120,10 @@ from beam_search import (
     modified_beam_search_lm_shallow_fusion,
     modified_beam_search_LODR,
 )
+
+# import sentencepiece as spm
+from tokenizer import Tokenizer
+
 # from gigaspeech_scoring import asr_text_post_processing
 from train import add_model_arguments, get_model, get_params
@@ -384,6 +385,7 @@ def get_parser():
     return parser

 def asr_text_post_processing(inp):
     return inp
@@ -869,7 +871,6 @@ def main():
     sp = Tokenizer.load(Path(args.lang_dir), "bpe")  # force bpe model

     # <blk> and <unk> are defined in local/train_bpe_model.py
     params.blank_id = sp.piece_to_id("<blk>")
     params.unk_id = sp.piece_to_id("<unk>")

View File

@@ -1239,7 +1239,6 @@ def run(rank, world_size, args):
     # valid_dl = mls_english_corpus.valid_dataloader(valid_cuts)
     valid_dl = mls_english_corpus.valid_dataloader()

     if not params.print_diagnostics:
         scan_pessimistic_batches_for_oom(
             model=model,