mirror of https://github.com/k2-fsa/icefall.git (synced 2025-08-09 01:52:41 +00:00)

Fix librispeech CI test errors

commit ffe2f16b1d
parent fe36fcc25c
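
Context for the hunks below: PyTorch 2.6 changed the default of torch.load() from weights_only=False to weights_only=True, which restricts unpickling to an allowlist of safe types. The checkpoints and k2 FSA dicts saved by icefall contain objects outside that allowlist, so the scripts now pass weights_only=False explicitly. A minimal sketch of the pattern, not taken from the commit (the file name is illustrative):

    import torch

    # icefall saves checkpoints and FSA attribute dicts with torch.save().
    torch.save({"model": {"w": torch.zeros(2)}}, "/tmp/ckpt.pt")

    # Under PyTorch >= 2.6, torch.load() defaults to weights_only=True and
    # rejects pickled custom classes (e.g. objects stored inside k2 FSA
    # dicts) with an UnpicklingError. Passing weights_only=False restores
    # the old behavior; only do this for files you created yourself.
    ckpt = torch.load("/tmp/ckpt.pt", map_location="cpu", weights_only=False)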
@@ -91,7 +91,7 @@ def get_matrix(min_torch_version, specified_torch_version, specified_python_version
     matrix = []
     for p in python_version:
         for t in torch_version:
-            if min_torch_version and version_gt(min_torch_version, t):
+            if min_torch_version and version_ge(min_torch_version, t):
                 continue

             # torchaudio <= 1.13.x supports only python <= 3.10
.github/workflows/librispeech.yml (vendored, 2 changed lines)
@@ -30,7 +30,7 @@ jobs:
       run: |
         # outputting for debugging purposes
         python ./.github/scripts/docker/generate_build_matrix.py --python-version "3.10"
-        MATRIX=$(python ./.github/scripts/docker/generate_build_matrix.py --python-version "3.10")
+        MATRIX=$(python ./.github/scripts/docker/generate_build_matrix.py --python-version "3.10" --min-torch-version "2.6.0")
         echo "::set-output name=matrix::${MATRIX}"

   librispeech:
     needs: generate_build_matrix
.github/workflows/test.yml (vendored, 3 changed lines)
@@ -30,7 +30,8 @@ jobs:
       id: set-matrix
       run: |
         # outputting for debugging purposes
-        MATRIX=$(python ./.github/scripts/docker/generate_build_matrix.py --min-torch-version "2.3")
+        python ./.github/scripts/docker/generate_build_matrix.py --python-version "3.10"
+        MATRIX=$(python ./.github/scripts/docker/generate_build_matrix.py --python-version "3.10")
         echo "::set-output name=matrix::${MATRIX}"

   test:
     needs: generate_build_matrix
.github/workflows/yesno.yml (vendored, 3 changed lines)
@@ -31,7 +31,8 @@ jobs:
       run: |
         # outputting for debugging purposes
         python ./.github/scripts/docker/generate_build_matrix.py --python-version "3.10"
-        MATRIX=$(python ./.github/scripts/docker/generate_build_matrix.py --python-version "3.10")
+        # MATRIX=$(python ./.github/scripts/docker/generate_build_matrix.py --python-version "3.10")
+        MATRIX=$(python ./.github/scripts/docker/generate_build_matrix.py --python-version "3.10" --min-torch-version "2.5.0")
         echo "::set-output name=matrix::${MATRIX}"

   yesno:
     needs: generate_build_matrix
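
The matrix-generation step can also be exercised outside CI; a small sketch, assuming an icefall checkout as the working directory (the script path and flags are taken from the hunks above):

    import subprocess

    # Run the same command the yesno workflow runs and print the JSON matrix
    # that the workflow step exports via ::set-output.
    matrix = subprocess.check_output(
        [
            "python",
            "./.github/scripts/docker/generate_build_matrix.py",
            "--python-version", "3.10",
            "--min-torch-version", "2.5.0",
        ],
        text=True,
    ).strip()
    print(matrix)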
@@ -633,7 +633,9 @@ def main():
         H = None
         bpe_model = None
         HLG = k2.Fsa.from_dict(
-            torch.load(f"{params.lang_dir}/HLG.pt", map_location=device)
+            torch.load(
+                f"{params.lang_dir}/HLG.pt", map_location=device, weights_only=False
+            )
         )
         assert HLG.requires_grad is False

@@ -672,7 +674,9 @@ def main():
             torch.save(G.as_dict(), params.lm_dir / "G_4_gram.pt")
         else:
             logging.info("Loading pre-compiled G_4_gram.pt")
-            d = torch.load(params.lm_dir / "G_4_gram.pt", map_location=device)
+            d = torch.load(
+                params.lm_dir / "G_4_gram.pt", map_location=device, weights_only=False
+            )
             G = k2.Fsa.from_dict(d)

         if params.decoding_method == "whole-lattice-rescoring":
@@ -786,7 +786,7 @@ def main():
         lg_filename = params.lang_dir / "LG.pt"
         logging.info(f"Loading {lg_filename}")
         decoding_graph = k2.Fsa.from_dict(
-            torch.load(lg_filename, map_location=device)
+            torch.load(lg_filename, map_location=device, weights_only=False)
         )
         decoding_graph.scores *= params.ngram_lm_scale
     else:
@@ -347,7 +347,9 @@ def main():
         "whole-lattice-rescoring",
     ]:
         logging.info(f"Loading HLG from {params.HLG}")
-        HLG = k2.Fsa.from_dict(torch.load(params.HLG, map_location="cpu"))
+        HLG = k2.Fsa.from_dict(
+            torch.load(params.HLG, map_location="cpu", weights_only=False)
+        )
         HLG = HLG.to(device)
         if not hasattr(HLG, "lm_scores"):
             # For whole-lattice-rescoring and attention-decoder
@@ -358,7 +360,9 @@ def main():
         "whole-lattice-rescoring",
     ]:
         logging.info(f"Loading G from {params.G}")
-        G = k2.Fsa.from_dict(torch.load(params.G, map_location="cpu"))
+        G = k2.Fsa.from_dict(
+            torch.load(params.G, map_location="cpu", weights_only=False)
+        )
         G = G.to(device)
         if params.method == "whole-lattice-rescoring":
             # Add epsilon self-loops to G as we will compose
@@ -247,7 +247,7 @@ def main():
     num_param = sum([p.numel() for p in model.parameters()])
     logging.info(f"Number of model parameters: {num_param}")

-    checkpoint = torch.load(args.checkpoint, map_location="cpu")
+    checkpoint = torch.load(args.checkpoint, map_location="cpu", weights_only=False)
     model.load_state_dict(checkpoint["model"], strict=False)
     model.to(device)
     model.eval()
@@ -286,7 +286,7 @@ def main():
     num_param = sum([p.numel() for p in model.parameters()])
     logging.info(f"Number of model parameters: {num_param}")

-    checkpoint = torch.load(args.checkpoint, map_location="cpu")
+    checkpoint = torch.load(args.checkpoint, map_location="cpu", weights_only=False)
     model.load_state_dict(checkpoint["model"], strict=False)
     model.to(device)
     model.eval()
@@ -936,7 +936,7 @@ def main():
         lg_filename = params.lang_dir / "LG.pt"
         logging.info(f"Loading {lg_filename}")
         decoding_graph = k2.Fsa.from_dict(
-            torch.load(lg_filename, map_location=device)
+            torch.load(lg_filename, map_location=device, weights_only=False)
         )
         decoding_graph.scores *= params.ngram_lm_scale
     else:
@@ -247,7 +247,7 @@ def main():
     num_param = sum([p.numel() for p in model.parameters()])
     logging.info(f"Number of model parameters: {num_param}")

-    checkpoint = torch.load(args.checkpoint, map_location="cpu")
+    checkpoint = torch.load(args.checkpoint, map_location="cpu", weights_only=False)
     model.load_state_dict(checkpoint["model"], strict=False)
     model.to(device)
     model.eval()
@@ -947,7 +947,9 @@ def main():
         H = None
         bpe_model = None
         HLG = k2.Fsa.from_dict(
-            torch.load(f"{params.lang_dir}/HLG.pt", map_location=device)
+            torch.load(
+                f"{params.lang_dir}/HLG.pt", map_location=device, weights_only=False
+            )
         )
         assert HLG.requires_grad is False

@@ -987,7 +989,9 @@ def main():
             torch.save(G.as_dict(), params.lm_dir / "G_4_gram.pt")
         else:
             logging.info("Loading pre-compiled G_4_gram.pt")
-            d = torch.load(params.lm_dir / "G_4_gram.pt", map_location=device)
+            d = torch.load(
+                params.lm_dir / "G_4_gram.pt", map_location=device, weights_only=False
+            )
             G = k2.Fsa.from_dict(d)

         if params.decoding_method in [
@@ -1013,7 +1013,7 @@ def main():
         lg_filename = params.lang_dir / "LG.pt"
         logging.info(f"Loading {lg_filename}")
         decoding_graph = k2.Fsa.from_dict(
-            torch.load(lg_filename, map_location=device)
+            torch.load(lg_filename, map_location=device, weights_only=False)
        )
         decoding_graph.scores *= params.ngram_lm_scale
     else:
@@ -1049,7 +1049,7 @@ def main():
         lg_filename = params.lang_dir / "LG.pt"
         logging.info(f"Loading {lg_filename}")
         decoding_graph = k2.Fsa.from_dict(
-            torch.load(lg_filename, map_location=device)
+            torch.load(lg_filename, map_location=device, weights_only=False)
         )
         decoding_graph.scores *= params.ngram_lm_scale
     else:
@@ -765,7 +765,7 @@ def load_model_params(

     """
     logging.info(f"Loading checkpoint from {ckpt}")
-    checkpoint = torch.load(ckpt, map_location="cpu")
+    checkpoint = torch.load(ckpt, map_location="cpu", weights_only=False)

     # if module list is empty, load the whole model from ckpt
     if not init_modules:
@@ -346,7 +346,9 @@ def main():
         "whole-lattice-rescoring",
     ]:
         logging.info(f"Loading HLG from {params.HLG}")
-        HLG = k2.Fsa.from_dict(torch.load(params.HLG, map_location="cpu"))
+        HLG = k2.Fsa.from_dict(
+            torch.load(params.HLG, map_location="cpu", weights_only=False)
+        )
         HLG = HLG.to(device)
         if not hasattr(HLG, "lm_scores"):
             # For whole-lattice-rescoring and attention-decoder
@@ -357,7 +359,9 @@ def main():
         "whole-lattice-rescoring",
     ]:
         logging.info(f"Loading G from {params.G}")
-        G = k2.Fsa.from_dict(torch.load(params.G, map_location="cpu"))
+        G = k2.Fsa.from_dict(
+            torch.load(params.G, map_location="cpu", weights_only=False)
+        )
         G = G.to(device)
         if params.method == "whole-lattice-rescoring":
             # Add epsilon self-loops to G as we will compose
@@ -289,7 +289,7 @@ def main():
     num_param = sum([p.numel() for p in model.parameters()])
     logging.info(f"Number of model parameters: {num_param}")

-    checkpoint = torch.load(args.checkpoint, map_location="cpu")
+    checkpoint = torch.load(args.checkpoint, map_location="cpu", weights_only=False)
     model.load_state_dict(checkpoint["model"], strict=False)
     model.to(device)
     model.eval()
@@ -305,7 +305,7 @@ def main():
     num_param = sum([p.numel() for p in model.parameters()])
     logging.info(f"Number of model parameters: {num_param}")

-    checkpoint = torch.load(args.checkpoint, map_location="cpu")
+    checkpoint = torch.load(args.checkpoint, map_location="cpu", weights_only=False)
     model.load_state_dict(checkpoint["model"], strict=False)
     model.to(device)
     model.eval()
@@ -389,7 +389,9 @@ def main():
         "whole-lattice-rescoring",
     ]:
         logging.info(f"Loading HLG from {params.HLG}")
-        HLG = k2.Fsa.from_dict(torch.load(params.HLG, map_location="cpu"))
+        HLG = k2.Fsa.from_dict(
+            torch.load(params.HLG, map_location="cpu", weights_only=False)
+        )
         HLG = HLG.to(device)
         if not hasattr(HLG, "lm_scores"):
             # For whole-lattice-rescoring and attention-decoder
@@ -400,7 +402,9 @@ def main():
         "whole-lattice-rescoring",
     ]:
         logging.info(f"Loading G from {params.G}")
-        G = k2.Fsa.from_dict(torch.load(params.G, map_location="cpu"))
+        G = k2.Fsa.from_dict(
+            torch.load(params.G, map_location="cpu", weights_only=False)
+        )
         G = G.to(device)
         if params.method == "whole-lattice-rescoring":
             # Add epsilon self-loops to G as we will compose
@@ -679,7 +679,9 @@ def main():
         H = None
         bpe_model = None
         HLG = k2.Fsa.from_dict(
-            torch.load(f"{params.lang_dir}/HLG.pt", map_location=device)
+            torch.load(
+                f"{params.lang_dir}/HLG.pt", map_location=device, weights_only=False
+            )
         )
         assert HLG.requires_grad is False

@@ -719,7 +721,9 @@ def main():
             torch.save(G.as_dict(), params.lm_dir / "G_4_gram.pt")
         else:
             logging.info("Loading pre-compiled G_4_gram.pt")
-            d = torch.load(params.lm_dir / "G_4_gram.pt", map_location=device)
+            d = torch.load(
+                params.lm_dir / "G_4_gram.pt", map_location=device, weights_only=False
+            )
             G = k2.Fsa.from_dict(d)

         if params.method in [
@@ -47,7 +47,7 @@ def compile_HLG(lang_dir: str) -> k2.Fsa:
     max_token_id = max(lexicon.tokens)
     logging.info(f"Building ctc_topo. max_token_id: {max_token_id}")
     H = k2.ctc_topo(max_token_id)
-    L = k2.Fsa.from_dict(torch.load(f"{lang_dir}/L_disambig.pt"))
+    L = k2.Fsa.from_dict(torch.load(f"{lang_dir}/L_disambig.pt", weights_only=False))

     logging.info("Loading G.fst.txt")
     with open("data/lm/G.fst.txt") as f:
@@ -271,7 +271,9 @@ def main():

     logging.info(f"device: {device}")

-    HLG = k2.Fsa.from_dict(torch.load(f"{params.lang_dir}/HLG.pt", map_location="cpu"))
+    HLG = k2.Fsa.from_dict(
+        torch.load(f"{params.lang_dir}/HLG.pt", map_location="cpu", weights_only=False)
+    )
     HLG = HLG.to(device)
     assert HLG.requires_grad is False

@@ -131,7 +131,9 @@ def main():
     model.to(device)

     logging.info(f"Loading HLG from {params.HLG}")
-    HLG = k2.Fsa.from_dict(torch.load(params.HLG, map_location="cpu"))
+    HLG = k2.Fsa.from_dict(
+        torch.load(params.HLG, map_location="cpu", weights_only=False)
+    )
     HLG = HLG.to(device)

     logging.info("Constructing Fbank computer")
@@ -176,7 +176,9 @@ def main():
     model = OnnxModel(params.nn_model)

     logging.info(f"Loading HLG from {args.HLG}")
-    HLG = k2.Fsa.from_dict(torch.load(params.HLG, map_location="cpu"))
+    HLG = k2.Fsa.from_dict(
+        torch.load(params.HLG, map_location="cpu", weights_only=False)
+    )
     HLG = HLG.to(device)

     logging.info("Constructing Fbank computer")
@@ -148,13 +148,15 @@ def main():
         num_classes=params.num_classes,
     )

-    checkpoint = torch.load(args.checkpoint, map_location="cpu")
+    checkpoint = torch.load(args.checkpoint, map_location="cpu", weights_only=False)
     model.load_state_dict(checkpoint["model"])
     model.to(device)
     model.eval()

     logging.info(f"Loading HLG from {params.HLG}")
-    HLG = k2.Fsa.from_dict(torch.load(params.HLG, map_location="cpu"))
+    HLG = k2.Fsa.from_dict(
+        torch.load(params.HLG, map_location="cpu", weights_only=False)
+    )
     HLG = HLG.to(device)

     logging.info("Constructing Fbank computer")