Mirror of https://github.com/k2-fsa/icefall.git (synced 2025-08-10 02:22:17 +00:00)
fixed import quantization is none (#541)
Signed-off-by: shanguanma <nanr9544@gmail.com>
Co-authored-by: shanguanma <nanr9544@gmail.com>
This commit is contained in:
  parent c0101185d7
  commit dbd61a9db3
@@ -81,9 +81,9 @@ if [ $stage -le 0 ] && [ $stop_stage -ge 0 ] && [ ! "$use_extracted_codebook" ==
   # or
   # pip install multi_quantization
 
-  has_quantization=$(python3 -c "import importlib; print(importlib.util.find_spec('quantization') is not None)")
+  has_quantization=$(python3 -c "import importlib; print(importlib.util.find_spec('multi_quantization') is not None)")
   if [ $has_quantization == 'False' ]; then
-    log "Please install quantization before running following stages"
+    log "Please install multi_quantization before running following stages"
     exit 1
   fi
 
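The shell change above only updates which package name is probed before the later stages run. As a minimal Python sketch of that same probe, equivalent to the `python3 -c` one-liner in the script (the standalone helper name `check_multi_quantization` is illustrative, not part of icefall):

# Minimal sketch of the importability check performed by the shell one-liner above.
# `check_multi_quantization` is an illustrative helper name, not icefall code.
import importlib.util
import sys

def check_multi_quantization() -> bool:
    """Return True if the renamed `multi_quantization` package can be imported."""
    return importlib.util.find_spec("multi_quantization") is not None

if __name__ == "__main__":
    if not check_multi_quantization():
        print("Please install multi_quantization before running following stages",
              file=sys.stderr)
        sys.exit(1)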
@@ -28,7 +28,7 @@ from typing import List, Tuple
 import numpy as np
 import torch
 import torch.multiprocessing as mp
-import quantization
+import multi_quantization as quantization
 
 from asr_datamodule import LibriSpeechAsrDataModule
 from hubert_xlarge import HubertXlargeFineTuned
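The Python fix rebinds the renamed package to the old module name, so the rest of the file can keep referring to `quantization` unchanged. As an aside, code that must run against either the old or the new package name could use a guarded fallback import; this is only an illustrative pattern, not what the commit does (the commit switches unconditionally to `multi_quantization`):

# Illustrative fallback import, assuming only the package name changed.
# The commit itself imports multi_quantization unconditionally.
try:
    import multi_quantization as quantization  # new package name
except ImportError:
    import quantization  # environments that still ship the old package name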