Mirror of https://github.com/k2-fsa/icefall.git, synced 2025-09-19 05:54:20 +00:00.
fixed a formatting issue

Commit: a4e3186e4f
Parent: 20d4031e9b
@@ -269,7 +269,7 @@ def main():
    # Load tokens.txt here
    token_table = k2.SymbolTable.from_file(params.tokens)

    # Load id of the <blk> token and the vocab size, <blk> is
    # defined in local/train_bpe_model.py
    params.blank_id = token_table["<blk>"]
    params.vocab_size = num_tokens(token_table) + 1  # +1 for <blk>
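Every hunk in this commit touches the same token-loading snippet. As a minimal sketch of what that snippet does, assuming k2 is installed and that tokens.txt uses the usual one-"symbol id"-pair-per-line format written by local/train_bpe_model.py:

    # Hedged sketch: build a toy tokens.txt and load it the way the
    # diff above does. The "symbol id" file format is the standard
    # k2/Kaldi symbol-table format; putting <blk> at id 0 is the
    # convention assumed here, not something this diff states.
    import k2

    with open("tokens.txt", "w", encoding="utf-8") as f:
        f.write("<blk> 0\n<sos/eos> 1\na 2\nb 3\n")

    token_table = k2.SymbolTable.from_file("tokens.txt")
    blank_id = token_table["<blk>"]  # id of the blank token, here 0
    print("blank_id:", blank_id)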
@@ -224,7 +224,7 @@ def main():
    # Load tokens.txt here
    token_table = k2.SymbolTable.from_file(params.tokens)

    # Load id of the <blk> token and the vocab size, <blk> is
    # defined in local/train_bpe_model.py
    params.blank_id = token_table["<blk>"]
    params.vocab_size = num_tokens(token_table) + 1  # +1 for <blk>
@@ -440,7 +440,7 @@ def main():
    # Load tokens.txt here
    token_table = k2.SymbolTable.from_file(params.tokens)

    # Load id of the <blk> token and the vocab size, <blk> is
    # defined in local/train_bpe_model.py
    params.blank_id = token_table["<blk>"]
    params.vocab_size = num_tokens(token_table) + 1  # +1 for <blk>
@@ -270,7 +270,7 @@ def main():
    # Load tokens.txt here
    token_table = k2.SymbolTable.from_file(params.tokens)

    # Load id of the <blk> token and the vocab size, <blk> is
    # defined in local/train_bpe_model.py
    params.blank_id = token_table["<blk>"]
    params.vocab_size = num_tokens(token_table) + 1  # +1 for <blk>
@@ -269,7 +269,7 @@ def main():
    # Load tokens.txt here
    token_table = k2.SymbolTable.from_file(params.tokens)

    # Load id of the <blk> token and the vocab size, <blk> is
    # defined in local/train_bpe_model.py
    params.blank_id = token_table["<blk>"]
    params.vocab_size = num_tokens(token_table) + 1  # +1 for <blk>
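For reference, the "+1 for <blk>" arithmetic: icefall's num_tokens helper is understood to count the non-disambiguation symbols in the table and to exclude id 0, so the blank must be added back to obtain the model's vocabulary size. A hedged re-implementation under those assumptions:

    # num_tokens_sketch is a hypothetical stand-in for icefall's
    # num_tokens; excluding disambiguation symbols (#0, #1, ...) and
    # id 0 is an assumption about its behavior, not a quote of it.
    import re

    def num_tokens_sketch(id_by_symbol):
        disambig = re.compile(r"^#\d+$")
        ids = [i for s, i in id_by_symbol.items() if not disambig.match(s)]
        n = len(ids)
        if 0 in ids:  # id 0 is <blk>; it is excluded here ...
            n -= 1
        return n

    table = {"<blk>": 0, "<sos/eos>": 1, "a": 2, "b": 3, "#0": 4}
    vocab_size = num_tokens_sketch(table) + 1  # ... and added back here
    print(vocab_size)  # 4: <blk>, <sos/eos>, a, b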