k2-fsa/icefall (mirror of https://github.com/k2-fsa/icefall.git)
Fix comment; have 6, not 4, layers in most-downsampled stack.
commit a88587dc8a (parent 5958f1ee11)
@@ -123,7 +123,7 @@ def add_model_arguments(parser: argparse.ArgumentParser):
     parser.add_argument(
         "--num-encoder-layers",
         type=str,
-        default="2,4,4,4,4,4",
+        default="2,4,4,6,4,4",
         help="Number of zipformer encoder layers per stack, comma separated.",
     )
 
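For context, the spec "2,4,4,6,4,4" holds one integer per encoder stack, and the fourth entry is the most-downsampled stack that this commit grows from 4 to 6 layers. A minimal sketch of how such a comma-separated string can be parsed (the helper name here is illustrative, not necessarily the one icefall uses):

def to_int_tuple(s: str) -> tuple:
    # "2,4,4,6,4,4" -> (2, 4, 4, 6, 4, 4)
    return tuple(map(int, s.split(",")))

num_encoder_layers = to_int_tuple("2,4,4,6,4,4")
assert len(num_encoder_layers) == 6  # one layer count per encoder stack
assert num_encoder_layers[3] == 6    # the most-downsampled stack now has 6 layers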
@@ -1946,7 +1946,7 @@ class Conv2dSubsampling(nn.Module):
         assert in_channels >= 7
         super().__init__()
 
-        # The ScalarMultiply modules are there to prevent the gradients
+        # The ScaleGrad module is there to prevent the gradients
         # w.r.t. the weight or bias of the first Conv2d module in self.conv from
         # exceeding the range of fp16 when using automatic mixed precision (amp)
         # training. (The second one is necessary to stop its bias from getting
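The corrected comment names a gradient-scaling trick: a module that is the identity in the forward pass but multiplies the incoming gradient by a factor alpha in the backward pass, so the gradients reaching the first Conv2d's weight and bias stay within fp16 range under amp. A minimal sketch of that kind of module (icefall's actual ScaleGrad implementation may differ in its details):

import torch
from torch import nn


class ScaleGradFunction(torch.autograd.Function):
    """Identity in the forward pass; scales the gradient by `alpha` in the
    backward pass."""

    @staticmethod
    def forward(ctx, x: torch.Tensor, alpha: float) -> torch.Tensor:
        ctx.alpha = alpha
        return x

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor):
        # Shrink the gradient flowing back toward earlier modules;
        # `alpha` itself needs no gradient.
        return grad_output * ctx.alpha, None


class ScaleGrad(nn.Module):
    def __init__(self, alpha: float):
        super().__init__()
        self.alpha = alpha

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return ScaleGradFunction.apply(x, self.alpha)

Placed just after the first Conv2d in self.conv, a module like this with alpha < 1 reduces the gradients that reach that layer's weight and bias, which is what keeps them from overflowing fp16 during amp training; the exact placement and alpha value in icefall may differ.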