Fix code style

parent d58cd7b39d
commit dad5680920
@@ -452,7 +452,6 @@ class RelPositionMultiheadAttention(nn.Module):
 
         self._reset_parameters()
 
-
     def _reset_parameters(self) -> None:
         nn.init.xavier_uniform_(self.in_proj.weight)
         nn.init.constant_(self.in_proj.bias, 0.0)
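For context, _reset_parameters initializes the fused input projection with Xavier-uniform weights and zero biases. Below is a minimal sketch of the same initialization pattern; the class name, embed_dim value, and the usage lines at the end are illustrative, only in_proj and _reset_parameters come from the diff.

import torch.nn as nn


class TinyAttentionProjection(nn.Module):
    """Illustrative module using the same initialization style as the hunk above."""

    def __init__(self, embed_dim: int = 256) -> None:
        super().__init__()
        # One fused linear layer producing query, key, and value (3 * embed_dim outputs).
        self.in_proj = nn.Linear(embed_dim, 3 * embed_dim, bias=True)
        self._reset_parameters()

    def _reset_parameters(self) -> None:
        # Xavier-uniform weights keep activation variance roughly constant; biases start at zero.
        nn.init.xavier_uniform_(self.in_proj.weight)
        nn.init.constant_(self.in_proj.bias, 0.0)


m = TinyAttentionProjection(embed_dim=16)
print(m.in_proj.weight.shape)  # torch.Size([48, 16])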
@@ -682,7 +681,6 @@ class RelPositionMultiheadAttention(nn.Module):
                 _b = _b[_start:]
             v = nn.functional.linear(value, _w, _b)
 
-
         if attn_mask is not None:
             assert (
                 attn_mask.dtype == torch.float32
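The lines around this hunk slice the fused in-projection weight and bias to compute the value projection with nn.functional.linear. A minimal sketch of that slicing, assuming a fused weight of shape (3 * embed_dim, embed_dim); the tensor shapes and the (sequence, embed_dim) layout are illustrative, only the names _w, _b, _start, and v appear in the diff.

import torch
import torch.nn as nn

embed_dim = 8
# A fused in-projection stacks the q, k, and v weights along dim 0.
in_proj = nn.Linear(embed_dim, 3 * embed_dim, bias=True)

value = torch.randn(4, embed_dim)  # illustrative (sequence, embed_dim) layout

# The last third of the fused weight/bias is the slice that projects values.
_start = 2 * embed_dim
_w = in_proj.weight[_start:, :]
_b = in_proj.bias
if _b is not None:
    _b = _b[_start:]
v = nn.functional.linear(value, _w, _b)
print(v.shape)  # torch.Size([4, 8])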
@@ -139,7 +139,8 @@ def get_params() -> AttributeDict:
 
         - subsampling_factor: The subsampling factor for the model.
 
-        - use_feat_batchnorm: Whether to do batch normalization for the input features.
+        - use_feat_batchnorm: Whether to do batch normalization for the
+          input features.
 
         - attention_dim: Hidden dim for multi-head attention model.
 
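The docstring being re-wrapped here describes fields of the AttributeDict returned by get_params(). A minimal sketch of that structure, assuming icefall's AttributeDict (a dict with attribute access); the concrete values are illustrative, not the defaults used in train.py.

from icefall.utils import AttributeDict


def get_params() -> AttributeDict:
    # Illustrative values only; the real get_params() defines many more fields.
    return AttributeDict(
        {
            "subsampling_factor": 4,     # frame-rate reduction in the encoder front end
            "use_feat_batchnorm": True,  # batch-normalize the input features
            "attention_dim": 512,        # hidden dim for the multi-head attention model
        }
    )


params = get_params()
print(params.attention_dim)  # 512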
@@ -108,7 +108,6 @@ def get_parser():
         """,
     )
 
-
     parser.add_argument(
         "--export",
         type=str2bool,
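The --export option uses str2bool so that strings like "true"/"false" on the command line parse to Python booleans. A minimal sketch of the same argparse pattern, assuming icefall's str2bool helper; the default value and help text are illustrative.

import argparse

from icefall.utils import str2bool  # converts "true"/"false"-style strings to bool


def get_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--export",
        type=str2bool,
        default=False,
        help="Illustrative help text: whether to export the trained model.",
    )
    return parser


args = get_parser().parse_args(["--export", "true"])
print(args.export)  # True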