mirror of https://github.com/k2-fsa/icefall.git
Progress in testing

commit e0b04ba54f (parent 03ff4aab2f)
[File diff suppressed because it is too large]
egs/librispeech/ASR/conformer_lm/test_conformer.py (new file, 62 lines)
@@ -0,0 +1,62 @@
#!/usr/bin/env python3

# Run with:
#   python3 -m pytest test_conformer.py

import torch
from conformer import (
    TransformerDecoderRelPos,
    MaskedLmConformer,
    RelPositionMultiheadAttention,
    RelPositionalEncoding,
    generate_square_subsequent_mask,
)

from torch.nn.utils.rnn import pad_sequence

def test_rel_position_multihead_attention():
    # Also tests RelPositionalEncoding.
    embed_dim = 256
    num_heads = 4
    T = 25
    N = 4
    C = 256
    pos_emb_module = RelPositionalEncoding(C, dropout_rate=0.0)
    rel_pos_multihead_attn = RelPositionMultiheadAttention(embed_dim, num_heads)

    x = torch.randn(N, T, C)
    # pos_emb = torch.randn(1, 2*T-1, C)
    x, pos_enc = pos_emb_module(x)
    print("pos_enc.shape=", pos_enc.shape)
    x = x.transpose(0, 1)  # (T, N, C)
    attn_output, attn_output_weights = rel_pos_multihead_attn(x, x, x, pos_enc)
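    # Possible follow-up shape checks (an assumption, not in the original
    # test: this presumes the module follows torch.nn.MultiheadAttention's
    # return convention of (attn_output, attn_output_weights)):
    #   assert attn_output.shape == (T, N, embed_dim)
    #   assert attn_output_weights.shape == (N, T, T)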


def test_transformer():
    # Disabled for now: the body below references `Transformer`, which is
    # not imported in this file.
    return
    num_features = 40
    num_classes = 87
    model = Transformer(num_features=num_features, num_classes=num_classes)

    N = 31

    for T in range(7, 30):
        x = torch.rand(N, T, num_features)
        y, _, _ = model(x)
        assert y.shape == (N, (((T - 1) // 2) - 1) // 2, num_classes)
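        # The expected length (((T - 1) // 2) - 1) // 2 matches a two-layer
        # stride-2 convolutional subsampling front end (roughly T / 4);
        # e.g. T = 25 -> (24 // 2 - 1) // 2 = 5.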


def test_generate_square_subsequent_mask():
    s = 5
    mask = generate_square_subsequent_mask(s, torch.device("cpu"))
    inf = float("inf")
    expected_mask = torch.tensor(
        [
            [0.0, -inf, -inf, -inf, -inf],
            [0.0, 0.0, -inf, -inf, -inf],
            [0.0, 0.0, 0.0, -inf, -inf],
            [0.0, 0.0, 0.0, 0.0, -inf],
            [0.0, 0.0, 0.0, 0.0, 0.0],
        ]
    )
    assert torch.all(torch.eq(mask, expected_mask))
Deleted file (89 lines)
@@ -1,89 +0,0 @@
#!/usr/bin/env python3

import torch
from transformer import (
    Transformer,
    encoder_padding_mask,
    generate_square_subsequent_mask,
    decoder_padding_mask,
    add_sos,
    add_eos,
)

from torch.nn.utils.rnn import pad_sequence

def test_encoder_padding_mask():
    supervisions = {
        "sequence_idx": torch.tensor([0, 1, 2]),
        "start_frame": torch.tensor([0, 0, 0]),
        "num_frames": torch.tensor([18, 7, 13]),
    }

    max_len = ((18 - 1) // 2 - 1) // 2
    mask = encoder_padding_mask(max_len, supervisions)
    expected_mask = torch.tensor(
        [
            [False, False, False],  # ((18 - 1) // 2 - 1) // 2 == 3
            [False, True, True],    # ((7 - 1) // 2 - 1) // 2 == 1
            [False, False, True],   # ((13 - 1) // 2 - 1) // 2 == 2
        ]
    )
    assert torch.all(torch.eq(mask, expected_mask))
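    # The mask marks padded positions as True: row i becomes True from
    # position ((num_frames[i] - 1) // 2 - 1) // 2 onwards, i.e. from each
    # sequence's subsampled length.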


def test_transformer():
    num_features = 40
    num_classes = 87
    model = Transformer(num_features=num_features, num_classes=num_classes)

    N = 31

    for T in range(7, 30):
        x = torch.rand(N, T, num_features)
        y, _, _ = model(x)
        assert y.shape == (N, (((T - 1) // 2) - 1) // 2, num_classes)


def test_generate_square_subsequent_mask():
    s = 5
    mask = generate_square_subsequent_mask(s)
    inf = float("inf")
    expected_mask = torch.tensor(
        [
            [0.0, -inf, -inf, -inf, -inf],
            [0.0, 0.0, -inf, -inf, -inf],
            [0.0, 0.0, 0.0, -inf, -inf],
            [0.0, 0.0, 0.0, 0.0, -inf],
            [0.0, 0.0, 0.0, 0.0, 0.0],
        ]
    )
    assert torch.all(torch.eq(mask, expected_mask))


def test_decoder_padding_mask():
    x = [torch.tensor([1, 2]), torch.tensor([3]), torch.tensor([2, 5, 8])]
    y = pad_sequence(x, batch_first=True, padding_value=-1)
    mask = decoder_padding_mask(y, ignore_id=-1)
    expected_mask = torch.tensor(
        [
            [False, False, True],
            [False, True, True],
            [False, False, False],
        ]
    )
    assert torch.all(torch.eq(mask, expected_mask))
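    # An equivalent one-liner (a sketch, not necessarily the library's
    # implementation): mask = (y == ignore_id), since pad_sequence filled
    # every padded position with -1.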


def test_add_sos():
    x = [[1, 2], [3], [2, 5, 8]]
    y = add_sos(x, sos_id=0)
    expected_y = [[0, 1, 2], [0, 3], [0, 2, 5, 8]]
    assert y == expected_y


def test_add_eos():
    x = [[1, 2], [3], [2, 5, 8]]
    y = add_eos(x, eos_id=0)
    expected_y = [[1, 2, 0], [3, 0], [2, 5, 8, 0]]
    assert y == expected_y
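    # Judging by the expected outputs, both helpers behave like simple list
    # comprehensions over lists of lists (sketch):
    #   add_sos(x, s) == [[s] + seq for seq in x]
    #   add_eos(x, e) == [seq + [e] for seq in x]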

[File diff suppressed because it is too large]