Mirror of https://github.com/k2-fsa/icefall.git (synced 2025-08-13 20:12:24 +00:00)
Fix a bug in beam_search
Commit f43ea5db30 (parent 59daf83b59)
@@ -665,7 +665,7 @@ def greedy_search_batch(
     assert torch.all(encoder_out_lens > 0), encoder_out_lens
     assert N == batch_size_list[0], (N, batch_size_list)

-    hyps = [[-1] * (context_size - 1) + [blank_id] for _ in range(N)]
+    hyps = [[-1] * context_size + [blank_id] for _ in range(N)]

     # timestamp[n][i] is the frame index after subsampling
     # on which hyp[n][i] is decoded
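The hunk above changes how each per-utterance hypothesis is seeded: the prefix now holds context_size placeholder tokens (-1) plus the blank symbol, i.e. context_size + 1 tokens instead of context_size. A minimal sketch of the two layouts, with toy values for context_size, blank_id, and N (in the real code these come from the model and the batch):

# Toy values only; in beam_search.py they come from model.decoder and the batch.
context_size = 2
blank_id = 0
N = 3

# Before the fix: context_size - 1 placeholders + blank -> context_size tokens.
old_hyps = [[-1] * (context_size - 1) + [blank_id] for _ in range(N)]
# After the fix: context_size placeholders + blank -> context_size + 1 tokens.
new_hyps = [[-1] * context_size + [blank_id] for _ in range(N)]

assert old_hyps[0] == [-1, 0]
assert new_hyps[0] == [-1, -1, 0]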
@@ -675,7 +675,7 @@ def greedy_search_batch(
         hyps,
         device=device,
         dtype=torch.int64,
-    )  # (N, context_size)
+    )[:, -context_size : ]  # (N, context_size)

     k = torch.zeros(N, 1, device=device, dtype=torch.int64)

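With the longer prefix, the first decoder input keeps only the last context_size tokens of each hypothesis, which is what the new [:, -context_size:] slice does. A small standalone sketch with toy hypotheses (CPU tensors, made-up values):

import torch

context_size = 2
hyps = [[-1, -1, 0], [-1, -1, 0]]  # prefixes of length context_size + 1

decoder_input = torch.tensor(
    hyps,
    dtype=torch.int64,
)[:, -context_size:]  # (N, context_size)

assert decoder_input.shape == (2, context_size)
assert decoder_input.tolist() == [[-1, 0], [-1, 0]]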
@@ -717,12 +717,13 @@ def greedy_search_batch(
             c = torch.tensor(
                 [h[-context_size - 1 :] for h in hyps[:batch_size]],
                 device=device,
+                dtype=torch.int64,
             )

             k[:, 0] = torch.sum(
                 (
-                    c[:, -context_size - 1 : -1]
-                    == c[:, -1].expand_as(c[:, -context_size - 1 : -1])
+                    c[:, : context_size]
+                    == c[:, context_size : context_size + 1].expand_as(c[:, : context_size])
                 ).int(),
                 dim=1,
                 keepdim=True,
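In this hunk, c holds the last context_size + 1 tokens of every active hypothesis; k counts how many of the first context_size columns equal the newly emitted token in the last column. The fix rewrites the slices with explicit indices so the context block and the current-token column line up with the longer prefix, and it pins the tensor dtype. A self-contained sketch of that count with toy data:

import torch

context_size = 2
# Each row: context_size previous tokens followed by the newly emitted token.
c = torch.tensor(
    [
        [5, 7, 7],  # one of the two context tokens equals the new token -> 1
        [3, 3, 3],  # both match -> 2
        [1, 2, 9],  # none match -> 0
    ],
    dtype=torch.int64,
)

k = torch.sum(
    (
        c[:, :context_size]
        == c[:, context_size : context_size + 1].expand_as(c[:, :context_size])
    ).int(),
    dim=1,
    keepdim=True,
)

assert k.squeeze(1).tolist() == [1, 2, 0]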
@@ -736,7 +737,7 @@ def greedy_search_batch(
             decoder_out = model.decoder(decoder_input, k, need_pad=False)
             decoder_out = model.joiner.decoder_proj(decoder_out)

-    sorted_ans = [h[context_size:] for h in hyps]
+    sorted_ans = [h[context_size + 1 :] for h in hyps]
     ans = []
     ans_timestamps = []
     for i in range(N):
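Finally, because the seed prefix is now context_size + 1 tokens long, the finished hypotheses drop that many leading tokens before being returned. A toy example of the stripping (made-up token ids):

context_size = 2
# A finished hypothesis: the [-1, -1, blank] prefix followed by decoded token ids.
hyps = [[-1, -1, 0, 12, 7, 30]]

sorted_ans = [h[context_size + 1 :] for h in hyps]
assert sorted_ans == [[12, 7, 30]]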