update asf code

This commit is contained in:
shixian.shi 2024-02-22 14:59:47 +08:00
parent 66d3b5c212
commit 5b38115da4
2 changed files with 58 additions and 9 deletions

View File

@@ -116,6 +116,22 @@ class DecoderLayerSANM(torch.nn.Module):
        # x = residual + self.dropout(self.src_attn(x, memory, memory_mask))
        return x, tgt_mask, memory, memory_mask, cache

    def get_attn_mat(self, tgt, tgt_mask, memory, memory_mask=None, cache=None):
        # Same computation as forward(), but returns the cross-attention
        # weights from src_attn instead of the attended features.
        residual = tgt
        tgt = self.norm1(tgt)
        tgt = self.feed_forward(tgt)

        x = tgt
        if self.self_attn is not None:
            tgt = self.norm2(tgt)
            x, cache = self.self_attn(tgt, tgt_mask, cache=cache)
        x = residual + x

        residual = x
        x = self.norm3(x)
        x_src_attn, attn_mat = self.src_attn(x, memory, memory_mask, ret_attn=True)
        return attn_mat
    def forward_one_step(self, tgt, tgt_mask, memory, memory_mask=None, cache=None):
        """Compute decoded features.
@@ -396,6 +412,46 @@ class ParaformerSANMDecoder(BaseTransformerDecoder):
            ys.unsqueeze(0), ys_mask, x.unsqueeze(0), cache=state
        )
        return logp.squeeze(0), state
    def forward_asf2(
        self,
        hs_pad: torch.Tensor,
        hlens: torch.Tensor,
        ys_in_pad: torch.Tensor,
        ys_in_lens: torch.Tensor,
    ):
        # ASF over the first two decoder layers: run layer 0, then read the
        # cross-attention matrix from layer 1.
        tgt = ys_in_pad
        tgt_mask = myutils.sequence_mask(ys_in_lens, device=tgt.device)[:, :, None]
        memory = hs_pad
        memory_mask = myutils.sequence_mask(hlens, device=memory.device)[:, None, :]
        tgt, tgt_mask, memory, memory_mask, _ = self.decoders[0](tgt, tgt_mask, memory, memory_mask)
        attn_mat = self.decoders[1].get_attn_mat(tgt, tgt_mask, memory, memory_mask)
        return attn_mat
    def forward_asf6(
        self,
        hs_pad: torch.Tensor,
        hlens: torch.Tensor,
        ys_in_pad: torch.Tensor,
        ys_in_lens: torch.Tensor,
    ):
        # ASF over six decoder layers: run layers 0-4, then read the
        # cross-attention matrix from layer 5.
        tgt = ys_in_pad
        tgt_mask = myutils.sequence_mask(ys_in_lens, device=tgt.device)[:, :, None]
        memory = hs_pad
        memory_mask = myutils.sequence_mask(hlens, device=memory.device)[:, None, :]
        tgt, tgt_mask, memory, memory_mask, _ = self.decoders[0](tgt, tgt_mask, memory, memory_mask)
        tgt, tgt_mask, memory, memory_mask, _ = self.decoders[1](tgt, tgt_mask, memory, memory_mask)
        tgt, tgt_mask, memory, memory_mask, _ = self.decoders[2](tgt, tgt_mask, memory, memory_mask)
        tgt, tgt_mask, memory, memory_mask, _ = self.decoders[3](tgt, tgt_mask, memory, memory_mask)
        tgt, tgt_mask, memory, memory_mask, _ = self.decoders[4](tgt, tgt_mask, memory, memory_mask)
        attn_mat = self.decoders[5].get_attn_mat(tgt, tgt_mask, memory, memory_mask)
        return attn_mat
    def forward_chunk(
        self,

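Both forward_asf2 and forward_asf6 lean on myutils.sequence_mask to build padding masks from length tensors. A minimal sketch of the assumed behavior (1 inside the sequence, 0 in the padding; this implementation is a stand-in, not FunASR's actual helper):

import torch

def sequence_mask(lengths, maxlen=None, device=None):
    # lengths: (B,) int tensor -> (B, T) float mask, 1.0 where pos < length
    maxlen = int(lengths.max()) if maxlen is None else maxlen
    pos = torch.arange(maxlen, device=device).unsqueeze(0)  # (1, T)
    return (pos < lengths.unsqueeze(1).to(pos.device)).float()

The [:, :, None] and [:, None, :] reshapes in the methods above then turn this (B, T) mask into the (B, T_tgt, 1) self-attention and (B, 1, T_mem) cross-attention shapes the decoder layers expect.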
View File

@@ -223,12 +223,8 @@ class SeacoParaformer(BiCifParaformer, Paraformer):
        # ASF Core
        if nfilter > 0 and nfilter < num_hot_word:
            for dec in self.seaco_decoder.decoders:
                dec.reserve_attn = True
            # cif_attended, _ = self.decoder2(contextual_info, _contextual_length, sematic_embeds, ys_pad_lens)
            dec_attended, _ = self.seaco_decoder(contextual_info, _contextual_length, decoder_hidden, ys_pad_lens)
            # cif_filter = torch.topk(self.decoder2.decoders[-1].attn_mat[0][0].sum(0).sum(0)[:-1], min(nfilter, num_hot_word-1))[1].tolist()
            hotword_scores = self.seaco_decoder.decoders[-1].attn_mat[0][0].sum(0).sum(0)[:-1]
            hotword_scores = self.seaco_decoder.forward_asf6(contextual_info, _contextual_length, decoder_hidden, ys_pad_lens)
            hotword_scores = hotword_scores[0].sum(0).sum(0)
            # hotword_scores /= torch.sqrt(torch.tensor(hw_lengths)[:-1].float()).to(hotword_scores.device)
            dec_filter = torch.topk(hotword_scores, min(nfilter, num_hot_word-1))[1].tolist()
            add_filter = dec_filter
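
The new path collapses the attention matrix returned by forward_asf6 into one relevance score per hotword and keeps the top nfilter candidates. A shape-level sketch of that reduction (the sizes are hypothetical; the batch-0 indexing and (n_head, T_text, num_hot_word) layout are assumptions read off the code above):

import torch

attn_mat = torch.rand(4, 10, 7)          # hypothetical (n_head, T_text, num_hot_word)
hotword_scores = attn_mat.sum(0).sum(0)  # sum over heads and text -> (num_hot_word,)
nfilter, num_hot_word = 3, 7
dec_filter = torch.topk(hotword_scores, min(nfilter, num_hot_word - 1))[1].tolist()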
@@ -239,9 +235,6 @@ class SeacoParaformer(BiCifParaformer, Paraformer):
            contextual_info = selected.squeeze(0).repeat(encoder_out.shape[0], 1, 1).to(encoder_out.device)
            num_hot_word = contextual_info.shape[1]
            _contextual_length = torch.Tensor([num_hot_word]).int().repeat(encoder_out.shape[0]).to(encoder_out.device)
            for dec in self.seaco_decoder.decoders:
                dec.attn_mat = []
                dec.reserve_attn = False

        # SeACo Core
        cif_attended, _ = self.seaco_decoder(contextual_info, _contextual_length, sematic_embeds, ys_pad_lens)
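
Taken together, the two hunks in this file swap a stateful hook (set reserve_attn on every layer, run the decoder, read decoders[-1].attn_mat, then reset the flags) for the single functional call forward_asf6. A side-by-side sketch of the two control flows, using the names from the hunks above (the wrapper function names are mine):

def asf_scores_old(seaco_decoder, contextual_info, contextual_length, decoder_hidden, ys_pad_lens):
    # before this commit: attention captured via module attributes, reset required afterwards
    for dec in seaco_decoder.decoders:
        dec.reserve_attn = True
    dec_attended, _ = seaco_decoder(contextual_info, contextual_length, decoder_hidden, ys_pad_lens)
    scores = seaco_decoder.decoders[-1].attn_mat[0][0].sum(0).sum(0)[:-1]
    for dec in seaco_decoder.decoders:
        dec.attn_mat, dec.reserve_attn = [], False
    return scores

def asf_scores_new(seaco_decoder, contextual_info, contextual_length, decoder_hidden, ys_pad_lens):
    # after this commit: one call, no shared state to clean up
    attn_mat = seaco_decoder.forward_asf6(contextual_info, contextual_length, decoder_hidden, ys_pad_lens)
    return attn_mat[0].sum(0).sum(0)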