From 5b38115da4f91576ee3b8dea625f6b4795cc112b Mon Sep 17 00:00:00 2001
From: "shixian.shi"
Date: Thu, 22 Feb 2024 14:59:47 +0800
Subject: [PATCH] update asf code

---
 funasr/models/paraformer/decoder.py     | 56 +++++++++++++++++++++++++
 funasr/models/seaco_paraformer/model.py | 11 +----
 2 files changed, 58 insertions(+), 9 deletions(-)

diff --git a/funasr/models/paraformer/decoder.py b/funasr/models/paraformer/decoder.py
index 68018a039..ad321e4f4 100644
--- a/funasr/models/paraformer/decoder.py
+++ b/funasr/models/paraformer/decoder.py
@@ -116,6 +116,22 @@ class DecoderLayerSANM(torch.nn.Module):
             # x = residual + self.dropout(self.src_attn(x, memory, memory_mask))
         return x, tgt_mask, memory, memory_mask, cache
+
+    def get_attn_mat(self, tgt, tgt_mask, memory, memory_mask=None, cache=None):
+        residual = tgt
+        tgt = self.norm1(tgt)
+        tgt = self.feed_forward(tgt)
+
+        x = tgt
+        if self.self_attn is not None:
+            tgt = self.norm2(tgt)
+            x, cache = self.self_attn(tgt, tgt_mask, cache=cache)
+            x = residual + x
+
+        residual = x
+        x = self.norm3(x)
+        x_src_attn, attn_mat = self.src_attn(x, memory, memory_mask, ret_attn=True)
+        return attn_mat
 
     def forward_one_step(self, tgt, tgt_mask, memory, memory_mask=None, cache=None):
         """Compute decoded features.
 
@@ -396,6 +412,46 @@ class ParaformerSANMDecoder(BaseTransformerDecoder):
             ys.unsqueeze(0), ys_mask, x.unsqueeze(0), cache=state
         )
         return logp.squeeze(0), state
+
+    def forward_asf2(
+        self,
+        hs_pad: torch.Tensor,
+        hlens: torch.Tensor,
+        ys_in_pad: torch.Tensor,
+        ys_in_lens: torch.Tensor,
+    ):
+
+        tgt = ys_in_pad
+        tgt_mask = myutils.sequence_mask(ys_in_lens, device=tgt.device)[:, :, None]
+
+        memory = hs_pad
+        memory_mask = myutils.sequence_mask(hlens, device=memory.device)[:, None, :]
+
+        tgt, tgt_mask, memory, memory_mask, _ = self.decoders[0](tgt, tgt_mask, memory, memory_mask)
+        attn_mat = self.decoders[1].get_attn_mat(tgt, tgt_mask, memory, memory_mask)
+        return attn_mat
+
+    def forward_asf6(
+        self,
+        hs_pad: torch.Tensor,
+        hlens: torch.Tensor,
+        ys_in_pad: torch.Tensor,
+        ys_in_lens: torch.Tensor,
+    ):
+
+        tgt = ys_in_pad
+        tgt_mask = myutils.sequence_mask(ys_in_lens, device=tgt.device)[:, :, None]
+
+        memory = hs_pad
+        memory_mask = myutils.sequence_mask(hlens, device=memory.device)[:, None, :]
+
+        tgt, tgt_mask, memory, memory_mask, _ = self.decoders[0](tgt, tgt_mask, memory, memory_mask)
+        tgt, tgt_mask, memory, memory_mask, _ = self.decoders[1](tgt, tgt_mask, memory, memory_mask)
+        tgt, tgt_mask, memory, memory_mask, _ = self.decoders[2](tgt, tgt_mask, memory, memory_mask)
+        tgt, tgt_mask, memory, memory_mask, _ = self.decoders[3](tgt, tgt_mask, memory, memory_mask)
+        tgt, tgt_mask, memory, memory_mask, _ = self.decoders[4](tgt, tgt_mask, memory, memory_mask)
+        attn_mat = self.decoders[5].get_attn_mat(tgt, tgt_mask, memory, memory_mask)
+        return attn_mat
 
     def forward_chunk(
         self,
diff --git a/funasr/models/seaco_paraformer/model.py b/funasr/models/seaco_paraformer/model.py
index 0287f56ed..cfdd26a79 100644
--- a/funasr/models/seaco_paraformer/model.py
+++ b/funasr/models/seaco_paraformer/model.py
@@ -223,12 +223,8 @@ class SeacoParaformer(BiCifParaformer, Paraformer):
 
         # ASF Core
         if nfilter > 0 and nfilter < num_hot_word:
-            for dec in self.seaco_decoder.decoders:
-                dec.reserve_attn = True
-            # cif_attended, _ = self.decoder2(contextual_info, _contextual_length, sematic_embeds, ys_pad_lens)
-            dec_attended, _ = self.seaco_decoder(contextual_info, _contextual_length, decoder_hidden, ys_pad_lens)
-            # cif_filter = torch.topk(self.decoder2.decoders[-1].attn_mat[0][0].sum(0).sum(0)[:-1], min(nfilter, num_hot_word-1))[1].tolist()
-            hotword_scores = self.seaco_decoder.decoders[-1].attn_mat[0][0].sum(0).sum(0)[:-1]
+            hotword_scores = self.seaco_decoder.forward_asf6(contextual_info, _contextual_length, decoder_hidden, ys_pad_lens)
+            hotword_scores = hotword_scores[0].sum(0).sum(0)
             # hotword_scores /= torch.sqrt(torch.tensor(hw_lengths)[:-1].float()).to(hotword_scores.device)
             dec_filter = torch.topk(hotword_scores, min(nfilter, num_hot_word-1))[1].tolist()
             add_filter = dec_filter
@@ -239,9 +235,6 @@ class SeacoParaformer(BiCifParaformer, Paraformer):
             contextual_info = selected.squeeze(0).repeat(encoder_out.shape[0], 1, 1).to(encoder_out.device)
             num_hot_word = contextual_info.shape[1]
            _contextual_length = torch.Tensor([num_hot_word]).int().repeat(encoder_out.shape[0]).to(encoder_out.device)
-            for dec in self.seaco_decoder.decoders:
-                dec.attn_mat = []
-                dec.reserve_attn = False
 
         # SeACo Core
         cif_attended, _ = self.seaco_decoder(contextual_info, _contextual_length, sematic_embeds, ys_pad_lens)
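The patch replaces the stateful reserve_attn/attn_mat bookkeeping on each decoder layer with stateless helpers (forward_asf2/forward_asf6) that run the first N-1 decoder layers and return the last layer's cross-attention matrix directly. Below is a minimal sketch of how a caller reduces that matrix to hotword scores and top-k indices, mirroring the model.py hunk above; the helper name filter_hotwords and the concrete tensor shapes are illustrative assumptions, not part of the patch:

import torch

def filter_hotwords(attn_mat: torch.Tensor, nfilter: int) -> list:
    # attn_mat: assumed shape (batch, heads, target_len, num_hot_word),
    # the cross-attention weights returned by forward_asf6.
    # Collapse heads and target positions for the first batch entry,
    # exactly as the model.py hunk does: attn_mat[0].sum(0).sum(0).
    hotword_scores = attn_mat[0].sum(0).sum(0)
    num_hot_word = hotword_scores.shape[-1]
    # The min(nfilter, num_hot_word - 1) cap mirrors the patch: keep at most
    # num_hot_word - 1 hotwords (the list is assumed to end with a reserved
    # no-bias entry).
    return torch.topk(hotword_scores, min(nfilter, num_hot_word - 1))[1].tolist()

# Example with random weights: 1 utterance, 4 heads, 10 target steps, 8 hotword slots.
attn = torch.rand(1, 4, 10, 8)
print(filter_hotwords(attn, nfilter=5))  # indices of the 5 best-attended hotwords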