mirror of
https://github.com/modelscope/FunASR
synced 2025-09-15 14:48:36 +08:00
* sensevoice finetune * sensevoice finetune * sensevoice finetune * sensevoice finetune * sensevoice finetune * sensevoice finetune * sensevoice finetune * sensevoice finetune * sensevoice finetune * sensevoice finetune * bugfix * update with main (#1631) * update seaco finetune * v1.0.24 --------- Co-authored-by: 维石 <shixian.shi@alibaba-inc.com> * sensevoice * sensevoice * sensevoice * update with main (#1638) * update seaco finetune * v1.0.24 * update rwkv template --------- Co-authored-by: 维石 <shixian.shi@alibaba-inc.com> * sensevoice * sensevoice * sensevoice * sensevoice * sensevoice * sensevoice * sensevoice * sensevoice * sensevoice * sensevoice * sensevoice * sensevoice * sensevoice * sensevoice * sensevoice * sense voice * sense voice * sense voice * sense voice * sense voice * sense voice * sense voice * sense voice * sense voice * sense voice * sense voice * sense voice * sense voice * sense voice * sense voice * sense voice * sense voice * sense voice * sense voice * sense voice * whisper * whisper * update style * update style --------- Co-authored-by: 维石 <shixian.shi@alibaba-inc.com>
21 lines
849 B
Python
21 lines
849 B
Python
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)

import torch

from funasr.models.sanm.attention import MultiHeadedAttentionSANM

|
class MultiHeadedAttentionSANMwithMask(MultiHeadedAttentionSANM):
    """SANM multi-head attention that takes a *pair* of masks.

    Unlike the parent class, ``forward`` expects ``mask`` to be a
    two-element sequence: ``mask[0]`` gates the FSMN memory branch and
    ``mask[1]`` gates the attention scores.
    """

    def __init__(self, *args, **kwargs):
        """Delegate construction entirely to ``MultiHeadedAttentionSANM``."""
        super().__init__(*args, **kwargs)

    def forward(self, x, mask, mask_shfit_chunk=None, mask_att_chunk_encoder=None):
        """Compute masked self-attention plus the FSMN memory term.

        Args:
            x: input features fed to ``forward_qkv``.
            mask: two-element sequence of masks — ``mask[0]`` for the
                FSMN branch, ``mask[1]`` for the attention scores.
            mask_shfit_chunk: optional chunk-shift mask forwarded to
                ``forward_fsmn``.
            mask_att_chunk_encoder: optional encoder chunk mask forwarded
                to ``forward_attention``.

        Returns:
            Sum of the attention output and the FSMN memory output.
        """
        query, key, heads_v, raw_v = self.forward_qkv(x)
        # FSMN memory branch operates on the raw (pre-head-split) values.
        fsmn_memory = self.forward_fsmn(raw_v, mask[0], mask_shfit_chunk)
        # Scale queries by 1/sqrt(d_k) before the dot product
        # (kept as a multiply by d_k**(-0.5), matching the original numerics).
        scale = self.d_k ** (-0.5)
        scores = torch.matmul(query * scale, key.transpose(-2, -1))
        attention_out = self.forward_attention(heads_v, scores, mask[1], mask_att_chunk_encoder)
        return attention_out + fsmn_memory
|