Mirror of https://github.com/modelscope/FunASR (synced 2025-09-15 14:48:36 +08:00)

Commit d0d8684b96 ("update"), parent 317dac5b75.
@@ -42,6 +42,7 @@ from funasr.utils import asr_utils, wav_utils, postprocess_utils
 from funasr.models.frontend.wav_frontend import WavFrontend
 from funasr.models.e2e_asr_paraformer import BiCifParaformer, ContextualParaformer
 from funasr.export.models.e2e_asr_paraformer import Paraformer as Paraformer_export
+np.set_printoptions(threshold=np.inf)

 class Speech2Text:
     """Speech2Text class
@@ -203,7 +204,6 @@ class Speech2Text:
         # Input as audio signal
         if isinstance(speech, np.ndarray):
             speech = torch.tensor(speech)

         if self.frontend is not None:
             feats, feats_len = self.frontend.forward(speech, speech_lengths)
             feats = to_device(feats, device=self.device)
@@ -213,13 +213,16 @@
         feats = speech
         feats_len = speech_lengths
         lfr_factor = max(1, (feats.size()[-1] // 80) - 1)
+        feats_len = cache["encoder"]["stride"] + cache["encoder"]["pad_left"] + cache["encoder"]["pad_right"]
+        feats = feats[:, cache["encoder"]["start_idx"]:cache["encoder"]["start_idx"] + feats_len, :]
+        feats_len = torch.tensor([feats_len])
         batch = {"speech": feats, "speech_lengths": feats_len, "cache": cache}

         # a. To device
         batch = to_device(batch, device=self.device)

         # b. Forward Encoder
-        enc, enc_len = self.asr_model.encode_chunk(**batch)
+        enc, enc_len = self.asr_model.encode_chunk(feats, feats_len, cache)
         if isinstance(enc, tuple):
             enc = enc[0]
         # assert len(enc) == 1, len(enc)
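The added lines read the window geometry out of cache["encoder"] before calling the encoder. A minimal sketch of that selection (the helper name is mine, not the commit's; it assumes one encoder frame per 960 input samples, which the "// 960" arithmetic later in this commit suggests):

import torch

# Hypothetical helper restating the added slicing logic: the model sees the
# current stride plus its left/right context frames, starting at the rolling
# start_idx kept in the cache.
def select_chunk_frames(feats, enc_cache):
    n_frames = enc_cache["stride"] + enc_cache["pad_left"] + enc_cache["pad_right"]
    start = enc_cache["start_idx"]
    window = feats[:, start:start + n_frames, :]
    return window, torch.tensor([n_frames])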
@@ -592,7 +595,6 @@ def inference_modelscope(
     if data_path_and_name_and_type is None and raw_inputs is not None:
         if isinstance(raw_inputs, np.ndarray):
             raw_inputs = torch.tensor(raw_inputs)

     is_final = False
     if param_dict is not None and "cache" in param_dict:
         cache = param_dict["cache"]
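For context, the streaming entry point is meant to be called repeatedly with small waveform pieces while one cache dict persists across calls. A hypothetical driver loop (illustrative only: "infer" is a stand-in for the callable this pipeline exposes, and passing is_final through param_dict is an assumption, since this hunk only shows it initialized to False):

import torch

def infer(raw_inputs=None, param_dict=None):  # stand-in for the pipeline callable
    return []

waveform = torch.randn(16000)            # one second of dummy 16 kHz audio
param_dict = {"cache": {}}               # reused across calls; holds all streaming state
chunk = 960                              # 60 ms at 16 kHz
for start in range(0, len(waveform), chunk):
    piece = waveform[start:start + chunk]
    param_dict["is_final"] = start + chunk >= len(waveform)   # assumed flag, see note above
    results = infer(raw_inputs=piece, param_dict=param_dict)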
@@ -605,62 +607,87 @@ def inference_modelscope(
         asr_result = ""
         wait = True
         if len(cache) == 0:
-            cache["encoder"] = {"start_idx": 0, "pad_left": 0, "stride": 10, "pad_right": 5, "cif_hidden": None, "cif_alphas": None}
+            cache["encoder"] = {"start_idx": 0, "pad_left": 0, "stride": 10, "pad_right": 5, "cif_hidden": None, "cif_alphas": None, "is_final": is_final, "left": 0, "right": 0}
             cache_de = {"decode_fsmn": None}
             cache["decoder"] = cache_de
             cache["first_chunk"] = True
             cache["speech"] = []
             cache["chunk_index"] = 0
-            cache["speech_chunk"] = []
+            cache["accum_speech"] = 0

         if raw_inputs is not None:
             if len(cache["speech"]) == 0:
                 cache["speech"] = raw_inputs
             else:
                 cache["speech"] = torch.cat([cache["speech"], raw_inputs], dim=0)
-            if len(cache["speech_chunk"]) == 0:
-                cache["speech_chunk"] = raw_inputs
-            else:
-                cache["speech_chunk"] = torch.cat([cache["speech_chunk"], raw_inputs], dim=0)
-            while len(cache["speech_chunk"]) >= 960:
+            cache["accum_speech"] += len(raw_inputs)
+            while cache["accum_speech"] >= 960:
                 if cache["first_chunk"]:
-                    if len(cache["speech_chunk"]) >= 14400:
-                        speech = torch.unsqueeze(cache["speech_chunk"][0:14400], axis=0)
-                        speech_length = torch.tensor([14400])
+                    if cache["accum_speech"] >= 14400:
+                        speech = torch.unsqueeze(cache["speech"], axis=0)
+                        speech_length = torch.tensor([len(cache["speech"])])
+                        cache["encoder"]["pad_left"] = 5
+                        cache["encoder"]["pad_right"] = 5
+                        cache["encoder"]["stride"] = 10
+                        cache["encoder"]["left"] = 5
+                        cache["encoder"]["right"] = 0
                         results = speech2text(cache, speech, speech_length)
-                        cache["speech_chunk"] = cache["speech_chunk"][4800:]
+                        cache["accum_speech"] -= 4800
                         cache["first_chunk"] = False
+                        cache["encoder"]["start_idx"] = -5
+                        cache["encoder"]["is_final"] = False
                         wait = False
                     else:
                         if is_final:
-                            cache["encoder"]["stride"] = len(cache["speech_chunk"]) // 960
+                            cache["encoder"]["stride"] = len(cache["speech"]) // 960
                             cache["encoder"]["pad_left"] = 0
                             cache["encoder"]["pad_right"] = 0
-                            speech = torch.unsqueeze(cache["speech_chunk"], axis=0)
-                            speech_length = torch.tensor([len(cache["speech_chunk"])])
+                            speech = torch.unsqueeze(cache["speech"], axis=0)
+                            speech_length = torch.tensor([len(cache["speech"])])
                             results = speech2text(cache, speech, speech_length)
-                            cache["speech_chunk"] = []
+                            cache["accum_speech"] = 0
                             wait = False
                         else:
                             break
                 else:
-                    if len(cache["speech_chunk"]) >= 19200:
+                    if cache["accum_speech"] >= 19200:
                         cache["encoder"]["start_idx"] += 10
                         cache["encoder"]["stride"] = 10
                         cache["encoder"]["pad_left"] = 5
-                        speech = torch.unsqueeze(cache["speech_chunk"][:19200], axis=0)
-                        speech_length = torch.tensor([19200])
                         cache["encoder"]["pad_right"] = 5
+                        cache["encoder"]["left"] = 0
+                        cache["encoder"]["right"] = 0
+                        speech = torch.unsqueeze(cache["speech"], axis=0)
+                        speech_length = torch.tensor([len(cache["speech"])])
                         results = speech2text(cache, speech, speech_length)
-                        cache["speech_chunk"] = cache["speech_chunk"][9600:]
+                        cache["accum_speech"] -= 9600
                         wait = False
                     else:
                         if is_final:
-                            cache["encoder"]["stride"] = len(cache["speech_chunk"]) // 960
-                            cache["encoder"]["pad_right"] = 0
-                            speech = torch.unsqueeze(cache["speech_chunk"], axis=0)
-                            speech_length = torch.tensor([len(cache["speech_chunk"])])
-                            results = speech2text(cache, speech, speech_length)
-                            cache["speech_chunk"] = []
-                            wait = False
+                            cache["encoder"]["is_final"] = True
+                            if cache["accum_speech"] >= 14400:
+                                cache["encoder"]["start_idx"] += 10
+                                cache["encoder"]["stride"] = 10
+                                cache["encoder"]["pad_left"] = 5
+                                cache["encoder"]["pad_right"] = 5
+                                cache["encoder"]["left"] = 0
+                                cache["encoder"]["right"] = cache["accum_speech"] // 960 - 15
+                                speech = torch.unsqueeze(cache["speech"], axis=0)
+                                speech_length = torch.tensor([len(cache["speech"])])
+                                results = speech2text(cache, speech, speech_length)
+                                cache["accum_speech"] -= 9600
+                                wait = False
+                            else:
+                                cache["encoder"]["start_idx"] += 10
+                                cache["encoder"]["stride"] = cache["accum_speech"] // 960 - 5
+                                cache["encoder"]["pad_left"] = 5
+                                cache["encoder"]["pad_right"] = 0
+                                cache["encoder"]["left"] = 0
+                                cache["encoder"]["right"] = 0
+                                speech = torch.unsqueeze(cache["speech"], axis=0)
+                                speech_length = torch.tensor([len(cache["speech"])])
+                                results = speech2text(cache, speech, speech_length)
+                                cache["accum_speech"] = 0
+                                wait = False
                         else:
                             break
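Reading the constants off the branches above (under the same 960-samples-per-frame assumption): the first decode fires once 14400 samples (15 frames) have accumulated and consumes 4800 (5 frames); afterwards each decode fires at 19200 accumulated samples (a 20-frame window of left 5 + stride 10 + right 5) and consumes 9600 (10 frames). A toy simulation of the accum_speech bookkeeping (my code; it mirrors the loop structure above but omits the final flush):

def simulate(total_samples, piece=960):
    """Report the stream positions at which a decode would fire."""
    accum, first, fires, t = 0, True, [], 0
    while t < total_samples:
        t += piece
        accum += piece
        while accum >= 960:
            if first:
                if accum < 14400:
                    break
                fires.append(("first", t))      # 15-frame window
                accum -= 4800
                first = False
            elif accum >= 19200:
                fires.append(("middle", t))     # 20-frame window
                accum -= 9600
            else:
                break
    return fires

print(simulate(48000))  # [('first', 14400), ('middle', 24000), ('middle', 33600), ('middle', 43200)]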
@@ -370,19 +370,10 @@ class Paraformer(AbsESPnetModel):
             encoder_out, encoder_out_lens
         )

-        assert encoder_out.size(0) == speech.size(0), (
-            encoder_out.size(),
-            speech.size(0),
-        )
-        assert encoder_out.size(1) <= encoder_out_lens.max(), (
-            encoder_out.size(),
-            encoder_out_lens.max(),
-        )
-
         if intermediate_outs is not None:
             return (encoder_out, intermediate_outs), encoder_out_lens

-        return encoder_out, encoder_out_lens
+        return encoder_out, torch.tensor([encoder_out.size(1)])

     def calc_predictor(self, encoder_out, encoder_out_lens):
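The replaced return value drops the batched length bookkeeping. My reading (not a comment from the commit itself): a streaming chunk is a batch of one utterance, so the length tensor is simply the time dimension of the encoder output:

import torch

encoder_out = torch.zeros(1, 20, 512)                    # (batch=1, frames, dim) dummy
encoder_out_lens = torch.tensor([encoder_out.size(1)])   # tensor([20]), shape (1,)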
@@ -200,6 +200,7 @@ class CifPredictorV2(nn.Module):
         return acoustic_embeds, token_num, alphas, cif_peak

     def forward_chunk(self, hidden, cache=None):
+        b, t, d = hidden.size()
         h = hidden
         context = h.transpose(1, 2)
         queries = self.pad(context)
@@ -220,11 +221,20 @@
         alphas = alphas * mask_chunk_predictor

         if cache is not None:
+            if cache["is_final"]:
+                alphas[:, cache["stride"] + cache["pad_left"] - 1] += 0.45
             if cache["cif_hidden"] is not None:
                 hidden = torch.cat((cache["cif_hidden"], hidden), 1)
             if cache["cif_alphas"] is not None:
                 alphas = torch.cat((cache["cif_alphas"], alphas), -1)
+
+            #if cache["is_final"]:
+            #    tail_threshold = torch.tensor([self.tail_threshold], dtype=alphas.dtype).to(alphas.device)
+            #    tail_threshold = torch.reshape(tail_threshold, (1, 1))
+            #    alphas = torch.cat([alphas, tail_threshold], dim=1)
+            #    zeros_hidden = torch.zeros((b, 1, d), dtype=hidden.dtype).to(hidden.device)
+            #    hidden = torch.cat([hidden, zeros_hidden], dim=1)

         token_num = alphas.sum(-1)
         acoustic_embeds, cif_peak = cif(hidden, alphas, self.threshold)
         len_time = alphas.size(-1)
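The +0.45 boost lands on the last frame of the current stride window when the stream is final. A toy scalar continuous-integrate-and-fire loop (illustrative only; FunASR's cif() is batched and vectorized) showing why the boost flushes a pending token instead of leaving it stranded at stream end:

# Weights accumulate until they cross the firing threshold (1.0); boosting the
# final frame's alpha nudges a nearly-complete token over the line.
def cif_fire_points(alphas, threshold=1.0):
    integrate, fires = 0.0, []
    for i, a in enumerate(alphas):
        integrate += a
        if integrate >= threshold:
            fires.append(i)
            integrate -= threshold
    return fires

print(cif_fire_points([0.4, 0.4, 0.4, 0.4]))          # [2]    - one token left pending
print(cif_fire_points([0.4, 0.4, 0.4, 0.4 + 0.45]))   # [2, 3] - pending token flushed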
@@ -240,8 +250,9 @@
             pre_alphas_length = cache["cif_alphas"].size(-1)
             mask_chunk_peak_predictor[:, :pre_alphas_length] = 1.0
             mask_chunk_peak_predictor[:, pre_alphas_length + cache["pad_left"]:pre_alphas_length + cache["stride"] + cache["pad_left"]] = 1.0
+            #if cache["is_final"]:
+            #    mask_chunk_peak_predictor[:, -1] = 1.0

         if mask_chunk_peak_predictor is not None:
             cif_peak = cif_peak * mask_chunk_peak_predictor.squeeze(-1)
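An illustration of the peak-mask arithmetic above, with hypothetical sizes of my choosing: fires are kept for frames already integrated in the cache and for the current stride window, while left/right context frames stay zeroed because the neighbouring chunks own them.

import torch

pre, pad_left, stride, pad_right = 8, 5, 10, 5        # hypothetical sizes
mask = torch.zeros(1, pre + pad_left + stride + pad_right)
mask[:, :pre] = 1.0                                    # frames carried in the cache
mask[:, pre + pad_left:pre + pad_left + stride] = 1.0  # this chunk's own stride frames
# left/right context columns stay zero, so peaks there are suppressed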
@@ -8,7 +8,7 @@

 import math
 import torch
-
+import torch.nn.functional as F

 def _pre_hook(
     state_dict,
@@ -409,9 +409,18 @@ class SinusoidalPositionEncoder(torch.nn.Module):

     def forward_chunk(self, x, cache=None):
         start_idx = 0
+        pad_left = 0
+        pad_right = 0
         batch_size, timesteps, input_dim = x.size()
         if cache is not None:
             start_idx = cache["start_idx"]
+            pad_left = cache["left"]
+            pad_right = cache["right"]
         positions = torch.arange(1, timesteps + start_idx + 1)[None, :]
         position_encoding = self.encode(positions, input_dim, x.dtype).to(x.device)
-        return x + position_encoding[:, start_idx: start_idx + timesteps]
+        outputs = x + position_encoding[:, start_idx: start_idx + timesteps]
+        outputs = outputs.transpose(1, 2)
+        outputs = F.pad(outputs, (pad_left, pad_right))
+        outputs = outputs.transpose(1, 2)
+        return outputs
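A minimal check (my example, not from the commit) of the padding trick used above: F.pad pads the last dimension, so transposing (B, T, D) to (B, D, T) first makes the zeros land on the time axis, adding pad_left/pad_right empty frames.

import torch
import torch.nn.functional as F

x = torch.randn(1, 10, 4)                             # (batch, frames, dim)
y = F.pad(x.transpose(1, 2), (5, 0)).transpose(1, 2)  # pad 5 zero frames on the left
print(y.shape)                                        # torch.Size([1, 15, 4])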