Merge branch 'alibaba-damo-academy:main' into add-offline-websocket-srv

zhaomingwork 2023-04-28 23:40:03 +08:00 committed by GitHub
commit 7a2ca2cca4
4 changed files with 55 additions and 11 deletions


@@ -0,0 +1,39 @@
+import os
+import logging
+import torch
+import soundfile
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+from modelscope.utils.logger import get_logger
+logger = get_logger(log_level=logging.CRITICAL)
+logger.setLevel(logging.CRITICAL)
+os.environ["MODELSCOPE_CACHE"] = "./"
+inference_pipeline = pipeline(
+    task=Tasks.auto_speech_recognition,
+    model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online',
+    model_revision='v1.0.4'
+)
+model_dir = os.path.join(os.environ["MODELSCOPE_CACHE"], "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online")
+speech, sample_rate = soundfile.read(os.path.join(model_dir, "example/asr_example.wav"))
+speech_length = speech.shape[0]
+sample_offset = 0
+chunk_size = [5, 10, 5] #[5, 10, 5] 600ms, [8, 8, 4] 480ms
+stride_size = chunk_size[1] * 960
+param_dict = {"cache": dict(), "is_final": False, "chunk_size": chunk_size}
+final_result = ""
+for sample_offset in range(0, speech_length, min(stride_size, speech_length - sample_offset)):
+    if sample_offset + stride_size >= speech_length - 1:
+        stride_size = speech_length - sample_offset
+        param_dict["is_final"] = True
+    rec_result = inference_pipeline(audio_in=speech[sample_offset: sample_offset + stride_size],
+                                    param_dict=param_dict)
+    if len(rec_result) != 0:
+        final_result += rec_result['text'][0]
+        print(rec_result)
+print(final_result)
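Aside on the stride arithmetic: `stride_size = chunk_size[1] * 960` converts the main-chunk length from 60 ms frame units into samples at 16 kHz (960 samples / 16000 Hz = 60 ms). A minimal sketch of the calculation, with illustrative names that are not part of the committed file:

SAMPLE_RATE = 16000       # Hz, per the 16k model name
SAMPLES_PER_UNIT = 960    # 960 / 16000 = 60 ms per chunk unit

def stride_samples(chunk_size):
    # chunk_size is [left context, main chunk, right context] in 60 ms units;
    # only the main chunk (index 1) sets how far the window advances.
    return chunk_size[1] * SAMPLES_PER_UNIT

assert stride_samples([5, 10, 5]) == 9600  # 600 ms, matching the inline comment
assert stride_samples([8, 8, 4]) == 7680   # 480 ms, matching the inline comment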


@@ -14,24 +14,26 @@ os.environ["MODELSCOPE_CACHE"] = "./"
 inference_pipeline = pipeline(
     task=Tasks.auto_speech_recognition,
     model='damo/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online',
-    model_revision='v1.0.2')
+    model_revision='v1.0.4'
+)
 model_dir = os.path.join(os.environ["MODELSCOPE_CACHE"], "damo/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online")
 speech, sample_rate = soundfile.read(os.path.join(model_dir, "example/asr_example.wav"))
 speech_length = speech.shape[0]
 sample_offset = 0
-step = 4800 #300ms
-param_dict = {"cache": dict(), "is_final": False}
+chunk_size = [8, 8, 4] #[5, 10, 5] 600ms, [8, 8, 4] 480ms
+stride_size = chunk_size[1] * 960
+param_dict = {"cache": dict(), "is_final": False, "chunk_size": chunk_size}
 final_result = ""
-for sample_offset in range(0, speech_length, min(step, speech_length - sample_offset)):
-    if sample_offset + step >= speech_length - 1:
-        step = speech_length - sample_offset
+for sample_offset in range(0, speech_length, min(stride_size, speech_length - sample_offset)):
+    if sample_offset + stride_size >= speech_length - 1:
+        stride_size = speech_length - sample_offset
         param_dict["is_final"] = True
-    rec_result = inference_pipeline(audio_in=speech[sample_offset: sample_offset + step],
+    rec_result = inference_pipeline(audio_in=speech[sample_offset: sample_offset + stride_size],
                                     param_dict=param_dict)
-    if len(rec_result) != 0 and rec_result['text'] != "sil" and rec_result['text'] != "waiting_for_more_voice":
-        final_result += rec_result['text']
-    print(rec_result)
+    if len(rec_result) != 0:
+        final_result += rec_result['text'][0]
+        print(rec_result)
 print(final_result)
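The result handling changes because, under model_revision v1.0.4, the streaming pipeline appears to return `rec_result['text']` as a list of hypotheses rather than a bare string with "sil" / "waiting_for_more_voice" sentinels. For clarity, the same chunked-feed pattern written as an explicit while loop; this is a sketch reusing the variables defined in the demo above, not committed code:

final_result = ""
sample_offset = 0
while sample_offset < speech_length:
    step = min(stride_size, speech_length - sample_offset)
    if sample_offset + step >= speech_length:
        param_dict["is_final"] = True          # mark the last chunk
    rec_result = inference_pipeline(audio_in=speech[sample_offset: sample_offset + step],
                                    param_dict=param_dict)
    if len(rec_result) != 0:
        final_result += rec_result['text'][0]  # first (best) hypothesis
    sample_offset += step
print(final_result)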


@@ -205,9 +205,12 @@ class Speech2Text:
         results = []
         cache_en = cache["encoder"]
         if speech.shape[1] < 16 * 60 and cache_en["is_final"]:
+            if cache_en["start_idx"] == 0:
+                return []
             cache_en["tail_chunk"] = True
             feats = cache_en["feats"]
             feats_len = torch.tensor([feats.shape[1]])
+            self.asr_model.frontend = None
             results = self.infer(feats, feats_len, cache)
             return results
         else:

@@ -380,7 +380,7 @@ class SANMEncoder(AbsEncoder):
         else:
             xs_pad = self.embed(xs_pad, cache)
         if cache["tail_chunk"]:
-            xs_pad = cache["feats"]
+            xs_pad = to_device(cache["feats"], device=xs_pad.device)
         else:
             xs_pad = self._add_overlap_chunk(xs_pad, cache)
         encoder_outs = self.encoders0(xs_pad, None, None, None, None)
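The one-line fix moves the cached features onto the same device as the live encoder input before `self.encoders0` runs, so features cached on CPU no longer break GPU inference. An illustrative stand-in for a `to_device` helper (an assumption for this sketch; the repository ships its own implementation):

import torch

def to_device(x, device):
    # Assumed behavior: move tensors to the target device, pass through
    # anything that is not a tensor. Illustration only.
    return x.to(device) if isinstance(x, torch.Tensor) else x

xs_pad = torch.zeros(1, 10, 560)            # live chunk on some device
cached = to_device(torch.zeros(1, 10, 560), device=xs_pad.device)
assert cached.device == xs_pad.device       # devices now match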