Mirror of https://github.com/modelscope/FunASR, synced 2025-09-15 14:48:36 +08:00
Merge pull request #442 from alibaba-damo-academy/dev_lhn
update streaming paraformer recipe
This commit is contained in: commit ebabb37754
@@ -0,0 +1,39 @@
+import os
+import logging
+import torch
+import soundfile
+
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+from modelscope.utils.logger import get_logger
+
+logger = get_logger(log_level=logging.CRITICAL)
+logger.setLevel(logging.CRITICAL)
+
+os.environ["MODELSCOPE_CACHE"] = "./"
+inference_pipeline = pipeline(
+    task=Tasks.auto_speech_recognition,
+    model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online',
+    model_revision='v1.0.4'
+)
+
+model_dir = os.path.join(os.environ["MODELSCOPE_CACHE"], "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online")
+speech, sample_rate = soundfile.read(os.path.join(model_dir, "example/asr_example.wav"))
+speech_length = speech.shape[0]
+
+sample_offset = 0
+chunk_size = [5, 10, 5] #[5, 10, 5] 600ms, [8, 8, 4] 480ms
+stride_size = chunk_size[1] * 960
+param_dict = {"cache": dict(), "is_final": False, "chunk_size": chunk_size}
+final_result = ""
+
+for sample_offset in range(0, speech_length, min(stride_size, speech_length - sample_offset)):
+    if sample_offset + stride_size >= speech_length - 1:
+        stride_size = speech_length - sample_offset
+        param_dict["is_final"] = True
+    rec_result = inference_pipeline(audio_in=speech[sample_offset: sample_offset + stride_size],
+                                    param_dict=param_dict)
+    if len(rec_result) != 0:
+        final_result += rec_result['text'][0]
+    print(rec_result)
+print(final_result)
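For reference, a minimal sketch (not part of the commit) of the stride arithmetic used in the recipe above: the stride is chunk_size[1] steps of 960 samples each, which at the 16 kHz sample rate assumed by this model works out to 60 ms per step, so [5, 10, 5] gives a 600 ms stride and [8, 8, 4] gives 480 ms, matching the comment in the code.

SAMPLE_RATE = 16000        # Hz, assumed from the 16k model name
SAMPLES_PER_STEP = 960     # samples per chunk_size unit, as used in the recipe

def stride_ms(chunk_size):
    # Audio stride in milliseconds consumed by one streaming inference call.
    stride_samples = chunk_size[1] * SAMPLES_PER_STEP
    return stride_samples * 1000 // SAMPLE_RATE

print(stride_ms([5, 10, 5]))  # 600
print(stride_ms([8, 8, 4]))   # 480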
@@ -14,24 +14,26 @@ os.environ["MODELSCOPE_CACHE"] = "./"
 inference_pipeline = pipeline(
     task=Tasks.auto_speech_recognition,
     model='damo/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online',
-    model_revision='v1.0.2')
+    model_revision='v1.0.4'
+)

 model_dir = os.path.join(os.environ["MODELSCOPE_CACHE"], "damo/speech_paraformer_asr_nat-zh-cn-16k-common-vocab8404-online")
 speech, sample_rate = soundfile.read(os.path.join(model_dir, "example/asr_example.wav"))
 speech_length = speech.shape[0]

 sample_offset = 0
-step = 4800 #300ms
-param_dict = {"cache": dict(), "is_final": False}
+chunk_size = [8, 8, 4] #[5, 10, 5] 600ms, [8, 8, 4] 480ms
+stride_size = chunk_size[1] * 960
+param_dict = {"cache": dict(), "is_final": False, "chunk_size": chunk_size}
 final_result = ""

-for sample_offset in range(0, speech_length, min(step, speech_length - sample_offset)):
-    if sample_offset + step >= speech_length - 1:
-        step = speech_length - sample_offset
+for sample_offset in range(0, speech_length, min(stride_size, speech_length - sample_offset)):
+    if sample_offset + stride_size >= speech_length - 1:
+        stride_size = speech_length - sample_offset
         param_dict["is_final"] = True
-    rec_result = inference_pipeline(audio_in=speech[sample_offset: sample_offset + step],
+    rec_result = inference_pipeline(audio_in=speech[sample_offset: sample_offset + stride_size],
                                     param_dict=param_dict)
-    if len(rec_result) != 0 and rec_result['text'] != "sil" and rec_result['text'] != "waiting_for_more_voice":
-        final_result += rec_result['text']
+    if len(rec_result) != 0:
+        final_result += rec_result['text'][0]
     print(rec_result)
 print(final_result)
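As a sanity check (not part of the diff), a small sketch, assuming soundfile returns a NumPy array, showing that the fixed-stride loop in the updated recipe visits the whole waveform exactly once and ends with the final flag set:

import numpy as np

speech = np.zeros(100000)      # stand-in waveform, ~6.25 s at 16 kHz
speech_length = speech.shape[0]
stride_size = 8 * 960          # chunk_size = [8, 8, 4] -> 480 ms stride

covered = 0
is_final = False
for sample_offset in range(0, speech_length, stride_size):
    chunk = speech[sample_offset: sample_offset + stride_size]
    if sample_offset + stride_size >= speech_length - 1:
        is_final = True        # mirrors param_dict["is_final"] in the recipe
    covered += chunk.shape[0]

assert covered == speech_length and is_final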