mirror of https://github.com/modelscope/FunASR

commit b66a41fb73 (parent f2a68d0bc8): funasr1.0

README_zh.md | 56
@@ -92,30 +92,48 @@ print(res)
```
Note: `model_hub` specifies the model repository: `ms` downloads the model from ModelScope, `hf` from Hugging Face.
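For illustration, a minimal sketch (assuming the same `infer` entry point used in the streaming example below) of selecting either hub:

```python
from funasr import infer

# Hypothetical illustration: the same model name is resolved against
# either repository, chosen by the `model_hub` argument.
p_ms = infer(model="paraformer-zh-streaming", model_hub="ms")  # ModelScope
p_hf = infer(model="paraformer-zh-streaming", model_hub="hf")  # Hugging Face
```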
### Real-Time Speech Recognition
```python
from funasr import infer
import torchaudio

# Load the streaming Paraformer model from ModelScope
p = infer(model="paraformer-zh-streaming", model_hub="ms")

chunk_size = [0, 10, 5]  # [0, 10, 5] = 600ms, [0, 8, 4] = 480ms
param_dict = {"cache": dict(), "is_final": False, "chunk_size": chunk_size,
              "encoder_chunk_look_back": 4, "decoder_chunk_look_back": 1}

# Load the demo waveform (16 kHz mono) as a 1-D tensor
speech = torchaudio.load("asr_example_zh.wav")[0][0]
speech_length = speech.shape[0]

stride_size = chunk_size[1] * 960  # 10 frames * 960 samples (60ms at 16 kHz)
for sample_offset in range(0, speech_length, stride_size):
    # Flag the last chunk so the model flushes its final output
    param_dict["is_final"] = sample_offset + stride_size >= speech_length - 1
    input = speech[sample_offset: sample_offset + stride_size]
    rec_result = p(input=input, param_dict=param_dict)
    print(rec_result)
```
Note: `chunk_size` configures the streaming latency. `[0, 10, 5]` means text is emitted in real time at a granularity of `10*60=600ms`, with `5*60=300ms` of lookahead. Each inference call consumes `600ms` of audio (`16000*0.6=9600` samples) and outputs the corresponding text; for the final audio segment, `is_final=True` must be set to force the last words to be emitted.
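As a quick arithmetic check of the note above (plain Python, numbers only, no FunASR calls):

```python
# chunk_size = [0, 10, 5] with 16 kHz audio; one unit is one 60ms frame
frame_ms = 60
chunk_ms = 10 * frame_ms         # 600ms of audio consumed per step
lookahead_ms = 5 * frame_ms      # 300ms of future context
samples_per_chunk = 16000 * chunk_ms // 1000
print(chunk_ms, lookahead_ms, samples_per_chunk)  # 600 300 9600
```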
For more detailed usage, see the [quick start guide](https://alibaba-damo-academy.github.io/FunASR/en/funasr/quick_start_zh.html).
<a name="服务部署"></a>