Mirror of https://github.com/modelscope/FunASR, synced 2025-09-15 14:48:36 +08:00

Commit 19645da9e9: update readme
Parent: d9e48a5706
README.md

@@ -97,7 +97,7 @@ model = AutoModel(model="paraformer-zh", model_revision="v2.0.2", \
                   punc_model="ct-punc-c", punc_model_revision="v2.0.2", \
                   spk_model="cam++", spk_model_revision="v2.0.2")
 res = model(input=f"{model.model_path}/example/asr_example.wav",
-            batch_size=16,
+            batch_size=64,
             hotword='魔搭')
 print(res)
 ```
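Taken together with its context lines, this hunk is the README's complete offline transcription example. A runnable reconstruction, assuming `funasr` is installed and the models are downloaded on first use:

```python
from funasr import AutoModel

# paraformer-zh is a multi-functional ASR model; attach VAD, punctuation,
# and speaker models as needed for long-audio transcription
model = AutoModel(model="paraformer-zh", model_revision="v2.0.2",
                  vad_model="fsmn-vad", vad_model_revision="v2.0.2",
                  punc_model="ct-punc-c", punc_model_revision="v2.0.2",
                  spk_model="cam++", spk_model_revision="v2.0.2")

# the example wav ships alongside the downloaded model
res = model(input=f"{model.model_path}/example/asr_example.wav",
            batch_size=64,    # the value this commit raises from 16
            hotword='魔搭')    # bias decoding toward the hotword
print(res)
```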
@@ -135,7 +135,6 @@ Note: `chunk_size` is the configuration for streaming latency. `[0,10,5]` indica
 from funasr import AutoModel
 
 model = AutoModel(model="fsmn-vad", model_revision="v2.0.2")
 
 wav_file = f"{model.model_path}/example/asr_example.wav"
 res = model(input=wav_file)
 print(res)
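The VAD snippet above already runs as shown; what the hunk does not show is what `res` contains. A hedged sketch of consuming it, assuming the usual `fsmn-vad` result shape of `[start_ms, end_ms]` segment pairs under the `value` key:

```python
# iterate over the detected speech segments (shape is an assumption,
# not stated in this commit)
for start_ms, end_ms in res[0]["value"]:
    print(f"speech from {start_ms} ms to {end_ms} ms "
          f"({(end_ms - start_ms) / 1000:.2f} s)")
```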
@@ -167,7 +166,6 @@ for i in range(total_chunk_num):
 from funasr import AutoModel
 
 model = AutoModel(model="ct-punc", model_revision="v2.0.2")
 
 res = model(input="那今天的会就到这里吧 happy new year 明年见")
 print(res)
 ```
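The punctuation example is self-contained, and a natural companion use is restoring punctuation on raw ASR output. The chaining below is an illustrative sketch, not part of this commit; it reuses the model names from the hunks above and assumes the result dicts expose the transcript under a `text` key:

```python
from funasr import AutoModel

asr = AutoModel(model="paraformer-zh", model_revision="v2.0.2")
punc = AutoModel(model="ct-punc", model_revision="v2.0.2")

asr_res = asr(input=f"{asr.model_path}/example/asr_example.wav")
raw_text = asr_res[0]["text"]    # unpunctuated transcript (assumed key)
punc_res = punc(input=raw_text)  # restore punctuation on the transcript
print(punc_res)
```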
@@ -176,9 +174,8 @@ print(res)
 from funasr import AutoModel
 
 model = AutoModel(model="fa-zh", model_revision="v2.0.2")
 
 wav_file = f"{model.model_path}/example/asr_example.wav"
-text_file = f"{model.model_path}/example/asr_example.wav"
+text_file = f"{model.model_path}/example/text.txt"
 res = model(input=(wav_file, text_file), data_type=("sound", "text"))
 print(res)
 ```
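The fix here points `text_file` at the transcript bundled with the model rather than at the wav itself; `fa-zh` aligns each token of that transcript to timestamps. As a hedged sketch with a user-supplied transcript (the plain-text file format and the sample sentence are assumptions, not from this commit):

```python
from funasr import AutoModel

model = AutoModel(model="fa-zh", model_revision="v2.0.2")

wav_file = f"{model.model_path}/example/asr_example.wav"
# write our own transcript to align; plain-text content is assumed
# to match the bundled example/text.txt format
text_file = "my_transcript.txt"
with open(text_file, "w", encoding="utf-8") as f:
    f.write("欢迎大家来体验达摩院推出的语音识别模型")

res = model(input=(wav_file, text_file), data_type=("sound", "text"))
print(res)
```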
README_zh.md (21 changes)
@@ -86,12 +86,15 @@ funasr +model=paraformer-zh +vad_model="fsmn-vad" +punc_model="ct-punc" +input=a
 ### 非实时语音识别
 ```python
 from funasr import AutoModel
-
-model = AutoModel(model="paraformer-zh")
-# for the long duration wav, you could add vad model
-# model = AutoModel(model="paraformer-zh", vad_model="fsmn-vad", punc_model="ct-punc")
-
-res = model(input="asr_example_zh.wav", batch_size=64)
+# paraformer-zh is a multi-functional asr model
+# use vad, punc, spk or not as you need
+model = AutoModel(model="paraformer-zh", model_revision="v2.0.2", \
+                  vad_model="fsmn-vad", vad_model_revision="v2.0.2", \
+                  punc_model="ct-punc-c", punc_model_revision="v2.0.2", \
+                  spk_model="cam++", spk_model_revision="v2.0.2")
+res = model(input=f"{model.model_path}/example/asr_example.wav",
+            batch_size=64,
+            hotword='魔搭')
 print(res)
 ```
 注:`model_hub`:表示模型仓库,`ms`为选择modelscope下载,`hf`为选择huggingface下载。
@@ -105,7 +108,7 @@ chunk_size = [0, 10, 5] #[0, 10, 5] 600ms, [0, 8, 4] 480ms
 encoder_chunk_look_back = 4 #number of chunks to lookback for encoder self-attention
 decoder_chunk_look_back = 1 #number of encoder chunks to lookback for decoder cross-attention
 
-model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.0")
+model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.2")
 
 import soundfile
 import os
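This hunk only bumps the model revision; the streaming example it sits in slices the wav into 600 ms chunks and feeds them through a rolling cache. A runnable reconstruction from the surrounding README context (the 960-samples-per-chunk arithmetic assumes 16 kHz audio):

```python
from funasr import AutoModel
import soundfile

chunk_size = [0, 10, 5]       # 600 ms display granularity, 300 ms lookahead
encoder_chunk_look_back = 4   # chunks of lookback for encoder self-attention
decoder_chunk_look_back = 1   # encoder chunks of lookback for decoder cross-attention

model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.2")

wav_file = f"{model.model_path}/example/asr_example.wav"
speech, sample_rate = soundfile.read(wav_file)
chunk_stride = chunk_size[1] * 960  # 10 frames * 60 ms * 16 samples/ms

cache = {}
total_chunk_num = int(len(speech) - 1) // chunk_stride + 1
for i in range(total_chunk_num):
    speech_chunk = speech[i * chunk_stride:(i + 1) * chunk_stride]
    is_final = i == total_chunk_num - 1
    res = model(input=speech_chunk, cache=cache, is_final=is_final,
                chunk_size=chunk_size,
                encoder_chunk_look_back=encoder_chunk_look_back,
                decoder_chunk_look_back=decoder_chunk_look_back)
    print(res)  # partial hypothesis for this chunk
```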
@@ -163,7 +166,7 @@ for i in range(total_chunk_num):
 ```python
 from funasr import AutoModel
 
-model = AutoModel(model="ct-punc", model_revision="v2.0.1")
+model = AutoModel(model="ct-punc", model_revision="v2.0.2")
 
 res = model(input="那今天的会就到这里吧 happy new year 明年见")
 print(res)
@@ -176,7 +179,7 @@ from funasr import AutoModel
 model = AutoModel(model="fa-zh", model_revision="v2.0.0")
 
 wav_file = f"{model.model_path}/example/asr_example.wav"
-text_file = f"{model.model_path}/example/asr_example.wav"
+text_file = f"{model.model_path}/example/text.txt"
 res = model(input=(wav_file, text_file), data_type=("sound", "text"))
 print(res)
 ```