update readme

shixian.shi 2024-01-15 20:46:40 +08:00
parent d9e48a5706
commit 19645da9e9
2 changed files with 14 additions and 14 deletions

@@ -97,7 +97,7 @@ model = AutoModel(model="paraformer-zh", model_revision="v2.0.2", \
punc_model="ct-punc-c", punc_model_revision="v2.0.2", \
spk_model="cam++", spk_model_revision="v2.0.2")
res = model(input=f"{model.model_path}/example/asr_example.wav",
batch_size=16,
batch_size=64,
hotword='魔搭')
print(res)
```
@@ -135,7 +135,6 @@ Note: `chunk_size` is the configuration for streaming latency. `[0,10,5]` indicates
from funasr import AutoModel
model = AutoModel(model="fsmn-vad", model_revision="v2.0.2")
wav_file = f"{model.model_path}/example/asr_example.wav"
res = model(input=wav_file)
print(res)
@@ -167,7 +166,6 @@ for i in range(total_chunk_num):
from funasr import AutoModel
model = AutoModel(model="ct-punc", model_revision="v2.0.2")
res = model(input="那今天的会就到这里吧 happy new year 明年见")
print(res)
```
@@ -176,9 +174,8 @@ print(res)
from funasr import AutoModel
model = AutoModel(model="fa-zh", model_revision="v2.0.2")
wav_file = f"{model.model_path}/example/asr_example.wav"
text_file = f"{model.model_path}/example/asr_example.wav"
text_file = f"{model.model_path}/example/text.txt"
res = model(input=(wav_file, text_file), data_type=("sound", "text"))
print(res)
```

@@ -86,12 +86,15 @@ funasr +model=paraformer-zh +vad_model="fsmn-vad" +punc_model="ct-punc" +input=a
### Speech Recognition (Non-streaming)
```python
from funasr import AutoModel
model = AutoModel(model="paraformer-zh")
# for the long duration wav, you could add vad model
# model = AutoModel(model="paraformer-zh", vad_model="fsmn-vad", punc_model="ct-punc")
res = model(input="asr_example_zh.wav", batch_size=64)
# paraformer-zh is a multi-functional asr model
# use vad, punc, spk or not as you need
model = AutoModel(model="paraformer-zh", model_revision="v2.0.2", \
vad_model="fsmn-vad", vad_model_revision="v2.0.2", \
punc_model="ct-punc-c", punc_model_revision="v2.0.2", \
spk_model="cam++", spk_model_revision="v2.0.2")
res = model(input=f"{model.model_path}/example/asr_example.wav",
batch_size=64,
hotword='魔搭')
print(res)
```
Note: `model_hub` specifies the model repository: `ms` selects download from ModelScope, `hf` selects download from Hugging Face.
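As a minimal sketch of that note, assuming `model_hub` is accepted as a keyword argument alongside the options shown above (the exact argument name should be verified against the installed FunASR version):

```python
from funasr import AutoModel

# Assumption: the download hub is chosen via the `model_hub` option described in
# the note above ("ms" = ModelScope, "hf" = Hugging Face); check the exact
# keyword name for your FunASR version.
model = AutoModel(model="paraformer-zh", model_revision="v2.0.2", model_hub="ms")
res = model(input="asr_example_zh.wav")
print(res)
```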
@@ -105,7 +108,7 @@ chunk_size = [0, 10, 5] #[0, 10, 5] 600ms, [0, 8, 4] 480ms
encoder_chunk_look_back = 4 #number of chunks to lookback for encoder self-attention
decoder_chunk_look_back = 1 #number of encoder chunks to lookback for decoder cross-attention
model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.0")
model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.2")
import soundfile
import os
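The chunked decoding loop itself is not part of this hunk (only its first line, `for i in range(total_chunk_num):`, appears as context below). A minimal sketch of how the settings above are typically fed to the streaming model, assuming a 16 kHz mono example file and the `cache`/`is_final` keywords, would be:

```python
from funasr import AutoModel
import soundfile

chunk_size = [0, 10, 5]        # 10 * 60 ms = 600 ms per step, 5 * 60 ms = 300 ms lookahead
encoder_chunk_look_back = 4    # encoder self-attention lookback, in chunks
decoder_chunk_look_back = 1    # decoder cross-attention lookback, in encoder chunks

model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.2")

# assumed example file shipped with the model; 16 kHz mono audio
speech, sample_rate = soundfile.read(f"{model.model_path}/example/asr_example.wav")
chunk_stride = chunk_size[1] * 960  # 10 chunks * 60 ms at 16 kHz = 9600 samples

cache = {}
total_chunk_num = int((len(speech) - 1) / chunk_stride) + 1
for i in range(total_chunk_num):
    speech_chunk = speech[i * chunk_stride:(i + 1) * chunk_stride]
    is_final = i == total_chunk_num - 1
    res = model(input=speech_chunk, cache=cache, is_final=is_final,
                chunk_size=chunk_size,
                encoder_chunk_look_back=encoder_chunk_look_back,
                decoder_chunk_look_back=decoder_chunk_look_back)
    print(res)
```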
@@ -163,7 +166,7 @@ for i in range(total_chunk_num):
```python
from funasr import AutoModel
model = AutoModel(model="ct-punc", model_revision="v2.0.1")
model = AutoModel(model="ct-punc", model_revision="v2.0.2")
res = model(input="那今天的会就到这里吧 happy new year 明年见")
print(res)
@@ -176,7 +179,7 @@ from funasr import AutoModel
model = AutoModel(model="fa-zh", model_revision="v2.0.0")
wav_file = f"{model.model_path}/example/asr_example.wav"
text_file = f"{model.model_path}/example/asr_example.wav"
text_file = f"{model.model_path}/example/text.txt"
res = model(input=(wav_file, text_file), data_type=("sound", "text"))
print(res)
```