diff --git a/README.md b/README.md
index 05f6364f1..eff05936d 100644
--- a/README.md
+++ b/README.md
@@ -97,7 +97,7 @@ model = AutoModel(model="paraformer-zh", model_revision="v2.0.2", \
                   punc_model="ct-punc-c", punc_model_revision="v2.0.2", \
                   spk_model="cam++", spk_model_revision="v2.0.2")
 res = model(input=f"{model.model_path}/example/asr_example.wav",
-            batch_size=16,
+            batch_size=64,
             hotword='魔搭')
 print(res)
 ```
@@ -135,7 +135,6 @@ Note: `chunk_size` is the configuration for streaming latency.` [0,10,5]` indica
 from funasr import AutoModel

 model = AutoModel(model="fsmn-vad", model_revision="v2.0.2")
-
 wav_file = f"{model.model_path}/example/asr_example.wav"
 res = model(input=wav_file)
 print(res)
@@ -167,7 +166,6 @@ for i in range(total_chunk_num):
 from funasr import AutoModel

 model = AutoModel(model="ct-punc", model_revision="v2.0.2")
-
 res = model(input="那今天的会就到这里吧 happy new year 明年见")
 print(res)
 ```
@@ -176,9 +174,8 @@ print(res)
 from funasr import AutoModel

 model = AutoModel(model="fa-zh", model_revision="v2.0.2")
-
 wav_file = f"{model.model_path}/example/asr_example.wav"
-text_file = f"{model.model_path}/example/asr_example.wav"
+text_file = f"{model.model_path}/example/text.txt"
 res = model(input=(wav_file, text_file), data_type=("sound", "text"))
 print(res)
 ```
diff --git a/README_zh.md b/README_zh.md
index 62d251b4c..2da217170 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -86,12 +86,15 @@ funasr +model=paraformer-zh +vad_model="fsmn-vad" +punc_model="ct-punc" +input=a
 ### 非实时语音识别
 ```python
 from funasr import AutoModel
-
-model = AutoModel(model="paraformer-zh")
-# for the long duration wav, you could add vad model
-# model = AutoModel(model="paraformer-zh", vad_model="fsmn-vad", punc_model="ct-punc")
-
-res = model(input="asr_example_zh.wav", batch_size=64)
+# paraformer-zh is a multi-functional ASR model
+# enable the vad, punc and spk models as needed
+model = AutoModel(model="paraformer-zh", model_revision="v2.0.2", \
+                  vad_model="fsmn-vad", vad_model_revision="v2.0.2", \
+                  punc_model="ct-punc-c", punc_model_revision="v2.0.2", \
+                  spk_model="cam++", spk_model_revision="v2.0.2")
+res = model(input=f"{model.model_path}/example/asr_example.wav",
+            batch_size=64,
+            hotword='魔搭')
 print(res)
 ```
 注:`model_hub`:表示模型仓库,`ms`为选择modelscope下载,`hf`为选择huggingface下载。
@@ -105,7 +108,7 @@ chunk_size = [0, 10, 5] #[0, 10, 5] 600ms, [0, 8, 4] 480ms
 encoder_chunk_look_back = 4 #number of chunks to lookback for encoder self-attention
 decoder_chunk_look_back = 1 #number of encoder chunks to lookback for decoder cross-attention

-model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.0")
+model = AutoModel(model="paraformer-zh-streaming", model_revision="v2.0.2")

 import soundfile
 import os
@@ -163,7 +166,7 @@ for i in range(total_chunk_num):
 ```python
 from funasr import AutoModel

-model = AutoModel(model="ct-punc", model_revision="v2.0.1")
+model = AutoModel(model="ct-punc", model_revision="v2.0.2")

 res = model(input="那今天的会就到这里吧 happy new year 明年见")
 print(res)
@@ -176,7 +179,7 @@ from funasr import AutoModel
 model = AutoModel(model="fa-zh", model_revision="v2.0.0")

 wav_file = f"{model.model_path}/example/asr_example.wav"
-text_file = f"{model.model_path}/example/asr_example.wav"
+text_file = f"{model.model_path}/example/text.txt"
 res = model(input=(wav_file, text_file), data_type=("sound", "text"))
 print(res)
 ```
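Note for reviewers: below is a minimal smoke-test sketch for the corrected timestamp-prediction example, assuming this diff is applied. The `AutoModel` calls and paths are taken verbatim from the patched README; the existence check, and the assumption that the downloaded `fa-zh` model bundles `example/text.txt` next to `example/asr_example.wav`, are illustrative additions and have not been verified against the released package.

```python
# Smoke test for the patched fa-zh example (a sketch, not part of the diff).
# Assumption: the model download ships example/asr_example.wav and
# example/text.txt under model.model_path -- inferred from the corrected
# path in this diff, not verified against the package contents.
import os

from funasr import AutoModel

model = AutoModel(model="fa-zh", model_revision="v2.0.2")

wav_file = f"{model.model_path}/example/asr_example.wav"
text_file = f"{model.model_path}/example/text.txt"

# Fail early with a clear message if the bundled example files are missing;
# the old README pointed text_file at the wav, which this patch fixes.
for path in (wav_file, text_file):
    assert os.path.exists(path), f"missing bundled example file: {path}"

# Same call as the README: paired audio + transcript for timestamp prediction.
res = model(input=(wav_file, text_file), data_type=("sound", "text"))
print(res)
```

The same pattern, calling each model on the files under its own `model_path/example/` directory, also exercises the `fsmn-vad`, `ct-punc`, and `paraformer-zh` snippets touched by the other hunks.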