diff --git a/tests/test_asr_inference_pipeline.py b/tests/test_asr_inference_pipeline.py
index 6257d114c..70dbe8952 100644
--- a/tests/test_asr_inference_pipeline.py
+++ b/tests/test_asr_inference_pipeline.py
@@ -65,7 +65,7 @@ class TestMfccaInferencePipelines(unittest.TestCase):
             model='NPU-ASLP/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950',
             model_revision='v3.0.0')
         rec_result = inference_pipeline(
-            audio_in='16:32https://pre.modelscope.cn/api/v1/models/NPU-ASLP/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/repo?Revision=master&FilePath=example/asr_example_mc.wav')
+            audio_in='https://pre.modelscope.cn/api/v1/models/NPU-ASLP/speech_mfcca_asr-zh-cn-16k-alimeeting-vocab4950/repo?Revision=master&FilePath=example/asr_example_mc.wav')
         logger.info("asr inference result: {0}".format(rec_result))
@@ -451,8 +451,8 @@ class TestUniasrInferencePipelines(unittest.TestCase):
     def test_uniasr_2pass_zhcn_16k_common_vocab8358_offline(self):
         inference_pipeline = pipeline(
-            task=Tasks.auto_speech_recognition,
-            model='damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline')
+            task=Tasks.auto_speech_recognition,
+            model='damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline')
         rec_result = inference_pipeline(
             audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav',
             param_dict={"decoding_model": "offline"})
diff --git a/tests/test_asr_vad_punc_inference_pipeline.py b/tests/test_asr_vad_punc_inference_pipeline.py
new file mode 100644
index 000000000..bcb57e09f
--- /dev/null
+++ b/tests/test_asr_vad_punc_inference_pipeline.py
@@ -0,0 +1,32 @@
+import unittest
+
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+from modelscope.utils.logger import get_logger
+
+logger = get_logger()
+
+class TestParaformerInferencePipelines(unittest.TestCase):
+    def test_funasr_path(self):
+        import funasr
+        import os
+        logger.info("run_dir:{0} ; funasr_path: {1}".format(os.getcwd(), funasr.__file__))
+
+    def test_inference_pipeline(self):
+        inference_pipeline = pipeline(
+            task=Tasks.auto_speech_recognition,
+            model='damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch',
+            model_revision="v1.2.1",
+            vad_model='damo/speech_fsmn_vad_zh-cn-16k-common-pytorch',
+            vad_model_revision="v1.1.8",
+            punc_model='damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch',
+            punc_model_revision="v1.1.6",
+            ngpu=1,
+        )
+        rec_result = inference_pipeline(
+            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
+        logger.info("asr inference result: {0}".format(rec_result))
+
+
+if __name__ == '__main__':
+    unittest.main()