From c1d4bd297a4418ef44882079c4845cfe64ed0b21 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=B8=B8=E9=9B=81?=
Date: Thu, 27 Apr 2023 19:36:32 +0800
Subject: [PATCH] docs

---
 .../demo.py | 4 ++--
 .../demo.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../demo.py | 4 ++--
 .../demo.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 .../infer.py | 4 ++--
 egs_modelscope/punctuation/TEMPLATE/README.md | 8 ++++----
 .../demo.py | 4 ++--
 egs_modelscope/tp/TEMPLATE/README.md | 4 ++--
 .../tp/speech_timestamp_prediction-v1-16k-offline/demo.py | 4 ++--
 .../vad/speech_fsmn_vad_zh-cn-16k-common/demo.py | 4 ++--
 .../vad/speech_fsmn_vad_zh-cn-16k-common/demo_online.py | 4 ++--
 .../vad/speech_fsmn_vad_zh-cn-8k-common/demo.py | 4 ++--
 .../vad/speech_fsmn_vad_zh-cn-8k-common/demo_online.py | 4 ++--
 53 files changed, 108 insertions(+), 108 deletions(-)

diff --git a/egs_modelscope/asr/conformer/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py b/egs_modelscope/asr/conformer/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py
index 3594815f7..87bb65299 100644
--- a/egs_modelscope/asr/conformer/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py
+++ b/egs_modelscope/asr/conformer/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py
@@ -4,11 +4,11 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
     output_dir = None
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in)
+    rec_result = inference_pipeline(audio_in=audio_in)
     print(rec_result)
diff --git a/egs_modelscope/asr/conformer/speech_conformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/demo.py b/egs_modelscope/asr/conformer/speech_conformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/demo.py
index b55b59f41..3b0164a46 100644
--- a/egs_modelscope/asr/conformer/speech_conformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/demo.py
+++ b/egs_modelscope/asr/conformer/speech_conformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/demo.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://modelscope.oss-cn-beijing.aliyuncs.com/test/audios/asr_example.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_conformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in)
+    rec_result = inference_pipeline(audio_in=audio_in)
     print(rec_result)
diff --git a/egs_modelscope/asr/data2vec/speech_data2vec_pretrain-paraformer-zh-cn-aishell2-16k/infer.py b/egs_modelscope/asr/data2vec/speech_data2vec_pretrain-paraformer-zh-cn-aishell2-16k/infer.py
index 77b2cbd23..7a6b750e1 100644
--- a/egs_modelscope/asr/data2vec/speech_data2vec_pretrain-paraformer-zh-cn-aishell2-16k/infer.py
+++ b/egs_modelscope/asr/data2vec/speech_data2vec_pretrain-paraformer-zh-cn-aishell2-16k/infer.py
@@ -16,13 +16,13 @@ def modelscope_infer_core(output_dir, split_dir, njob, idx):
         os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
     else:
         os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_data2vec_pretrain-paraformer-zh-cn-aishell2-16k",
         output_dir=output_dir_job,
     )
     audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
-    inference_pipline(audio_in=audio_in)
+    inference_pipeline(audio_in=audio_in)
 
 
 def modelscope_infer(params):
diff --git a/egs_modelscope/asr/data2vec/speech_data2vec_pretrain-zh-cn-aishell2-16k-pytorch/infer.py b/egs_modelscope/asr/data2vec/speech_data2vec_pretrain-zh-cn-aishell2-16k-pytorch/infer.py
index 0d06377e0..f07f308c2 100644
--- a/egs_modelscope/asr/data2vec/speech_data2vec_pretrain-zh-cn-aishell2-16k-pytorch/infer.py
+++ b/egs_modelscope/asr/data2vec/speech_data2vec_pretrain-zh-cn-aishell2-16k-pytorch/infer.py
@@ -16,13 +16,13 @@ def modelscope_infer_core(output_dir, split_dir, njob, idx):
         os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
     else:
         os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_data2vec_pretrain-zh-cn-aishell2-16k-pytorch",
         output_dir=output_dir_job,
     )
     audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
-    inference_pipline(audio_in=audio_in)
+    inference_pipeline(audio_in=audio_in)
 
 
 def modelscope_infer(params):
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/infer.py b/egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/infer.py
index d1fbca22d..00be7935f 100644
--- a/egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/infer.py
+++ b/egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/infer.py
@@ -16,14 +16,14 @@ def modelscope_infer_core(output_dir, split_dir, njob, idx):
         os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
     else:
         os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch",
         output_dir=output_dir_job,
         batch_size=64
     )
     audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
-    inference_pipline(audio_in=audio_in)
+    inference_pipeline(audio_in=audio_in)
 
 
 def modelscope_infer(params):
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py
index 4125a5751..2863c1ada 100644
--- a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py
+++ b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py
@@ -4,12 +4,12 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
     output_dir = None
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch",
         output_dir=output_dir,
         batch_size=1,
     )
-    rec_result = inference_pipline(audio_in=audio_in)
+    rec_result = inference_pipeline(audio_in=audio_in)
     print(rec_result)
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/demo.py b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/demo.py
index dec7de041..f2db74e8b 100644
--- a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/demo.py
+++ b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/demo.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://modelscope.oss-cn-beijing.aliyuncs.com/test/audios/asr_example.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_paraformer_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in)
+    rec_result = inference_pipeline(audio_in=audio_in)
     print(rec_result)
diff --git a/egs_modelscope/asr/paraformerbert/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.py b/egs_modelscope/asr/paraformerbert/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.py
index df1890311..f4c4fc2fb 100644
--- a/egs_modelscope/asr/paraformerbert/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.py
+++ b/egs_modelscope/asr/paraformerbert/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.py
@@ -4,11 +4,11 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
     output_dir = None
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in)
+    rec_result = inference_pipeline(audio_in=audio_in)
     print(rec_result)
diff --git a/egs_modelscope/asr/paraformerbert/speech_paraformerbert_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/infer.py b/egs_modelscope/asr/paraformerbert/speech_paraformerbert_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/infer.py
index 83d680583..63bed40a0 100644
--- a/egs_modelscope/asr/paraformerbert/speech_paraformerbert_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/infer.py
+++ b/egs_modelscope/asr/paraformerbert/speech_paraformerbert_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://modelscope.oss-cn-beijing.aliyuncs.com/test/audios/asr_example.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell2-vocab5212-pytorch",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in)
+    rec_result = inference_pipeline(audio_in=audio_in)
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-offline/infer.py
index c15114934..862f88198 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-offline/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_cantonese-CHS.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-online",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-online/infer.py
index ac73adf72..d4f8d762f 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-online/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_cantonese-CHS.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-cantonese-CHS-16k-common-vocab1468-tensorflow1-online",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-offline/infer.py
index 227f4bf28..347d31694 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-offline/infer.py
@@ -4,11 +4,11 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
     output_dir = None
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-offline",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in)
+    rec_result = inference_pipeline(audio_in=audio_in)
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-online/infer.py
index 74d97643b..936d6d7ba 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-online/infer.py
@@ -4,11 +4,11 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
     output_dir = None
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-online",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in)
+    rec_result = inference_pipeline(audio_in=audio_in)
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-de-16k-common-vocab3690-tensorflow1-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-de-16k-common-vocab3690-tensorflow1-offline/infer.py
index 5ace7e4cf..f82c1f4c4 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-de-16k-common-vocab3690-tensorflow1-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-de-16k-common-vocab3690-tensorflow1-offline/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_de.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-de-16k-common-vocab3690-tensorflow1-offline",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-de-16k-common-vocab3690-tensorflow1-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-de-16k-common-vocab3690-tensorflow1-online/infer.py
index f8d91b833..48b48071e 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-de-16k-common-vocab3690-tensorflow1-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-de-16k-common-vocab3690-tensorflow1-online/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_de.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-de-16k-common-vocab3690-tensorflow1-online",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-en-16k-common-vocab1080-tensorflow1-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-en-16k-common-vocab1080-tensorflow1-offline/infer.py
index 49b884b2f..98f31b602 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-en-16k-common-vocab1080-tensorflow1-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-en-16k-common-vocab1080-tensorflow1-offline/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_en.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-en-16k-common-vocab1080-tensorflow1-offline",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-en-16k-common-vocab1080-tensorflow1-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-en-16k-common-vocab1080-tensorflow1-online/infer.py
index 57a3afdf9..423c503ed 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-en-16k-common-vocab1080-tensorflow1-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-en-16k-common-vocab1080-tensorflow1-online/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_en.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-en-16k-common-vocab1080-tensorflow1-online",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-es-16k-common-vocab3445-tensorflow1-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-es-16k-common-vocab3445-tensorflow1-offline/infer.py
index 510f00828..75e22a0e9 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-es-16k-common-vocab3445-tensorflow1-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-es-16k-common-vocab3445-tensorflow1-offline/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_es.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-es-16k-common-vocab3445-tensorflow1-offline",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-es-16k-common-vocab3445-tensorflow1-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-es-16k-common-vocab3445-tensorflow1-online/infer.py
index 2ec59402c..cb1b4fa99 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-es-16k-common-vocab3445-tensorflow1-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-es-16k-common-vocab3445-tensorflow1-online/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_es.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-es-16k-common-vocab3445-tensorflow1-online",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-fa-16k-common-vocab1257-pytorch-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-fa-16k-common-vocab1257-pytorch-offline/infer.py
index 040265d22..e6c39c2b8 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-fa-16k-common-vocab1257-pytorch-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-fa-16k-common-vocab1257-pytorch-offline/infer.py
@@ -16,14 +16,14 @@ def modelscope_infer_core(output_dir, split_dir, njob, idx):
         os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
     else:
         os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-fa-16k-common-vocab1257-pytorch-offline",
         output_dir=output_dir_job,
         batch_size=1
     )
     audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
-    inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
+    inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
 
 
 def modelscope_infer(params):
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-fa-16k-common-vocab1257-pytorch-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-fa-16k-common-vocab1257-pytorch-online/infer.py
index 055e4ebdb..124d5ed05 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-fa-16k-common-vocab1257-pytorch-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-fa-16k-common-vocab1257-pytorch-online/infer.py
@@ -16,14 +16,14 @@ def modelscope_infer_core(output_dir, split_dir, njob, idx):
         os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
     else:
         os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-fa-16k-common-vocab1257-pytorch-online",
         output_dir=output_dir_job,
         batch_size=1
     )
     audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
-    inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
+    inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
 
 
 def modelscope_infer(params):
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-fr-16k-common-vocab3472-tensorflow1-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-fr-16k-common-vocab3472-tensorflow1-offline/infer.py
index 6aedeeaa8..627d132fc 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-fr-16k-common-vocab3472-tensorflow1-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-fr-16k-common-vocab3472-tensorflow1-offline/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_fr.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-fr-16k-common-vocab3472-tensorflow1-offline",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-fr-16k-common-vocab3472-tensorflow1-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-fr-16k-common-vocab3472-tensorflow1-online/infer.py
index 2f3e8330c..305d990c8 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-fr-16k-common-vocab3472-tensorflow1-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-fr-16k-common-vocab3472-tensorflow1-online/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_fr.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-fr-16k-common-vocab3472-tensorflow1-online",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-he-16k-common-vocab1085-pytorch/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-he-16k-common-vocab1085-pytorch/infer.py
index c54ab8c83..e0d1a4d35 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-he-16k-common-vocab1085-pytorch/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-he-16k-common-vocab1085-pytorch/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_he.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-he-16k-common-vocab1085-pytorch",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-id-16k-common-vocab1067-tensorflow1-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-id-16k-common-vocab1067-tensorflow1-offline/infer.py
index 219c9ec42..e53c37e60 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-id-16k-common-vocab1067-tensorflow1-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-id-16k-common-vocab1067-tensorflow1-offline/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_id.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-id-16k-common-vocab1067-tensorflow1-online",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-id-16k-common-vocab1067-tensorflow1-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-id-16k-common-vocab1067-tensorflow1-online/infer.py
index ad2671a3e..75ec783de 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-id-16k-common-vocab1067-tensorflow1-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-id-16k-common-vocab1067-tensorflow1-online/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_id.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-id-16k-common-vocab1067-tensorflow1-online",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline/infer.py
index 1a174bbca..68cc41d54 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_ja.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-online/infer.py
index f15bc2d2b..a741e18e7 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-online/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_ja.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-online",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ko-16k-common-vocab6400-tensorflow1-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ko-16k-common-vocab6400-tensorflow1-offline/infer.py
index 618b3f601..b87bcbb84 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ko-16k-common-vocab6400-tensorflow1-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ko-16k-common-vocab6400-tensorflow1-offline/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_ko.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-ko-16k-common-vocab6400-tensorflow1-offline",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ko-16k-common-vocab6400-tensorflow1-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ko-16k-common-vocab6400-tensorflow1-online/infer.py
index 135e8f8b9..9be791ceb 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ko-16k-common-vocab6400-tensorflow1-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ko-16k-common-vocab6400-tensorflow1-online/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_ko.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-ko-16k-common-vocab6400-tensorflow1-online",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-my-16k-common-vocab696-pytorch/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-my-16k-common-vocab696-pytorch/infer.py
index cfd869f04..b3a905859 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-my-16k-common-vocab696-pytorch/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-my-16k-common-vocab696-pytorch/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_my.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-my-16k-common-vocab696-pytorch",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline/infer.py
index 2dcb6638a..4a43e7ce5 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_pt.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-online/infer.py
index aff2a9a51..7029fd9c8 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-online/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_pt.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-online",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ru-16k-common-vocab1664-tensorflow1-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ru-16k-common-vocab1664-tensorflow1-offline/infer.py
index 95f447d13..3c9d364e9 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ru-16k-common-vocab1664-tensorflow1-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ru-16k-common-vocab1664-tensorflow1-offline/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_ru.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-ru-16k-common-vocab1664-tensorflow1-offline",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ru-16k-common-vocab1664-tensorflow1-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ru-16k-common-vocab1664-tensorflow1-online/infer.py
index 88c06b4c6..95da47935 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ru-16k-common-vocab1664-tensorflow1-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ru-16k-common-vocab1664-tensorflow1-online/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_ru.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-ru-16k-common-vocab1664-tensorflow1-online",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ur-16k-common-vocab877-pytorch/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ur-16k-common-vocab877-pytorch/infer.py
index e8c5524f0..04b02fe16 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ur-16k-common-vocab877-pytorch/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ur-16k-common-vocab877-pytorch/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_ur.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-ur-16k-common-vocab877-pytorch",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-vi-16k-common-vocab1001-pytorch-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-vi-16k-common-vocab1001-pytorch-offline/infer.py
index 9472104e5..4218f3d7a 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-vi-16k-common-vocab1001-pytorch-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-vi-16k-common-vocab1001-pytorch-offline/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_vi.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-vi-16k-common-vocab1001-pytorch-offline",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-vi-16k-common-vocab1001-pytorch-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-vi-16k-common-vocab1001-pytorch-online/infer.py
index 4a844fc82..355e412c4 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-vi-16k-common-vocab1001-pytorch-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-vi-16k-common-vocab1001-pytorch-online/infer.py
@@ -4,10 +4,10 @@ from modelscope.utils.constant import Tasks
 if __name__ == "__main__":
     audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_vi.wav"
     output_dir = "./results"
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-vi-16k-common-vocab1001-pytorch-online",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
+    rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"normal"})
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline/infer.py
index 40686acca..35209896c 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline/infer.py
@@ -4,11 +4,11 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
     output_dir = None
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-offline",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in)
+    rec_result = inference_pipeline(audio_in=audio_in)
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online/infer.py
index dfe934d67..a3e2a002f 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online/infer.py
@@ -4,11 +4,11 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
     output_dir = None
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in)
+    rec_result = inference_pipeline(audio_in=audio_in)
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/infer.py
index ce8988ef6..13d2a2e37 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/infer.py
@@ -16,14 +16,14 @@ def modelscope_infer_core(output_dir, split_dir, njob, idx):
         os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
     else:
         os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline",
         output_dir=output_dir_job,
         batch_size=1
     )
     audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
-    inference_pipline(audio_in=audio_in)
+    inference_pipeline(audio_in=audio_in)
 
 def modelscope_infer(params):
     # prepare for multi-GPU decoding
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.py
index 8b4a04dd3..876d51cc9 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.py
@@ -16,14 +16,14 @@ def modelscope_infer_core(output_dir, split_dir, njob, idx):
         os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
     else:
         os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online",
         output_dir=output_dir_job,
         batch_size=1
     )
     audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
-    inference_pipline(audio_in=audio_in, param_dict={"decoding_model": "normal"})
+    inference_pipeline(audio_in=audio_in, param_dict={"decoding_model": "normal"})
 
 
 def modelscope_infer(params):
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-offline/infer.py
index 1c1e303f3..8ec42885d 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-offline/infer.py
@@ -4,11 +4,11 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
     output_dir = None
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-offline",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in)
+    rec_result = inference_pipeline(audio_in=audio_in)
     print(rec_result)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-online/infer.py
index 94c1b6818..3ab16ea72 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-online/infer.py
@@ -4,11 +4,11 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
     output_dir = None
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-online",
         output_dir=output_dir,
     )
-    rec_result = inference_pipline(audio_in=audio_in)
+    rec_result = inference_pipeline(audio_in=audio_in)
     print(rec_result)
diff --git a/egs_modelscope/lm/speech_transformer_lm_zh-cn-common-vocab8404-pytorch/infer.py b/egs_modelscope/lm/speech_transformer_lm_zh-cn-common-vocab8404-pytorch/infer.py
index ec309b2ce..628cdd86b 100644
--- a/egs_modelscope/lm/speech_transformer_lm_zh-cn-common-vocab8404-pytorch/infer.py
+++ b/egs_modelscope/lm/speech_transformer_lm_zh-cn-common-vocab8404-pytorch/infer.py
@@ -6,12 +6,12 @@ inputs = "hello 大 家 好 呀"
 
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
 
-inference_pipline = pipeline(
+inference_pipeline = pipeline(
     task=Tasks.language_score_prediction,
     model='damo/speech_transformer_lm_zh-cn-common-vocab8404-pytorch',
     output_dir="./tmp/"
 )
-rec_result = inference_pipline(text_in=inputs)
+rec_result = inference_pipeline(text_in=inputs)
 print(rec_result)
diff --git a/egs_modelscope/punctuation/TEMPLATE/README.md b/egs_modelscope/punctuation/TEMPLATE/README.md
index 7cbca0586..19600d3a3 100644
--- a/egs_modelscope/punctuation/TEMPLATE/README.md
+++ b/egs_modelscope/punctuation/TEMPLATE/README.md
@@ -11,21 +11,21 @@
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
 
-inference_pipline = pipeline(
+inference_pipeline = pipeline(
     task=Tasks.punctuation,
     model='damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch',
     model_revision=None)
 
-rec_result = inference_pipline(text_in='example/punc_example.txt')
+rec_result = inference_pipeline(text_in='example/punc_example.txt')
 print(rec_result)
 ```
 - text二进制数据,例如:用户直接从文件里读出bytes数据
 ```python
-rec_result = inference_pipline(text_in='我们都是木头人不会讲话不会动')
+rec_result = inference_pipeline(text_in='我们都是木头人不会讲话不会动')
 ```
 - text文件url,例如:https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_text/punc_example.txt
 ```python
-rec_result = inference_pipline(text_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_text/punc_example.txt')
+rec_result = inference_pipeline(text_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_text/punc_example.txt')
 ```
 
 #### [CT-Transformer Realtime model](https://www.modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727/summary)
diff --git a/egs_modelscope/punctuation/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/demo.py b/egs_modelscope/punctuation/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/demo.py
index 0da8d25a1..20994d39c 100644
--- a/egs_modelscope/punctuation/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/demo.py
+++ b/egs_modelscope/punctuation/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/demo.py
@@ -12,12 +12,12 @@ inputs = "./egs_modelscope/punctuation/punc_ct-transformer_zh-cn-common-vocab272
 
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
 
-inference_pipline = pipeline(
+inference_pipeline = pipeline(
     task=Tasks.punctuation,
     model='damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch',
     model_revision="v1.1.7",
     output_dir="./tmp/"
 )
-rec_result = inference_pipline(text_in=inputs)
+rec_result = inference_pipeline(text_in=inputs)
 print(rec_result)
diff --git a/egs_modelscope/tp/TEMPLATE/README.md b/egs_modelscope/tp/TEMPLATE/README.md
index 8d7558115..745249f86 100644
--- a/egs_modelscope/tp/TEMPLATE/README.md
+++ b/egs_modelscope/tp/TEMPLATE/README.md
@@ -8,12 +8,12 @@
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
 
-inference_pipline = pipeline(
+inference_pipeline = pipeline(
     task=Tasks.speech_timestamp,
     model='damo/speech_timestamp_prediction-v1-16k-offline',
     output_dir=None)
 
-rec_result = inference_pipline(
+rec_result = inference_pipeline(
     audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_timestamps.wav',
     text_in='一 个 东 太 平 洋 国 家 为 什 么 跑 到 西 太 平 洋 来 了 呢',)
 print(rec_result)
diff --git a/egs_modelscope/tp/speech_timestamp_prediction-v1-16k-offline/demo.py b/egs_modelscope/tp/speech_timestamp_prediction-v1-16k-offline/demo.py
index 2e6f92f72..bcc512837 100644
--- a/egs_modelscope/tp/speech_timestamp_prediction-v1-16k-offline/demo.py
+++ b/egs_modelscope/tp/speech_timestamp_prediction-v1-16k-offline/demo.py
@@ -1,12 +1,12 @@
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
 
-inference_pipline = pipeline(
+inference_pipeline = pipeline(
     task=Tasks.speech_timestamp,
     model='damo/speech_timestamp_prediction-v1-16k-offline',
     output_dir=None)
 
-rec_result = inference_pipline(
+rec_result = inference_pipeline(
     audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_timestamps.wav',
     text_in='一 个 东 太 平 洋 国 家 为 什 么 跑 到 西 太 平 洋 来 了 呢',)
 print(rec_result)
\ No newline at end of file
diff --git a/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo.py b/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo.py
index 2bf3251e3..bbc16c5b6 100644
--- a/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo.py
+++ b/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo.py
@@ -4,12 +4,12 @@ from modelscope.utils.constant import Tasks
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav'
     output_dir = None
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
         model_revision='v1.2.0',
         output_dir=output_dir,
         batch_size=1,
     )
-    segments_result = inference_pipline(audio_in=audio_in)
+    segments_result = inference_pipeline(audio_in=audio_in)
     print(segments_result)
diff --git a/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo_online.py b/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo_online.py
index 02e919d2e..65693b5f1 100644
--- a/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo_online.py
+++ b/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo_online.py
@@ -8,7 +8,7 @@ import soundfile
 
 if __name__ == '__main__':
     output_dir = None
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
         model_revision='v1.2.0',
@@ -30,7 +30,7 @@ if __name__ == '__main__':
         else:
             is_final = False
         param_dict['is_final'] = is_final
-        segments_result = inference_pipline(audio_in=speech[sample_offset: sample_offset + step],
+        segments_result = inference_pipeline(audio_in=speech[sample_offset: sample_offset + step],
                                             param_dict=param_dict)
         print(segments_result)
 
diff --git a/egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo.py b/egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo.py
index 2e5027500..84863d082 100644
--- a/egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo.py
+++ b/egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo.py
@@ -4,12 +4,12 @@ from modelscope.utils.constant import Tasks
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example_8k.wav'
     output_dir = None
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-8k-common",
         model_revision='v1.2.0',
         output_dir=output_dir,
         batch_size=1,
     )
-    segments_result = inference_pipline(audio_in=audio_in)
+    segments_result = inference_pipeline(audio_in=audio_in)
     print(segments_result)
diff --git a/egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo_online.py b/egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo_online.py
index a8cc912d6..5b67da74a 100644
--- a/egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo_online.py
+++ b/egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo_online.py
@@ -8,7 +8,7 @@ import soundfile
 
 if __name__ == '__main__':
     output_dir = None
-    inference_pipline = pipeline(
+    inference_pipeline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-8k-common",
         model_revision='v1.2.0',
@@ -30,7 +30,7 @@ if __name__ == '__main__':
         else:
             is_final = False
         param_dict['is_final'] = is_final
-        segments_result = inference_pipline(audio_in=speech[sample_offset: sample_offset + step],
+        segments_result = inference_pipeline(audio_in=speech[sample_offset: sample_offset + step],
                                             param_dict=param_dict)
         print(segments_result)