From 6cc96a10eb7430e36e9b9b84a06fc8f29144e54e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E4=BB=81=E8=BF=B7?=
Date: Tue, 7 Feb 2023 10:50:15 +0800
Subject: [PATCH] update 8k uniasr recipe

---
 .../finetune.py             | 2 +-
 .../infer.py                | 2 +-
 .../infer_after_finetune.py | 2 +-
 .../finetune.py             | 2 +-
 .../infer.py                | 2 +-
 .../infer_after_finetune.py | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/finetune.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/finetune.py
index 65df8cbba..b2325b2bb 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/finetune.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/finetune.py
@@ -25,7 +25,7 @@ def modelscope_finetune(params):
 
 
 if __name__ == '__main__':
-    params = modelscope_args(model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online", data_path="./data")
+    params = modelscope_args(model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline", data_path="./data")
     params.output_dir = "./checkpoint"  # path to save the model
     params.data_path = "./example_data/"  # data path
     params.dataset_type = "small"  # use "small" for small datasets; if you have more than 1000 hours of data, use "large"
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/infer.py
index 8fd751344..e855032de 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/infer.py
@@ -18,7 +18,7 @@ def modelscope_infer_core(output_dir, split_dir, njob, idx):
     os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
     inference_pipline = pipeline(
         task=Tasks.auto_speech_recognition,
-        model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online",
+        model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline",
         output_dir=output_dir_job,
         batch_size=1
     )
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/infer_after_finetune.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/infer_after_finetune.py
index b4dde6048..6664c3d02 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/infer_after_finetune.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline/infer_after_finetune.py
@@ -45,7 +45,7 @@ def modelscope_infer_after_finetune(params):
 
 if __name__ == '__main__':
     params = {}
-    params["modelscope_model_name"] = "damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online"
+    params["modelscope_model_name"] = "damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline"
     params["required_files"] = ["am.mvn", "decoding.yaml", "configuration.json"]
     params["output_dir"] = "./checkpoint"
     params["data_dir"] = "./data/test"
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/finetune.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/finetune.py
index b2325b2bb..65df8cbba 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/finetune.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/finetune.py
@@ -25,7 +25,7 @@ def modelscope_finetune(params):
 
 
 if __name__ == '__main__':
-    params = modelscope_args(model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline", data_path="./data")
+    params = modelscope_args(model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online", data_path="./data")
     params.output_dir = "./checkpoint"  # path to save the model
     params.data_path = "./example_data/"  # data path
     params.dataset_type = "small"  # use "small" for small datasets; if you have more than 1000 hours of data, use "large"
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.py
index e855032de..8fd751344 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer.py
@@ -18,7 +18,7 @@ def modelscope_infer_core(output_dir, split_dir, njob, idx):
     os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
     inference_pipline = pipeline(
         task=Tasks.auto_speech_recognition,
-        model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline",
+        model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online",
         output_dir=output_dir_job,
         batch_size=1
     )
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer_after_finetune.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer_after_finetune.py
index 6664c3d02..b4dde6048 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer_after_finetune.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online/infer_after_finetune.py
@@ -45,7 +45,7 @@ def modelscope_infer_after_finetune(params):
 
 if __name__ == '__main__':
     params = {}
-    params["modelscope_model_name"] = "damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline"
+    params["modelscope_model_name"] = "damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online"
     params["required_files"] = ["am.mvn", "decoding.yaml", "configuration.json"]
     params["output_dir"] = "./checkpoint"
     params["data_dir"] = "./data/test"
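The patch only swaps the two ModelScope model IDs so that each recipe directory loads its matching model: -offline in the offline recipe, -online in the online recipe. A quick way to sanity-check either corrected ID is a one-off pipeline call. The snippet below is a minimal sketch, not part of the patch: it assumes the modelscope package with FunASR support is installed, and "example.wav" is a hypothetical local 8 kHz recording.

    # Minimal sanity check for the corrected model IDs (a sketch, not part
    # of the patch). Assumes modelscope with FunASR support is installed;
    # "example.wav" is a hypothetical local 8 kHz recording.
    from modelscope.pipelines import pipeline
    from modelscope.utils.constant import Tasks

    # The offline recipe should now pull the -offline model; change the
    # suffix to -online to check the online recipe the same way.
    inference_pipeline = pipeline(
        task=Tasks.auto_speech_recognition,
        model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline",
    )
    print(inference_pipeline(audio_in="example.wav"))

If the model ID and the recipe directory disagree, as they did before this patch, the script still runs but fine-tunes or evaluates against the wrong checkpoint, which is why the fix matters even though no code logic changes.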