mirror of https://github.com/modelscope/FunASR (synced 2025-09-15 14:48:36 +08:00)
update 8k uniasr recipe
This commit is contained in:
parent c5f132a451
commit 6cc96a10eb
@@ -25,7 +25,7 @@ def modelscope_finetune(params):
 
 
 if __name__ == '__main__':
-    params = modelscope_args(model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online", data_path="./data")
+    params = modelscope_args(model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline", data_path="./data")
     params.output_dir = "./checkpoint"       # model save path
     params.data_path = "./example_data/"     # data path
     params.dataset_type = "small"            # set "small" for small datasets; use "large" if the data exceeds 1000 hours
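For context, a minimal sketch of how this finetune entry point is typically driven end to end. The import path for modelscope_args and the closing call to modelscope_finetune(params) are assumptions; neither appears in the hunk above.

    # Sketch only: the modelscope_args import path and the final call are assumed.
    from funasr.utils.modelscope_param import modelscope_args

    if __name__ == '__main__':
        params = modelscope_args(
            model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline",
            data_path="./data")
        params.output_dir = "./checkpoint"    # where finetuned checkpoints are saved
        params.data_path = "./example_data/"  # local training data
        params.dataset_type = "small"         # "large" for more than 1000 hours of audio
        modelscope_finetune(params)           # assumed entry-point call, defined earlier in the file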
@@ -18,7 +18,7 @@ def modelscope_infer_core(output_dir, split_dir, njob, idx):
     os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
     inference_pipline = pipeline(
         task=Tasks.auto_speech_recognition,
-        model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online",
+        model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline",
         output_dir=output_dir_job,
         batch_size=1
     )
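For reference, a hedged usage sketch of the pipeline object configured above. The modelscope import paths are standard; the audio_in keyword and the example wav path are assumptions, not part of this diff.

    # Sketch only: the output directory and input wav are hypothetical.
    from modelscope.pipelines import pipeline
    from modelscope.utils.constant import Tasks

    inference_pipline = pipeline(
        task=Tasks.auto_speech_recognition,
        model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline",
        output_dir="./results",
        batch_size=1)

    # Assumed call convention: pass a wav path (or wav.scp) via audio_in.
    rec_result = inference_pipline(audio_in="./example.wav")
    print(rec_result)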
@@ -45,7 +45,7 @@ def modelscope_infer_after_finetune(params):
 
 if __name__ == '__main__':
     params = {}
-    params["modelscope_model_name"] = "damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online"
+    params["modelscope_model_name"] = "damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline"
     params["required_files"] = ["am.mvn", "decoding.yaml", "configuration.json"]
     params["output_dir"] = "./checkpoint"
     params["data_dir"] = "./data/test"
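A minimal sketch of driving modelscope_infer_after_finetune with the params set above; the closing call is an assumption, since only the dict construction is visible in this hunk.

    # Sketch only: the final call is assumed from the hunk's function name.
    if __name__ == '__main__':
        params = {}
        params["modelscope_model_name"] = "damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline"
        params["required_files"] = ["am.mvn", "decoding.yaml", "configuration.json"]
        params["output_dir"] = "./checkpoint"   # directory holding the finetuned model
        params["data_dir"] = "./data/test"      # test set to decode
        modelscope_infer_after_finetune(params) # assumed call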
@@ -25,7 +25,7 @@ def modelscope_finetune(params):
 
 
 if __name__ == '__main__':
-    params = modelscope_args(model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline", data_path="./data")
+    params = modelscope_args(model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online", data_path="./data")
     params.output_dir = "./checkpoint"       # model save path
     params.data_path = "./example_data/"     # data path
     params.dataset_type = "small"            # set "small" for small datasets; use "large" if the data exceeds 1000 hours
@@ -18,7 +18,7 @@ def modelscope_infer_core(output_dir, split_dir, njob, idx):
     os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
     inference_pipline = pipeline(
         task=Tasks.auto_speech_recognition,
-        model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline",
+        model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online",
         output_dir=output_dir_job,
         batch_size=1
     )
@@ -45,7 +45,7 @@ def modelscope_infer_after_finetune(params):
 
 if __name__ == '__main__':
     params = {}
-    params["modelscope_model_name"] = "damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline"
+    params["modelscope_model_name"] = "damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online"
     params["required_files"] = ["am.mvn", "decoding.yaml", "configuration.json"]
     params["output_dir"] = "./checkpoint"
     params["data_dir"] = "./data/test"