update speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch (#688)

* update speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch finetune & infer scripts

* update speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch
Chong Zhang 2023-06-29 16:32:14 +08:00 committed by GitHub
parent 98abc0e5ac
commit 6086ff54e3
2 changed files with 3 additions and 68 deletions

Changed file 1 of 2: the finetune script

@@ -1,5 +1,4 @@
import os
<<<<<<< HEAD
from modelscope.metainfo import Trainers
from modelscope.trainers import build_trainer
@@ -21,50 +20,17 @@ def modelscope_finetune(params):
batch_bins=params.batch_bins,
max_epoch=params.max_epoch,
lr=params.lr)
=======
from modelscope.metainfo import Trainers
from modelscope.trainers import build_trainer
from funasr.datasets.ms_dataset import MsDataset
def modelscope_finetune(params):
if not os.path.exists(params["output_dir"]):
os.makedirs(params["output_dir"], exist_ok=True)
# dataset split ["train", "validation"]
ds_dict = MsDataset.load(params["data_dir"])
kwargs = dict(
model=params["model"],
model_revision=params["model_revision"],
data_dir=ds_dict,
dataset_type=params["dataset_type"],
work_dir=params["output_dir"],
batch_bins=params["batch_bins"],
max_epoch=params["max_epoch"],
lr=params["lr"])
>>>>>>> main
trainer = build_trainer(Trainers.speech_asr_trainer, default_args=kwargs)
trainer.train()
if __name__ == '__main__':
<<<<<<< HEAD
params = modelscope_args(model="damo/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch", data_path="./data")
params.output_dir = "./checkpoint"  # path where the model checkpoints are saved
params.data_path = "./example_data/"  # data path
params.dataset_type = "small"  # use "small" for small datasets; if the data exceeds 1000 hours, use "large"
params.batch_bins = 2000  # batch size; with dataset_type="small" batch_bins is in fbank feature frames, with dataset_type="large" it is in milliseconds
params.max_epoch = 50  # maximum number of training epochs
params.max_epoch = 20  # maximum number of training epochs
params.lr = 0.00005  # learning rate
=======
params = {}
params["output_dir"] = "./checkpoint"
params["data_dir"] = "./data"
params["batch_bins"] = 2000
params["dataset_type"] = "small"
params["max_epoch"] = 50
params["lr"] = 0.00005
params["model"] = "damo/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch"
params["model_revision"] = None
>>>>>>> main
modelscope_finetune(params)
modelscope_finetune(params)
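
For reference, a minimal sketch (not part of this diff) of decoding with the fine-tuned model. It assumes the ModelScope ASR pipeline can be pointed at the local "./checkpoint" directory that params["output_dir"] above tells the trainer to write; the paths are placeholders and should be adapted to the actual training output.

from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

# Load the ASR pipeline from the local fine-tuning output directory.
# "./checkpoint" mirrors params["output_dir"] in the script above; using a
# local directory here is an assumption, not something shown in this commit.
inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model="./checkpoint",
)
# Decode a Kaldi-style wav.scp list (same default path as the infer script below).
rec_result = inference_pipeline(audio_in="./data/test/wav.scp")
print(rec_result)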

Changed file 2 of 2: the inference script

@@ -1,33 +1,3 @@
<<<<<<< HEAD
import os
import shutil
import argparse
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
def modelscope_infer(args):
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpuid)
inference_pipeline = pipeline(
task=Tasks.auto_speech_recognition,
model=args.model,
output_dir=args.output_dir,
batch_size=args.batch_size,
param_dict={"decoding_model": args.decoding_mode, "hotword": args.hotword_txt}
)
inference_pipeline(audio_in=args.audio_in)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default="damo/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch")
parser.add_argument('--audio_in', type=str, default="./data/test/wav.scp")
parser.add_argument('--output_dir', type=str, default="./results/")
parser.add_argument('--decoding_mode', type=str, default="normal")
parser.add_argument('--hotword_txt', type=str, default=None)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--gpuid', type=str, default="0")
args = parser.parse_args()
modelscope_infer(args)
=======
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
@@ -40,5 +10,4 @@ if __name__ == "__main__":
output_dir=output_dir,
)
rec_result = inference_pipeline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
print(rec_result)
>>>>>>> main
print(rec_result)
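
As a usage note (not part of this commit), the argparse-driven script above can also be reproduced as a direct pipeline call. The sketch below mirrors the defaults shown in the diff (model id, wav.scp path, output_dir, batch_size) and the "offline" decoding mode used in the main-branch snippet; the hotword entry is simply left at the script's default of None.

from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model="damo/speech_UniASR_asr_2pass-tr-16k-common-vocab1582-pytorch",
    output_dir="./results/",
    batch_size=64,
)
# "offline" is the decoding mode used in the main-branch snippet above;
# "normal" is the argparse default of the script.
rec_result = inference_pipeline(
    audio_in="./data/test/wav.scp",
    param_dict={"decoding_model": "offline", "hotword": None},
)
print(rec_result)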