Mirror of https://github.com/modelscope/FunASR (synced 2025-09-15 14:48:36 +08:00)

Merge pull request #317 from xiaowan0322/feat/cuda

[Export] support gpu inference

Commit: 2e5cd36e0f
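In short: the export path gains a `device` option that moves the model and its dummy inputs to CUDA before `torch.jit.trace`, and the Python runtimes gain a `device_id` option so the exported Paraformer can run inference on GPU.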
funasr/export/export_model.py

@@ -19,6 +19,7 @@ class ModelExport:
         self,
         cache_dir: Union[Path, str] = None,
         onnx: bool = True,
+        device: str = "cpu",
         quant: bool = True,
         fallback_num: int = 0,
         audio_in: str = None,
@@ -36,6 +37,7 @@ class ModelExport:
         )
         print("output dir: {}".format(self.cache_dir))
         self.onnx = onnx
+        self.device = device
         self.quant = quant
         self.fallback_num = fallback_num
         self.frontend = None
@@ -112,6 +114,10 @@ class ModelExport:
         else:
             dummy_input = model.get_dummy_inputs()

+        if self.device == 'cuda':
+            model = model.cuda()
+            dummy_input = tuple([i.cuda() for i in dummy_input])
+
         # model_script = torch.jit.script(model)
         model_script = torch.jit.trace(model, dummy_input)
         model_script.save(os.path.join(path, f'{model.model_name}.torchscripts'))
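The point of the added lines is that `torch.jit.trace` requires the module and its example inputs to live on the same device, so both are moved to CUDA together. A minimal standalone sketch of the same pattern (the `Dummy` module, shapes, and output path are illustrative, not FunASR code):

import torch


class Dummy(torch.nn.Module):
    # stand-in for the exported ASR model; the real export pulls example
    # inputs from model.get_dummy_inputs()
    def forward(self, feats: torch.Tensor, feats_len: torch.Tensor):
        return feats.sum(dim=-1), feats_len.clone()


model = Dummy().eval()
dummy_input = (torch.randn(1, 100, 80), torch.tensor([100]))

device = "cuda"  # assumed flag value; mirrors self.device above
if device == "cuda" and torch.cuda.is_available():
    # module and example inputs must be on the same device for tracing
    model = model.cuda()
    dummy_input = tuple(i.cuda() for i in dummy_input)

model_script = torch.jit.trace(model, dummy_input)
model_script.save("dummy.torchscripts")  # placeholder output path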
@@ -234,6 +240,7 @@ if __name__ == '__main__':
     parser.add_argument('--model-name', type=str, required=True)
     parser.add_argument('--export-dir', type=str, required=True)
     parser.add_argument('--type', type=str, default='onnx', help='["onnx", "torch"]')
+    parser.add_argument('--device', type=str, default='cpu', help='["cpu", "cuda"]')
     parser.add_argument('--quantize', type=str2bool, default=False, help='export quantized model')
     parser.add_argument('--fallback-num', type=int, default=0, help='amp fallback number')
     parser.add_argument('--audio_in', type=str, default=None, help='["wav", "wav.scp"]')
@@ -243,6 +250,7 @@ if __name__ == '__main__':
     export_model = ModelExport(
         cache_dir=args.export_dir,
         onnx=args.type == 'onnx',
+        device=args.device,
         quant=args.quantize,
         fallback_num=args.fallback_num,
         audio_in=args.audio_in,
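With the flag wired through, a GPU export can be requested from the command line or programmatically. A sketch under stated assumptions (the invocation and import path are assumed from the script layout above; directory and model name are placeholders):

# Assumed CLI invocation with the new flag:
#   python -m funasr.export.export_model \
#       --model-name damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch \
#       --export-dir ./export --type torch --device cuda --quantize False

from funasr.export.export_model import ModelExport  # assumed import path

export_model = ModelExport(
    cache_dir="./export",   # placeholder directory
    onnx=False,             # --type torch selects the TorchScript branch
    device="cuda",          # new in this PR; the default stays "cpu"
    quant=False,
    fallback_num=0,
    audio_in=None,
)
# __main__ then kicks off the export for args.model_name (call omitted here)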
Runtime demo for the libtorch backend (funasr_torch)

@@ -1,20 +1,15 @@
 from funasr_torch import Paraformer

-#model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model = Paraformer(model_dir, batch_size=2)
-
-# when using paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch, you should set pred_bias=0
-# plot_timestamp_to works only when using speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch
-# model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch"
-# model = Paraformer(model_dir, batch_size=2, pred_bias=0)
+model_dir = "/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+
+model = Paraformer(model_dir, batch_size=1) # cpu
+# model = Paraformer(model_dir, batch_size=1, device_id=0) # gpu

 # when using paraformer-large-vad-punc model, you can set plot_timestamp_to="./xx.png" to get figure of alignment besides timestamps
-# model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-# model = Paraformer(model_dir, batch_size=1)
 # model = Paraformer(model_dir, batch_size=1, plot_timestamp_to="test.png")

 wav_path = "YourPath/xx.wav"

 result = model(wav_path)
 print(result)
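The demo hard-codes the device choice. Since `device_id == -1` selects the CPU branch in the wrapper (see the inference hunk below), a small guard makes the same demo portable across machines with and without a GPU — a sketch, passing `device_id` explicitly:

import torch
from funasr_torch import Paraformer

# -1 routes inference through the CPU branch; any GPU ordinal (e.g. 0)
# routes tensors through .cuda()
device_id = 0 if torch.cuda.is_available() else -1

model_dir = "/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
model = Paraformer(model_dir, batch_size=1, device_id=device_id)

print(model("YourPath/xx.wav"))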
funasr_torch runtime, class Paraformer

@@ -46,6 +46,7 @@ class Paraformer():
         )
         self.ort_infer = torch.jit.load(model_file)
         self.batch_size = batch_size
+        self.device_id = device_id
         self.plot_timestamp_to = plot_timestamp_to
         self.pred_bias = pred_bias

@@ -58,8 +59,13 @@ class Paraformer():
             end_idx = min(waveform_nums, beg_idx + self.batch_size)
             feats, feats_len = self.extract_feat(waveform_list[beg_idx:end_idx])
             try:
-                outputs = self.ort_infer(feats, feats_len)
-                am_scores, valid_token_lens = outputs[0], outputs[1]
+                with torch.no_grad():
+                    if int(self.device_id) == -1:
+                        outputs = self.ort_infer(feats, feats_len)
+                        am_scores, valid_token_lens = outputs[0], outputs[1]
+                    else:
+                        outputs = self.ort_infer(feats.cuda(), feats_len.cuda())
+                        am_scores, valid_token_lens = outputs[0].cpu(), outputs[1].cpu()
                 if len(outputs) == 4:
                     # for BiCifParaformer Inference
                     us_alphas, us_peaks = outputs[2], outputs[3]
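The dispatch keeps pre/post-processing on CPU: each batch is moved to CUDA for the forward pass and the scores are pulled back with `.cpu()` before decoding, all under `torch.no_grad()`. A stripped-down sketch of the same pattern (names mirror the hunk; "model.pt" and the shapes are placeholders, and the hunk itself does not show where the module is moved to GPU, so that step is assumed here):

import torch

device_id = 0 if torch.cuda.is_available() else -1

ort_infer = torch.jit.load("model.pt")  # placeholder TorchScript file
if int(device_id) != -1:
    # the wrapper presumably puts the module on the GPU at init time;
    # the scripted call below needs it there
    ort_infer = ort_infer.cuda()

feats = torch.randn(2, 100, 80)  # illustrative feature batch
feats_len = torch.tensor([100, 98], dtype=torch.int32)

with torch.no_grad():  # inference only; no autograd bookkeeping
    if int(device_id) == -1:
        outputs = ort_infer(feats, feats_len)
        am_scores, valid_token_lens = outputs[0], outputs[1]
    else:
        # inputs go to the GPU, scores come back to the CPU for decoding
        outputs = ort_infer(feats.cuda(), feats_len.cuda())
        am_scores, valid_token_lens = outputs[0].cpu(), outputs[1].cpu()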
Runtime demo for the onnxruntime backend (funasr_onnx)

@@ -1,20 +1,15 @@
 from funasr_onnx import Paraformer

-#model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-model = Paraformer(model_dir, batch_size=2)
-
-# when using paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch, you should set pred_bias=0
-# plot_timestamp_to works only when using speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch
-# model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch"
-# model = Paraformer(model_dir, batch_size=2, pred_bias=0)
+model_dir = "/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
+
+model = Paraformer(model_dir, batch_size=2, plot_timestamp_to="./", pred_bias=0) # cpu
+# model = Paraformer(model_dir, batch_size=2, plot_timestamp_to="./", pred_bias=0, device_id=0) # gpu

 # when using paraformer-large-vad-punc model, you can set plot_timestamp_to="./xx.png" to get figure of alignment besides timestamps
-# model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-# model = Paraformer(model_dir, batch_size=1)
 # model = Paraformer(model_dir, batch_size=1, plot_timestamp_to="test.png")

 wav_path = "YourPath/xx.wav"

 result = model(wav_path)
 print(result)
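For the onnxruntime backend, `device_id` is presumably wired to the CUDA execution provider rather than to `.cuda()` calls, but the contract in the demo is the same as for libtorch: omit `device_id` (or pass `-1`) for CPU, and pass a GPU ordinal such as `0` for GPU.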