Mirror of https://github.com/modelscope/FunASR (synced 2025-09-15 14:48:36 +08:00)

Commit: bda3527dbb (parent: 78a3b75cc0)
Commit message: funasr_onnx funasr_torch

@@ -24,7 +24,7 @@ class ASRServicer(paraformer_pb2_grpc.ASRServicer):
             self.inference_16k_pipeline = pipeline(task=Tasks.auto_speech_recognition, model=model, vad_model=vad_model, punc_model=punc_model)
         elif self.backend == "onnxruntime":
             try:
-                from rapid_paraformer.paraformer_onnx import Paraformer
+                from funasr_onnx import Paraformer
             except ImportError:
                 raise ImportError(f"Please install onnxruntime environment")
             self.inference_16k_pipeline = Paraformer(model_dir=onnx_dir)

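For reference, a minimal standalone sketch of the backend selection shown in the hunk above, assuming only what the diff itself shows (`funasr_onnx` exposes `Paraformer(model_dir=...)`); the helper name `build_onnx_pipeline` is hypothetical.

```python
def build_onnx_pipeline(onnx_dir: str):
    """Lazily import the renamed funasr_onnx package and build the ONNX pipeline."""
    try:
        # After this commit the import target is funasr_onnx, not rapid_paraformer.
        from funasr_onnx import Paraformer
    except ImportError as e:
        raise ImportError("Please install the onnxruntime environment") from e
    return Paraformer(model_dir=onnx_dir)
```
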
@@ -27,10 +27,11 @@
    ```
 
 
-2. Install the `torch_paraformer`.
+2. Install the `funasr_torch`.
    ```shell
    git clone https://github.com/alibaba/FunASR.git && cd FunASR
    cd funasr/runtime/python/libtorch
    python setup.py build
    python setup.py install
    ```
 
@@ -41,7 +42,7 @@
 - Output: `List[str]`: recognition result.
 - Example:
    ```python
-   from torch_paraformer import Paraformer
+   from funasr_torch import Paraformer
 
    model_dir = "/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
    model = Paraformer(model_dir, batch_size=1)

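For context, a slightly fuller usage sketch assembled from the README lines in this hunk; the wav path below is a placeholder, and the call convention (wav paths in, `List[str]` out) follows the `Output` line above.

```python
from funasr_torch import Paraformer

# Exported model directory taken from the README example above.
model_dir = "/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
model = Paraformer(model_dir, batch_size=1)

wav_path = ["/path/to/example_16k.wav"]  # placeholder 16 kHz input
result = model(wav_path)                 # expected output: List[str] recognition result
print(result)
```
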
@@ -1,5 +1,5 @@
 
-from torch_paraformer import Paraformer
+from funasr_torch import Paraformer
 
 model_dir = "/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
 model = Paraformer(model_dir, batch_size=1)

@@ -14,8 +14,8 @@ def get_readme():
 
 
 setuptools.setup(
-    name='torch_paraformer',
-    version='0.0.1',
+    name='funasr_torch',
+    version='0.0.2',
     platforms="Any",
     url="https://github.com/alibaba-damo-academy/FunASR.git",
     author="Speech Lab, Alibaba Group, China",

@@ -31,7 +31,7 @@ setuptools.setup(
                       "PyYAML>=5.1.2", "torch-quant >= 0.4.0"],
     packages=find_packages(include=["torch_paraformer*"]),
     keywords=[
-        'funasr,paraformer'
+        'funasr,paraformer, funasr_torch'
     ],
     classifiers=[
         'Programming Language :: Python :: 3.6',

@@ -32,8 +32,8 @@
    ```
 
 
-2. Install the `rapid_paraformer`.
-   - Build the rapid_paraformer `whl`
+2. Install the `funasr_onnx`.
+   - Build the funasr_onnx `whl`
    ```shell
    git clone https://github.com/alibaba/FunASR.git && cd FunASR
    cd funasr/runtime/python/onnxruntime
@@ -41,7 +41,7 @@
    ```
    - Install the build `whl`
    ```bash
-   pip install dist/rapid_paraformer-0.0.1-py3-none-any.whl
+   pip install dist/funasr_onnx-0.0.2-py3-none-any.whl
    ```
 
 3. Run the demo.
@@ -50,7 +50,7 @@
 - Output: `List[str]`: recognition result.
 - Example:
    ```python
-   from rapid_paraformer import Paraformer
+   from funasr_onnx import Paraformer
 
    model_dir = "/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
    model = Paraformer(model_dir, batch_size=1)

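The ONNX package mirrors the libtorch API. A short hedged sketch follows; the wav path is a placeholder, and the `quantize`/`intra_op_num_threads` keywords are borrowed from the export scripts later in this diff rather than from this README hunk.

```python
from funasr_onnx import Paraformer

model_dir = "/nfs/zhifu.gzf/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
# Constructor keywords as used by the export scripts in this commit; values are illustrative.
model = Paraformer(model_dir, batch_size=1, quantize=False, intra_op_num_threads=1)

result = model(["/path/to/example_16k.wav"])  # placeholder input; expected List[str]
print(result)
```
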
Binary file not shown (image; before size 136 KiB).

@@ -1,5 +1,5 @@
 
-from rapid_paraformer import Paraformer
+from funasr_onnx import Paraformer
 
 #model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
 #model_dir = "/Users/shixian/code/funasr/export/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"

@@ -0,0 +1,2 @@
+# -*- encoding: utf-8 -*-
+from .paraformer_bin import Paraformer

@@ -1,7 +1,5 @@
 # -*- encoding: utf-8 -*-
-# @Author: SWHL
-# @Contact: liekkaskono@163.com
-from cgitb import text
+
 import os.path
 from pathlib import Path
 from typing import List, Union, Tuple

@@ -1,4 +0,0 @@
-# -*- encoding: utf-8 -*-
-# @Author: SWHL
-# @Contact: liekkaskono@163.com
-from .paraformer_onnx import Paraformer

@@ -12,17 +12,17 @@ def get_readme():
     return readme
 
 
-MODULE_NAME = 'rapid_paraformer'
+MODULE_NAME = 'funasr_onnx'
 VERSION_NUM = '0.0.1'
 
 setuptools.setup(
     name=MODULE_NAME,
     version=VERSION_NUM,
     platforms="Any",
-    description="Using paraformer with ONNXRuntime",
-    author="FunASR",
-    url="https://github.com/alibaba-damo-academy/FunASR.git",
+    author="Speech Lab, Alibaba Group, China",
     author_email="funasr@list.alibaba-inc.com",
+    url="https://github.com/alibaba-damo-academy/FunASR",
+    description="FunASR: A Fundamental End-to-End Speech Recognition Toolkit",
     license='MIT',
     long_description=get_readme(),
     long_description_content_type='text/markdown',

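After installing the renamed wheel (`funasr_onnx-0.0.2-py3-none-any.whl` per the README hunk earlier in this diff), a quick import check can confirm the new package name resolves to the Paraformer re-exported by the new `__init__.py`; this snippet is illustrative, not part of the commit.

```python
import importlib

# funasr_onnx/__init__.py (added in this commit) re-exports Paraformer from paraformer_bin.
mod = importlib.import_module("funasr_onnx")
print(mod.Paraformer)
```
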
@@ -15,9 +15,9 @@ parser.add_argument('--output_dir', type=str, default=None, help='amp fallback n
 args = parser.parse_args()
 
 
-from funasr.runtime.python.libtorch.torch_paraformer import Paraformer
+from funasr.runtime.python.libtorch.funasr_torch import Paraformer
 if args.backend == "onnx":
-    from funasr.runtime.python.onnxruntime.rapid_paraformer import Paraformer
+    from funasr.runtime.python.onnxruntime.funasr_onnx import Paraformer
 
 model = Paraformer(args.model_dir, batch_size=1, quantize=args.quantize, intra_op_num_threads=args.intra_op_num_threads)
 
@@ -14,9 +14,9 @@ parser.add_argument('--intra_op_num_threads', type=int, default=1, help='intra_o
 args = parser.parse_args()
 
 
-from funasr.runtime.python.libtorch.torch_paraformer import Paraformer
+from funasr.runtime.python.libtorch.funasr_torch import Paraformer
 if args.backend == "onnx":
-    from funasr.runtime.python.onnxruntime.rapid_paraformer import Paraformer
+    from funasr.runtime.python.onnxruntime.funasr_onnx import Paraformer
 
 model = Paraformer(args.model_dir, batch_size=1, quantize=args.quantize, intra_op_num_threads=args.intra_op_num_threads)
 
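Both export scripts above share the same backend-selection idiom. A self-contained sketch follows; only `--intra_op_num_threads` (default 1) is taken from the hunk header, while the other argument defaults and flag styles are assumptions.

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--backend', type=str, default='onnx')          # assumed default
parser.add_argument('--model_dir', type=str, required=True)         # assumed required
parser.add_argument('--quantize', action='store_true')              # assumed flag style
parser.add_argument('--intra_op_num_threads', type=int, default=1)  # default per the hunk header
args = parser.parse_args()

# Default to the libtorch backend; switch to the ONNX backend when requested.
from funasr.runtime.python.libtorch.funasr_torch import Paraformer
if args.backend == "onnx":
    from funasr.runtime.python.onnxruntime.funasr_onnx import Paraformer

model = Paraformer(args.model_dir, batch_size=1, quantize=args.quantize,
                   intra_op_num_threads=args.intra_op_num_threads)
```
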