From f53c55cff30ec34bdfde61e318e6910d57d184a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=8C=97=E5=BF=B5?= Date: Tue, 21 Feb 2023 19:26:16 +0800 Subject: [PATCH] modify onnxruntime environment dependencies --- funasr/runtime/python/grpc/Readme.md | 31 +++++++++++++++++-- funasr/runtime/python/grpc/grpc_server.py | 6 ++-- funasr/runtime/python/grpc/paraformer_onnx.py | 1 - funasr/runtime/python/grpc/utils | 1 - funasr/runtime/python/onnxruntime/README.md | 2 +- 5 files changed, 33 insertions(+), 8 deletions(-) delete mode 120000 funasr/runtime/python/grpc/paraformer_onnx.py delete mode 120000 funasr/runtime/python/grpc/utils diff --git a/funasr/runtime/python/grpc/Readme.md b/funasr/runtime/python/grpc/Readme.md index 053b3d05b..e02000770 100644 --- a/funasr/runtime/python/grpc/Readme.md +++ b/funasr/runtime/python/grpc/Readme.md @@ -5,7 +5,7 @@ The audio data is in streaming, the asr inference process is in offline. ## Steps -Step 1) Prepare server environment (on server). +Step 1-1) Prepare server modelscope pipeline environment (on server).          Install modelscope and funasr with pip or with cuda-docker image. @@ -24,6 +24,26 @@ git clone https://github.com/alibaba-damo-academy/FunASR cd FunASR/funasr/runtime/python/grpc/ ``` +Step 1-2) Optional, Prepare server onnxruntime environment (on server). + +Install [`rapid_paraformer`](https://github.com/alibaba-damo-academy/FunASR/tree/main/funasr/runtime/python/onnxruntime). + +- Build the rapid_paraformer `whl` +``` +git clone https://github.com/alibaba/FunASR.git && cd FunASR +cd funasr/runtime/python/onnxruntime/rapid_paraformer +python setup.py bdist_wheel +``` + +- Install the built `whl` +``` +pip install dist/rapid_paraformer-0.0.1-py3-none-any.whl +``` + +Export the model; for more details, refer to the [export docs](https://github.com/alibaba-damo-academy/FunASR/tree/main/funasr/runtime/python/onnxruntime). 
+``` +python -m funasr.export.export_model 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' "./export" true +``` Step 2) Optional, generate protobuf file (run on server, the two generated pb files are both used for server and client). ``` @@ -44,9 +64,16 @@ python -m pip install grpcio grpcio-tools ``` ``` # Start server. -python grpc_main_server.py --port 10095 +python grpc_main_server.py --port 10095 --backend pipeline ``` +If you want to run the server with onnxruntime, please set the `backend` and `onnx_dir` parameters. +``` +# Start server. +python grpc_main_server.py --port 10095 --backend onnxruntime --onnx_dir /models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch +``` + + Step 4) Start grpc client (on client with microphone). ``` # Optional, Install dependency. diff --git a/funasr/runtime/python/grpc/grpc_server.py b/funasr/runtime/python/grpc/grpc_server.py index 2d03f9dcf..baa7ed59a 100644 --- a/funasr/runtime/python/grpc/grpc_server.py +++ b/funasr/runtime/python/grpc/grpc_server.py @@ -5,7 +5,7 @@ import time import paraformer_pb2_grpc from paraformer_pb2 import Response -from utils.frontend import load_bytes +from rapid_paraformer.utils.frontend import load_bytes class ASRServicer(paraformer_pb2_grpc.ASRServicer): @@ -25,9 +25,9 @@ class ASRServicer(paraformer_pb2_grpc.ASRServicer): self.inference_16k_pipeline = pipeline(task=Tasks.auto_speech_recognition, model=model) elif self.backend == "onnxruntime": try: - from paraformer_onnx import Paraformer + from rapid_paraformer.paraformer_onnx import Paraformer except ImportError: - raise ImportError(f"Please install onnxruntime requirements, reference https://github.com/alibaba-damo-academy/FunASR/tree/main/funasr/runtime/python/onnxruntime/rapid_paraformer") + raise ImportError(f"Please install onnxruntime environment") self.inference_16k_pipeline = Paraformer(model_dir=onnx_dir) self.sample_rate = sample_rate diff --git a/funasr/runtime/python/grpc/paraformer_onnx.py 
b/funasr/runtime/python/grpc/paraformer_onnx.py deleted file mode 120000 index a05b2235f..000000000 --- a/funasr/runtime/python/grpc/paraformer_onnx.py +++ /dev/null @@ -1 +0,0 @@ -../onnxruntime/paraformer/rapid_paraformer/paraformer_onnx.py \ No newline at end of file diff --git a/funasr/runtime/python/grpc/utils b/funasr/runtime/python/grpc/utils deleted file mode 120000 index 831d965b8..000000000 --- a/funasr/runtime/python/grpc/utils +++ /dev/null @@ -1 +0,0 @@ -../onnxruntime/paraformer/rapid_paraformer/utils \ No newline at end of file diff --git a/funasr/runtime/python/onnxruntime/README.md b/funasr/runtime/python/onnxruntime/README.md index 99dba99ef..ca6f6b632 100644 --- a/funasr/runtime/python/onnxruntime/README.md +++ b/funasr/runtime/python/onnxruntime/README.md @@ -36,7 +36,7 @@ - Build the rapid_paraformer `whl` ```shell git clone https://github.com/alibaba/FunASR.git && cd FunASR - cd funasr/runtime/python/onnxruntime/rapid_paraformer + cd funasr/runtime/python/onnxruntime python setup.py bdist_wheel ``` - Install the build `whl`