Merge branch 'main' into dev_gzf_funasr2

zhifu gao 2023-11-23 12:53:47 +08:00 committed by GitHub
commit 61567c6d3b
11 changed files with 36 additions and 26 deletions

View File

@@ -32,7 +32,7 @@ def padding(data, float_pad_value=0.0, int_pad_value=-1):
batch[data_name] = tensor_pad
batch[data_name + "_lengths"] = tensor_lengths
# DHA, EAHC NOT INCLUDED
# SAC LABEL INCLUDE
if "hotword_indxs" in batch:
# if hotword indxs in batch
# use it to slice hotwords out
@@ -41,28 +41,25 @@ def padding(data, float_pad_value=0.0, int_pad_value=-1):
text = batch['text']
text_lengths = batch['text_lengths']
hotword_indxs = batch['hotword_indxs']
num_hw = sum([int(i) for i in batch['hotword_indxs_lengths'] if i != 1]) // 2
B, t1 = text.shape
dha_pad = torch.ones_like(text) * -1
_, t1 = text.shape
t1 += 1 # TODO: as parameter which is same as predictor_bias
ideal_attn = torch.zeros(B, t1, num_hw+1)
nth_hw = 0
for b, (hotword_indx, one_text, length) in enumerate(zip(hotword_indxs, text, text_lengths)):
ideal_attn[b][:,-1] = 1
dha_pad[b][:length] = 8405
if hotword_indx[0] != -1:
start, end = int(hotword_indx[0]), int(hotword_indx[1])
hotword = one_text[start: end+1]
hotword_list.append(hotword)
hotword_lengths.append(end-start+1)
ideal_attn[b][start:end+1, nth_hw] = 1
ideal_attn[b][start:end+1, -1] = 0
dha_pad[b][start: end+1] = one_text[start: end+1]
nth_hw += 1
if len(hotword_indx) == 4 and hotword_indx[2] != -1:
# the second hotword if exist
start, end = int(hotword_indx[2]), int(hotword_indx[3])
hotword_list.append(one_text[start: end+1])
hotword_lengths.append(end-start+1)
ideal_attn[b][start:end+1, nth_hw-1] = 1
ideal_attn[b][start:end+1, -1] = 0
dha_pad[b][start: end+1] = one_text[start: end+1]
nth_hw += 1
hotword_list.append(torch.tensor([1]))
hotword_lengths.append(1)
@@ -71,8 +68,7 @@ def padding(data, float_pad_value=0.0, int_pad_value=-1):
padding_value=0)
batch["hotword_pad"] = hotword_pad
batch["hotword_lengths"] = torch.tensor(hotword_lengths, dtype=torch.int32)
batch['ideal_attn'] = ideal_attn
batch['dha_pad'] = dha_pad
del batch['hotword_indxs']
del batch['hotword_indxs_lengths']
return keys, batch
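
For intuition, here is a minimal sketch (illustrative, not part of the commit) of the SAC-label tensors the padding above derives for a toy sample with one hotword spanning positions 2..4. The field semantics, the -1 padding, and the 8405 label follow the code above; the token ids are invented:

```python
import torch

# Toy sample: 6 text tokens, one hotword at positions 2..4.
text = torch.tensor([[11, 12, 13, 14, 15, 16]])
text_lengths = torch.tensor([6])
hotword_indxs = torch.tensor([[2.0, 4.0]])  # [start, end]; -1 marks "no hotword"

B, t1 = text.shape
t1 += 1                               # one extra step for the predictor bias
num_hw = 1                            # one hotword in the whole batch
ideal_attn = torch.zeros(B, t1, num_hw + 1)
ideal_attn[0][:, -1] = 1              # default: attend to the "no-bias" slot
ideal_attn[0][2:5, 0] = 1             # hotword positions attend to slot 0 ...
ideal_attn[0][2:5, -1] = 0            # ... instead of the "no-bias" slot

dha_pad = torch.ones_like(text) * -1  # -1 pads beyond text_lengths
dha_pad[0][:6] = 8405                 # 8405 labels "no hotword at this position"
dha_pad[0][2:5] = text[0][2:5]        # hotword positions keep their token ids
```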

View File

@@ -16,7 +16,7 @@ Supports real-time streaming speech recognition, uses non-streaming models for e
#### Server Deployment
```shell
cd funasr/runtime/python/websocket
cd runtime/python/websocket
python funasr_wss_server.py --port 10095
```
@@ -161,4 +161,4 @@ If you want to train from scratch, usually for academic models, you can start tr
cd egs/aishell/paraformer
. ./run.sh --CUDA_VISIBLE_DEVICES="0,1" --gpu_num=2
```
More examples can be found in the [docs](https://alibaba-damo-academy.github.io/FunASR/en/modelscope_pipeline/quick_start.html)
More examples can be found in the [docs](https://alibaba-damo-academy.github.io/FunASR/en/modelscope_pipeline/quick_start.html)

View File

@@ -17,7 +17,7 @@
##### Server Deployment
```shell
cd funasr/runtime/python/websocket
cd runtime/python/websocket
python funasr_wss_server.py --port 10095
```
@@ -161,4 +161,4 @@ cd egs/aishell/paraformer
. ./run.sh --CUDA_VISIBLE_DEVICES="0,1" --gpu_num=2
```
More examples can be found in the [docs](https://alibaba-damo-academy.github.io/FunASR/en/academic_recipe/asr_recipe.html)
More examples can be found in the [docs](https://alibaba-damo-academy.github.io/FunASR/en/academic_recipe/asr_recipe.html)

View File

@@ -94,7 +94,9 @@ Introduction to run_server.sh parameters:
--punc-quant: True for the quantized PUNC model, False for the non-quantized PUNC model. Default is True.
--itn-dir: modelscope model ID or local model path.
--port: Port number that the server listens on. Default is 10095.
--decoder-thread-num: Number of inference threads that the server starts. Default is 8.
--decoder-thread-num: Size of the server-side thread pool, i.e., the maximum number of concurrent requests supported. Default is 8.
--model-thread-num: Number of internal threads per recognition session, controlling the parallelism of the ONNX model.
The default value is 1. It is recommended that decoder-thread-num * model-thread-num equal the total number of threads (see the sketch after this list).
--io-thread-num: Number of IO threads that the server starts. Default is 1.
--certfile <string>: SSL certificate file. Default is ../../../ssl_key/server.crt. To disable SSL, set this parameter to 0.
--keyfile <string>: SSL key file. Default is ../../../ssl_key/server.key.
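
As a rough illustration of the sizing rule above, here is a hypothetical helper (not a FunASR utility) that derives the two values from the machine's thread count:

```python
import os

def thread_settings(model_thread_num: int = 1) -> tuple[int, int]:
    """Hypothetical helper: pick (decoder_thread_num, model_thread_num).

    decoder_thread_num bounds how many requests run concurrently, while
    model_thread_num bounds per-request ONNX parallelism; per the docs
    above, their product should equal the total number of threads.
    """
    total = os.cpu_count() or 8
    return max(1, total // model_thread_num), model_thread_num

# On a 16-thread machine, thread_settings(2) -> (8, 2): up to 8 concurrent
# sessions, each running the ONNX model on 2 threads.
print(thread_settings(2))
```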

View File

@@ -73,7 +73,9 @@ Introduction to run_server.sh parameters:
--punc-quant: True for the quantized PUNC model, False for the non-quantized PUNC model. Default is True.
--itn-dir: modelscope model ID or local model path.
--port: Port number that the server listens on. Default is 10095.
--decoder-thread-num: Number of inference threads that the server starts. Default is 8.
--decoder-thread-num: Size of the server-side thread pool, i.e., the maximum number of concurrent requests supported. Default is 8.
--model-thread-num: Number of internal threads per recognition session, controlling the parallelism of the ONNX model.
The default value is 1. It is recommended that decoder-thread-num * model-thread-num equal the total number of threads.
--io-thread-num: Number of IO threads that the server starts. Default is 1.
--certfile <string>: SSL certificate file. Default is ../../../ssl_key/server.crt. To disable SSL, set this parameter to 0.
--keyfile <string>: SSL key file. Default is ../../../ssl_key/server.key.

View File

@@ -158,7 +158,9 @@ nohup bash run_server.sh \
--punc-quant: True for the quantized PUNC model, False for the non-quantized PUNC model. Default is True.
--itn-dir: modelscope model ID or local model path.
--port: Port number that the server listens on. Default is 10095.
--decoder-thread-num: Number of inference threads that the server starts. Default is 8.
--decoder-thread-num: Size of the server-side thread pool, i.e., the maximum number of concurrent requests supported. Default is 8.
--model-thread-num: Number of internal threads per recognition session, controlling the parallelism of the ONNX model. Default is 1.
It is recommended that decoder-thread-num * model-thread-num equal the total number of threads.
--io-thread-num: Number of IO threads that the server starts. Default is 1.
--certfile: SSL certificate file. Default is ../../../ssl_key/server.crt. To disable SSL, set this parameter to 0.
--keyfile: SSL key file. Default is ../../../ssl_key/server.key.

View File

@@ -175,11 +175,14 @@ nohup bash run_server.sh \
--lm-dir: modelscope model ID or local model path.
--itn-dir: modelscope model ID or local model path.
--port: Port number that the server listens on. Default is 10095.
--decoder-thread-num: Number of inference threads that the server starts. Default is 8.
--decoder-thread-num: Size of the server-side thread pool, i.e., the maximum number of concurrent requests supported. Default is 8.
--model-thread-num: Number of internal threads per recognition session, controlling the parallelism of the ONNX model. Default is 1.
It is recommended that decoder-thread-num * model-thread-num equal the total number of threads.
--io-thread-num: Number of IO threads that the server starts. Default is 1.
--certfile: SSL certificate file. Default is ../../../ssl_key/server.crt. To disable SSL, set this parameter to 0.
--keyfile: SSL key file. Default is ../../../ssl_key/server.key.
--hotword: Path to the hotword file, one hotword per line, in the format "hotword weight" (e.g., 阿里巴巴 20). If the client provides hotwords, they are merged with the client-provided hotwords.
--hotword: Path to the hotword file, one hotword per line, in the format "hotword weight" (e.g., 阿里巴巴 20).
If the client provides hotwords, they are merged with the client-provided ones; server-side hotwords take effect globally, while client-side hotwords apply only to the corresponding client.
```
### Shutting Down the FunASR Service

View File

@@ -111,7 +111,9 @@ nohup bash run_server_2pass.sh \
--punc-quant: True for the quantized PUNC model, False for the non-quantized PUNC model. Default is True.
--itn-dir: modelscope model ID or local model path.
--port: Port number that the server listens on. Default is 10095.
--decoder-thread-num: Number of inference threads that the server starts. Default is 8.
--decoder-thread-num: Size of the server-side thread pool, i.e., the maximum number of concurrent requests supported. Default is 8.
--model-thread-num: Number of internal threads per recognition session, controlling the parallelism of the ONNX model.
The default value is 1. It is recommended that decoder-thread-num * model-thread-num equal the total number of threads.
--io-thread-num: Number of IO threads that the server starts. Default is 1.
--certfile <string>: SSL certificate file. Default is ../../../ssl_key/server.crt. To disable SSL, set this parameter to 0.
--keyfile <string>: SSL key file. Default is ../../../ssl_key/server.key.

View File

@@ -120,11 +120,14 @@ nohup bash run_server_2pass.sh \
--punc-quant: True for the quantized PUNC model, False for the non-quantized PUNC model. Default is True.
--itn-dir: modelscope model ID or local model path.
--port: Port number that the server listens on. Default is 10095.
--decoder-thread-num: Number of inference threads that the server starts. Default is 8.
--decoder-thread-num: Size of the server-side thread pool, i.e., the maximum number of concurrent requests supported. Default is 8.
--model-thread-num: Number of internal threads per recognition session, controlling the parallelism of the ONNX model. Default is 1.
It is recommended that decoder-thread-num * model-thread-num equal the total number of threads.
--io-thread-num: Number of IO threads that the server starts. Default is 1.
--certfile: SSL certificate file. Default is ../../../ssl_key/server.crt. To disable SSL, set this parameter to 0.
--keyfile: SSL key file. Default is ../../../ssl_key/server.key.
--hotword: Path to the hotword file, one hotword per line, in the format "hotword weight" (e.g., 阿里巴巴 20). If the client provides hotwords, they are merged with the client-provided hotwords.
--hotword: Path to the hotword file, one hotword per line, in the format "hotword weight" (e.g., 阿里巴巴 20).
If the client provides hotwords, they are merged with the client-provided ones; server-side hotwords take effect globally, while client-side hotwords apply only to the corresponding client.
```
### Shutting Down the FunASR Service

View File

@@ -16,7 +16,7 @@ git clone https://github.com/alibaba/FunASR.git && cd FunASR
### Install the requirements for server
```shell
cd funasr/runtime/python/websocket
cd runtime/python/websocket
pip install -r requirements_server.txt
```

View File

@@ -53,13 +53,13 @@ parser.add_argument("--ncpu",
help="cpu cores")
parser.add_argument("--certfile",
type=str,
default="../ssl_key/server.crt",
default="../../ssl_key/server.crt",
required=False,
help="certfile for ssl")
parser.add_argument("--keyfile",
type=str,
default="../ssl_key/server.key",
default="../../ssl_key/server.key",
required=False,
help="keyfile for ssl")
args = parser.parse_args()
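
For context, here is a minimal sketch (an assumption, not code from this PR) of how certfile/keyfile arguments like these are typically consumed to start a TLS-enabled websocket server with the `websockets` library. The handler body, the `args.port` argument, and the "0 disables SSL" convention are placeholders mirroring the deployment docs above:

```python
import asyncio
import ssl

import websockets  # assumed dependency of the websocket server

async def handler(websocket):
    # Placeholder for the actual recognition handler.
    async for message in websocket:
        await websocket.send(message)

async def main():
    ssl_context = None
    if args.certfile and args.certfile != "0":  # "0" disables SSL, per the docs
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        ssl_context.load_cert_chain(args.certfile, keyfile=args.keyfile)
    # `args` comes from the parser above; --port is assumed to be defined there.
    async with websockets.serve(handler, "0.0.0.0", args.port, ssl=ssl_context):
        await asyncio.Future()  # serve until cancelled

asyncio.run(main())
```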