FunASR/examples/industrial_data_pretraining/paraformer/infer.sh

# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
# method1, inference from the model hub
# for more input types, please refer to readme.md
input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav"
output_dir="./outputs/debug"
model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
model_revision="v2.0.4"
device="cuda:0" # "cuda:0" for gpu0, "cuda:1" for gpu1, "cpu"
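
# note (added for clarity, not in the original script): the ++key=value flags below
# are hydra/omegaconf-style config overrides consumed by funasr.bin.inference;
# other fields of the model config can be overridden the same way.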
python -m funasr.bin.inference \
    ++model="${model}" \
    ++model_revision="${model_revision}" \
    ++input="${input}" \
    ++output_dir="${output_dir}" \
    ++device="${device}"
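
# method2 (a minimal sketch, not part of the original snippet): inference from a
# locally downloaded model directory instead of the model hub. The path below is
# a hypothetical placeholder; point it at a directory you have downloaded yourself.
# local_model_dir="/path/to/local/model_dir"  # hypothetical path
# python -m funasr.bin.inference \
#     ++model="${local_model_dir}" \
#     ++input="${input}" \
#     ++output_dir="${output_dir}" \
#     ++device="${device}"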