# Mirror of https://github.com/FunAudioLLM/SenseVoice.git
# (synced 2025-09-15 15:08:35 +08:00)

# Select the inference device with the SENSEVOICE_DEVICE environment
# variable; the default is cuda:0, e.g.:
# export SENSEVOICE_DEVICE=cuda:1
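#
# A minimal launch sketch (assumes this file is saved as api.py and the
# SenseVoice dependencies are installed):
#   python api.py   # serves on http://0.0.0.0:50000 via the __main__ block below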

import os
import re
from enum import Enum
from io import BytesIO
from typing import List, Optional

import torchaudio
from fastapi import FastAPI, File, Form, UploadFile
from fastapi.responses import HTMLResponse
from funasr.utils.postprocess_utils import rich_transcription_postprocess
from typing_extensions import Annotated

from model import SenseVoiceSmall

TARGET_FS = 16000  # SenseVoiceSmall expects 16 kHz input; other rates are resampled below


class Language(str, Enum):
    auto = "auto"
    zh = "zh"
    en = "en"
    yue = "yue"
    ja = "ja"
    ko = "ko"
    nospeech = "nospeech"


model_dir = "iic/SenseVoiceSmall"
m, kwargs = SenseVoiceSmall.from_pretrained(model=model_dir, device=os.getenv("SENSEVOICE_DEVICE", "cuda:0"))
m.eval()  # inference mode: disables dropout and other training-only behaviour

# Matches one SenseVoice special token at a time, e.g. "<|zh|>" or "<|NEUTRAL|>".
# The non-greedy ".*?" is deliberate: a greedy ".*" would also delete any
# transcription text sitting between two tokens.
regex = r"<\|.*?\|>"
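# For reference, raw model output looks roughly like (assumed example):
#   "<|en|><|NEUTRAL|><|Speech|><|woitn|>hello world"
# which clean_text below reduces to "hello world".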

app = FastAPI()


@app.get("/", response_class=HTMLResponse)
async def root():
    return """
<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <title>API information</title>
</head>
<body>
    <a href="./docs">API documentation</a>
</body>
</html>
"""


@app.post("/api/v1/asr")
async def turn_audio_to_text(
    files: Annotated[List[UploadFile], File(description="wav or mp3 audios (any sample rate; resampled to 16 kHz)")],
    keys: Annotated[Optional[str], Form(description="comma-separated name for each audio; defaults to the filenames")] = None,
    lang: Annotated[Language, Form(description="language of the audio content")] = Language.auto,
):
    audios = []
    for file in files:
        file_io = BytesIO(await file.read())
        waveform, audio_fs = torchaudio.load(file_io)

        # Resample to the model's expected rate if needed.
        if audio_fs != TARGET_FS:
            resampler = torchaudio.transforms.Resample(orig_freq=audio_fs, new_freq=TARGET_FS)
            waveform = resampler(waveform)

        # Downmix multi-channel audio to mono by averaging the channels.
        waveform = waveform.mean(0)
        audios.append(waveform)

    if lang == "":
        lang = Language.auto
    if not keys:
        key = [f.filename for f in files]
    else:
        key = keys.split(",")

    res = m.inference(
        data_in=audios,
        language=lang,  # "auto", "zh", "en", "yue", "ja", "ko", "nospeech"
        use_itn=False,
        ban_emo_unk=False,
        key=key,
        fs=TARGET_FS,
        **kwargs,
    )
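
    # use_itn=False keeps the raw spoken form; per the SenseVoice docs,
    # setting it to True adds punctuation and inverse text normalization.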
    if len(res) == 0:
        return {"result": []}
    for it in res[0]:
        it["raw_text"] = it["text"]  # untouched model output, tokens included
        it["clean_text"] = re.sub(regex, "", it["text"], flags=re.MULTILINE)
        it["text"] = rich_transcription_postprocess(it["text"])
    return {"result": res[0]}


if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=50000)