update wss_client_asr.py

雾聪 2023-06-13 19:39:07 +08:00
parent 6bdae5723e
commit 59113b50b5


@@ -1,7 +1,7 @@
 # -*- encoding: utf-8 -*-
 import os
 import time
-import websockets,ssl
+import websockets, ssl
 import asyncio
 # import threading
 import argparse
@@ -12,6 +12,7 @@ from funasr.fileio.datadir_writer import DatadirWriter
 import logging
+SUPPORT_AUDIO_TYPE_SETS = ['.wav', '.pcm']
 logging.basicConfig(level=logging.ERROR)
 parser = argparse.ArgumentParser()
@@ -53,7 +54,7 @@ parser.add_argument("--output_dir",
                     type=str,
                     default=None,
                     help="output_dir")
 parser.add_argument("--ssl",
                     type=int,
                     default=1,
@@ -68,6 +69,7 @@ args.chunk_size = [int(x) for x in args.chunk_size.split(",")]
 print(args)
 # voices = asyncio.Queue()
 from queue import Queue
 voices = Queue()
 ibest_writer = None
@@ -75,15 +77,16 @@
     writer = DatadirWriter(args.output_dir)
     ibest_writer = writer[f"1best_recog"]
 async def record_microphone():
     is_finished = False
     import pyaudio
-    #print("2")
+    # print("2")
     global voices
     FORMAT = pyaudio.paInt16
     CHANNELS = 1
     RATE = 16000
-    chunk_size = 60*args.chunk_size[1]/args.chunk_interval
+    chunk_size = 60 * args.chunk_size[1] / args.chunk_interval
     CHUNK = int(RATE / 1000 * chunk_size)
     p = pyaudio.PyAudio()
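
Note on the chunk sizing in this hunk: chunk_size is a duration in milliseconds derived from the streaming configuration, and CHUNK is the number of samples PyAudio reads per iteration. A minimal sketch of the arithmetic, assuming example values args.chunk_size[1] = 10 and args.chunk_interval = 10 (these values are illustrative, not taken from this commit):

RATE = 16000                              # microphone sample rate used by the script
chunk_size_ms = 60 * 10 / 10              # 60.0 ms of audio per chunk (example values)
CHUNK = int(RATE / 1000 * chunk_size_ms)  # 960 samples read per stream.read() call
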
@@ -94,19 +97,16 @@ async def record_microphone():
                     input=True,
                     frames_per_buffer=CHUNK)
-    message = json.dumps({"mode": args.mode, "chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval, "wav_name": "microphone", "is_speaking": True})
+    message = json.dumps({"mode": args.mode, "chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval,
+                          "wav_name": "microphone", "is_speaking": True})
     voices.put(message)
     while True:
         data = stream.read(CHUNK)
         message = data
         voices.put(message)
         await asyncio.sleep(0.005)
-async def record_from_scp(chunk_begin,chunk_size):
-    import wave
+async def record_from_scp(chunk_begin, chunk_size):
     global voices
     is_finished = False
     if args.audio_in.endswith(".scp"):
@@ -114,47 +114,51 @@ async def record_from_scp(chunk_begin, chunk_size):
         wavs = f_scp.readlines()
     else:
         wavs = [args.audio_in]
-    if chunk_size>0:
-        wavs=wavs[chunk_begin:chunk_begin+chunk_size]
+    if chunk_size > 0:
+        wavs = wavs[chunk_begin:chunk_begin + chunk_size]
     for wav in wavs:
         wav_splits = wav.strip().split()
         wav_name = wav_splits[0] if len(wav_splits) > 1 else "demo"
         wav_path = wav_splits[1] if len(wav_splits) > 1 else wav_splits[0]
-        # bytes_f = open(wav_path, "rb")
-        # bytes_data = bytes_f.read()
-        with wave.open(wav_path, "rb") as wav_file:
-            params = wav_file.getparams()
-            # header_length = wav_file.getheaders()[0][1]
-            # wav_file.setpos(header_length)
-            frames = wav_file.readframes(wav_file.getnframes())
-            audio_bytes = bytes(frames)
+        if wav_path.endswith(".pcm"):
+            with open(wav_path, "rb") as f:
+                audio_bytes = f.read()
+        elif wav_path.endswith(".wav"):
+            import wave
+            with wave.open(wav_path, "rb") as wav_file:
+                params = wav_file.getparams()
+                frames = wav_file.readframes(wav_file.getnframes())
+                audio_bytes = bytes(frames)
+        else:
+            raise NotImplementedError(
+                f'Not supported audio type')
         # stride = int(args.chunk_size/1000*16000*2)
-        stride = int(60*args.chunk_size[1]/args.chunk_interval/1000*16000*2)
-        chunk_num = (len(audio_bytes)-1)//stride + 1
+        stride = int(60 * args.chunk_size[1] / args.chunk_interval / 1000 * 16000 * 2)
+        chunk_num = (len(audio_bytes) - 1) // stride + 1
         # print(stride)
         # send first time
-        message = json.dumps({"mode": args.mode, "chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval, "wav_name": wav_name,"is_speaking": True})
+        message = json.dumps({"mode": args.mode, "chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval,
+                              "wav_name": wav_name, "is_speaking": True})
         voices.put(message)
         is_speaking = True
         for i in range(chunk_num):
-            beg = i*stride
-            data = audio_bytes[beg:beg+stride]
+            beg = i * stride
+            data = audio_bytes[beg:beg + stride]
             message = data
             voices.put(message)
-            if i == chunk_num-1:
+            if i == chunk_num - 1:
                 is_speaking = False
                 message = json.dumps({"is_speaking": is_speaking})
                 voices.put(message)
             # print("data_chunk: ", len(data_chunk))
             # print(voices.qsize())
-            sleep_duration = 0.001 if args.send_without_sleep else 60*args.chunk_size[1]/args.chunk_interval/1000
+            sleep_duration = 0.001 if args.send_without_sleep else 60 * args.chunk_size[1] / args.chunk_interval / 1000
             await asyncio.sleep(sleep_duration)
 async def ws_send():
     global voices
     global websocket
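
Note on the loading change above: record_from_scp() now branches on the file extension, reading .pcm files as raw bytes and .wav files through the wave module so that only the PCM frames (not the header) are streamed. A minimal standalone sketch of that path, with a hypothetical helper name; it mirrors the diff but is not the committed code verbatim:

import wave

def load_audio_bytes(wav_path):
    # raw 16 kHz / 16-bit PCM: the file content is already the payload
    if wav_path.endswith(".pcm"):
        with open(wav_path, "rb") as f:
            return f.read()
    # WAV: skip the header and keep only the audio frames
    elif wav_path.endswith(".wav"):
        with wave.open(wav_path, "rb") as wav_file:
            return bytes(wav_file.readframes(wav_file.getnframes()))
    raise NotImplementedError(f"Not supported audio type: {wav_path}")

With the same example values as above (chunk_size[1] = 10, chunk_interval = 10), the stride works out to int(60 * 10 / 10 / 1000 * 16000 * 2) = 1920 bytes, i.e. 60 ms of 16-bit mono audio per message.
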
@@ -172,8 +176,6 @@ async def ws_send():
             await asyncio.sleep(0.005)
         await asyncio.sleep(0.005)
 async def message(id):
     global websocket
     text_print = ""
@@ -188,17 +190,17 @@ async def message(id):
             text = meg["text"]
             if ibest_writer is not None:
                 ibest_writer["text"][wav_name] = text
             if meg["mode"] == "online":
                 text_print += "{}".format(text)
                 text_print = text_print[-args.words_max_print:]
                 os.system('clear')
-                print("\rpid"+str(id)+": "+text_print)
+                print("\rpid" + str(id) + ": " + text_print)
             elif meg["mode"] == "offline":
                 text_print += "{}".format(text)
                 text_print = text_print[-args.words_max_print:]
                 os.system('clear')
-                print("\rpid"+str(id)+": "+text_print)
+                print("\rpid" + str(id) + ": " + text_print)
             else:
                 if meg["mode"] == "2pass-online":
                     text_print_2pass_online += "{}".format(text)
@@ -216,6 +218,7 @@ async def message(id):
         traceback.print_exc()
         exit(0)
 async def print_messge():
     global websocket
     while True:
@@ -228,69 +231,75 @@ async def print_messge():
         traceback.print_exc()
         exit(0)
-async def ws_client(id,chunk_begin,chunk_size):
+async def ws_client(id, chunk_begin, chunk_size):
     global websocket
-    if args.ssl==1:
+    if args.ssl == 1:
         ssl_context = ssl.SSLContext()
         ssl_context.check_hostname = False
         ssl_context.verify_mode = ssl.CERT_NONE
         uri = "wss://{}:{}".format(args.host, args.port)
     else:
         uri = "ws://{}:{}".format(args.host, args.port)
-        ssl_context=None
-    print("connect to",uri)
-    async for websocket in websockets.connect(uri, subprotocols=["binary"], ping_interval=None,ssl=ssl_context):
+        ssl_context = None
+    print("connect to", uri)
+    async for websocket in websockets.connect(uri, subprotocols=["binary"], ping_interval=None, ssl=ssl_context):
         if args.audio_in is not None:
-            task = asyncio.create_task(record_from_scp(chunk_begin,chunk_size))
+            task = asyncio.create_task(record_from_scp(chunk_begin, chunk_size))
         else:
             task = asyncio.create_task(record_microphone())
         task2 = asyncio.create_task(ws_send())
         task3 = asyncio.create_task(message(id))
         await asyncio.gather(task, task2, task3)
-def one_thread(id,chunk_begin,chunk_size):
-    asyncio.get_event_loop().run_until_complete(ws_client(id,chunk_begin,chunk_size))
+def one_thread(id, chunk_begin, chunk_size):
+    asyncio.get_event_loop().run_until_complete(ws_client(id, chunk_begin, chunk_size))
     asyncio.get_event_loop().run_forever()
 if __name__ == '__main__':
     # for microphone
     if args.audio_in is None:
-        p = Process(target=one_thread,args=(0, 0, 0))
+        p = Process(target=one_thread, args=(0, 0, 0))
         p.start()
         p.join()
         print('end')
     else:
         # calculate the number of wavs for each preocess
         if args.audio_in.endswith(".scp"):
             f_scp = open(args.audio_in)
             wavs = f_scp.readlines()
         else:
             wavs = [args.audio_in]
-        total_len=len(wavs)
-        if total_len>=args.test_thread_num:
-            chunk_size=int((total_len)/args.test_thread_num)
-            remain_wavs=total_len-chunk_size*args.test_thread_num
-        else:
-            chunk_size=1
-            remain_wavs=0
-        process_list = []
-        chunk_begin=0
-        for i in range(args.test_thread_num):
-            now_chunk_size= chunk_size
-            if remain_wavs>0:
-                now_chunk_size=chunk_size+1
-                remain_wavs=remain_wavs-1
-            # process i handle wavs at chunk_begin and size of now_chunk_size
-            p = Process(target=one_thread,args=(i,chunk_begin,now_chunk_size))
-            chunk_begin=chunk_begin+now_chunk_size
-            p.start()
-            process_list.append(p)
-        for i in process_list:
-            p.join()
-        print('end')
+        for wav in wavs:
+            wav_splits = wav.strip().split()
+            wav_name = wav_splits[0] if len(wav_splits) > 1 else "demo"
+            wav_path = wav_splits[1] if len(wav_splits) > 1 else wav_splits[0]
+            audio_type = os.path.splitext(wav_path)[-1].lower()
+            if audio_type not in SUPPORT_AUDIO_TYPE_SETS:
+                raise NotImplementedError(
+                    f'Not supported audio type: {audio_type}')
+        total_len = len(wavs)
+        if total_len >= args.test_thread_num:
+            chunk_size = int(total_len / args.test_thread_num)
+            remain_wavs = total_len - chunk_size * args.test_thread_num
+        else:
+            chunk_size = 1
+            remain_wavs = 0
+        process_list = []
+        chunk_begin = 0
+        for i in range(args.test_thread_num):
+            now_chunk_size = chunk_size
+            if remain_wavs > 0:
+                now_chunk_size = chunk_size + 1
+                remain_wavs = remain_wavs - 1
+            # process i handle wavs at chunk_begin and size of now_chunk_size
+            p = Process(target=one_thread, args=(i, chunk_begin, now_chunk_size))
+            chunk_begin = chunk_begin + now_chunk_size
+            p.start()
+            process_list.append(p)
+        for i in process_list:
+            p.join()
+        print('end')
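
Note on the new __main__ logic: every input path is first validated against SUPPORT_AUDIO_TYPE_SETS, and the wav list is then split across --test_thread_num worker processes. A small sketch of the splitting arithmetic with example numbers (10 wavs, 3 processes); the values are illustrative, not defaults from this commit:

total_len, test_thread_num = 10, 3
chunk_size = int(total_len / test_thread_num)            # 3 wavs per process
remain_wavs = total_len - chunk_size * test_thread_num   # 1 leftover wav
chunk_begin = 0
for i in range(test_thread_num):
    now_chunk_size = chunk_size
    if remain_wavs > 0:
        now_chunk_size = chunk_size + 1
        remain_wavs = remain_wavs - 1
    print(i, chunk_begin, now_chunk_size)  # -> 0 0 4, 1 4 3, 2 7 3
    chunk_begin = chunk_begin + now_chunk_size
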