diff --git a/funasr/runtime/python/websocket/wss_client_asr.py b/funasr/runtime/python/websocket/wss_client_asr.py
index 0dd236d62..dec598abf 100644
--- a/funasr/runtime/python/websocket/wss_client_asr.py
+++ b/funasr/runtime/python/websocket/wss_client_asr.py
@@ -1,7 +1,7 @@
 # -*- encoding: utf-8 -*-
 import os
 import time
-import websockets,ssl
+import websockets, ssl
 import asyncio
 # import threading
 import argparse
@@ -12,6 +12,7 @@
 from funasr.fileio.datadir_writer import DatadirWriter
 import logging
 
+SUPPORT_AUDIO_TYPE_SETS = ['.wav', '.pcm']
 logging.basicConfig(level=logging.ERROR)
 
 parser = argparse.ArgumentParser()
@@ -53,7 +54,7 @@ parser.add_argument("--output_dir",
                     type=str,
                     default=None,
                     help="output_dir")
-                    
+
 parser.add_argument("--ssl",
                     type=int,
                     default=1,
@@ -68,6 +69,7 @@ args.chunk_size = [int(x) for x in args.chunk_size.split(",")]
 print(args)
 # voices = asyncio.Queue()
 from queue import Queue
+
 voices = Queue()
 
 ibest_writer = None
@@ -75,15 +77,16 @@ if args.output_dir is not None:
     writer = DatadirWriter(args.output_dir)
     ibest_writer = writer[f"1best_recog"]
 
+
 async def record_microphone():
     is_finished = False
     import pyaudio
-    #print("2")
-    global voices 
+    # print("2")
+    global voices
     FORMAT = pyaudio.paInt16
     CHANNELS = 1
     RATE = 16000
-    chunk_size = 60*args.chunk_size[1]/args.chunk_interval
+    chunk_size = 60 * args.chunk_size[1] / args.chunk_interval
     CHUNK = int(RATE / 1000 * chunk_size)
 
     p = pyaudio.PyAudio()
@@ -94,19 +97,16 @@ async def record_microphone():
                     input=True,
                     frames_per_buffer=CHUNK)
 
-    message = json.dumps({"mode": args.mode, "chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval, "wav_name": "microphone", "is_speaking": True})
+    message = json.dumps({"mode": args.mode, "chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval,
+                          "wav_name": "microphone", "is_speaking": True})
     voices.put(message)
     while True:
         data = stream.read(CHUNK)
-        message = data
-        
+        message = data
         voices.put(message)
-
         await asyncio.sleep(0.005)
 
-
-async def record_from_scp(chunk_begin,chunk_size):
-    import wave
+async def record_from_scp(chunk_begin, chunk_size):
     global voices
     is_finished = False
     if args.audio_in.endswith(".scp"):
@@ -114,47 +114,51 @@
         wavs = f_scp.readlines()
     else:
         wavs = [args.audio_in]
-    if chunk_size>0:
-        wavs=wavs[chunk_begin:chunk_begin+chunk_size]
+    if chunk_size > 0:
+        wavs = wavs[chunk_begin:chunk_begin + chunk_size]
     for wav in wavs:
         wav_splits = wav.strip().split()
 
         wav_name = wav_splits[0] if len(wav_splits) > 1 else "demo"
         wav_path = wav_splits[1] if len(wav_splits) > 1 else wav_splits[0]
-
-        # bytes_f = open(wav_path, "rb")
-        # bytes_data = bytes_f.read()
-        with wave.open(wav_path, "rb") as wav_file:
-            params = wav_file.getparams()
-            # header_length = wav_file.getheaders()[0][1]
-            # wav_file.setpos(header_length)
-            frames = wav_file.readframes(wav_file.getnframes())
-            audio_bytes = bytes(frames)
+        if wav_path.endswith(".pcm"):
+            with open(wav_path, "rb") as f:
+                audio_bytes = f.read()
+        elif wav_path.endswith(".wav"):
+            import wave
+            with wave.open(wav_path, "rb") as wav_file:
+                params = wav_file.getparams()
+                frames = wav_file.readframes(wav_file.getnframes())
+                audio_bytes = bytes(frames)
+        else:
+            raise NotImplementedError(
+                f'Not supported audio type')
 
         # stride = int(args.chunk_size/1000*16000*2)
-        stride = int(60*args.chunk_size[1]/args.chunk_interval/1000*16000*2)
-        chunk_num = (len(audio_bytes)-1)//stride + 1
+        stride = int(60 * args.chunk_size[1] / args.chunk_interval / 1000 * 16000 * 2)
+        chunk_num = (len(audio_bytes) - 1) // stride + 1
         # print(stride)
-        
+
         # send first time
-        message = json.dumps({"mode": args.mode, "chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval, "wav_name": wav_name,"is_speaking": True})
+        message = json.dumps({"mode": args.mode, "chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval,
+                              "wav_name": wav_name, "is_speaking": True})
         voices.put(message)
         is_speaking = True
        for i in range(chunk_num):
-            beg = i*stride
-            data = audio_bytes[beg:beg+stride]
-            message = data 
+            beg = i * stride
+            data = audio_bytes[beg:beg + stride]
+            message = data
             voices.put(message)
-            if i == chunk_num-1:
+            if i == chunk_num - 1:
                 is_speaking = False
                 message = json.dumps({"is_speaking": is_speaking})
                 voices.put(message)
 
             # print("data_chunk: ", len(data_chunk))
             # print(voices.qsize())
-            sleep_duration = 0.001 if args.send_without_sleep else 60*args.chunk_size[1]/args.chunk_interval/1000
+            sleep_duration = 0.001 if args.send_without_sleep else 60 * args.chunk_size[1] / args.chunk_interval / 1000
             await asyncio.sleep(sleep_duration)
 
-
 async def ws_send():
     global voices
     global websocket
@@ -172,8 +176,6 @@ async def ws_send():
         await asyncio.sleep(0.005)
     await asyncio.sleep(0.005)
 
-
-
 async def message(id):
     global websocket
     text_print = ""
@@ -188,17 +190,17 @@
             text = meg["text"]
             if ibest_writer is not None:
                 ibest_writer["text"][wav_name] = text
-            
+
             if meg["mode"] == "online":
                 text_print += "{}".format(text)
                 text_print = text_print[-args.words_max_print:]
                 os.system('clear')
-                print("\rpid"+str(id)+": "+text_print)
+                print("\rpid" + str(id) + ": " + text_print)
             elif meg["mode"] == "offline":
                 text_print += "{}".format(text)
                 text_print = text_print[-args.words_max_print:]
                 os.system('clear')
-                print("\rpid"+str(id)+": "+text_print)
+                print("\rpid" + str(id) + ": " + text_print)
             else:
                 if meg["mode"] == "2pass-online":
                     text_print_2pass_online += "{}".format(text)
@@ -216,6 +218,7 @@
             traceback.print_exc()
             exit(0)
 
+
 async def print_messge():
     global websocket
     while True:
@@ -228,69 +231,75 @@
             traceback.print_exc()
             exit(0)
 
-async def ws_client(id,chunk_begin,chunk_size):
+async def ws_client(id, chunk_begin, chunk_size):
     global websocket
-    if args.ssl==1:
-        ssl_context = ssl.SSLContext()
-        ssl_context.check_hostname = False
-        ssl_context.verify_mode = ssl.CERT_NONE
-        uri = "wss://{}:{}".format(args.host, args.port)
+    if args.ssl == 1:
+        ssl_context = ssl.SSLContext()
+        ssl_context.check_hostname = False
+        ssl_context.verify_mode = ssl.CERT_NONE
+        uri = "wss://{}:{}".format(args.host, args.port)
     else:
-        uri = "ws://{}:{}".format(args.host, args.port)
-        ssl_context=None
-    print("connect to",uri)
-    async for websocket in websockets.connect(uri, subprotocols=["binary"], ping_interval=None,ssl=ssl_context):
+        uri = "ws://{}:{}".format(args.host, args.port)
+        ssl_context = None
+    print("connect to", uri)
+    async for websocket in websockets.connect(uri, subprotocols=["binary"], ping_interval=None, ssl=ssl_context):
         if args.audio_in is not None:
-            task = asyncio.create_task(record_from_scp(chunk_begin,chunk_size))
+            task = asyncio.create_task(record_from_scp(chunk_begin, chunk_size))
         else:
             task = asyncio.create_task(record_microphone())
         task2 = asyncio.create_task(ws_send())
         task3 = asyncio.create_task(message(id))
         await asyncio.gather(task, task2, task3)
 
-def one_thread(id,chunk_begin,chunk_size):
-    asyncio.get_event_loop().run_until_complete(ws_client(id,chunk_begin,chunk_size))
-    asyncio.get_event_loop().run_forever()
-
+def one_thread(id, chunk_begin, chunk_size):
+    asyncio.get_event_loop().run_until_complete(ws_client(id, chunk_begin, chunk_size))
+    asyncio.get_event_loop().run_forever()
 
 if __name__ == '__main__':
-    # for microphone
-    if args.audio_in is None:
-        p = Process(target=one_thread,args=(0, 0, 0))
-        p.start()
-        p.join()
-        print('end')
-    else:
-        # calculate the number of wavs for each preocess
-        if args.audio_in.endswith(".scp"):
-            f_scp = open(args.audio_in)
-            wavs = f_scp.readlines()
-        else:
-            wavs = [args.audio_in]
-        total_len=len(wavs)
-        if total_len>=args.test_thread_num:
-            chunk_size=int((total_len)/args.test_thread_num)
-            remain_wavs=total_len-chunk_size*args.test_thread_num
-        else:
-            chunk_size=1
-            remain_wavs=0
+    # for microphone
+    if args.audio_in is None:
+        p = Process(target=one_thread, args=(0, 0, 0))
+        p.start()
+        p.join()
+        print('end')
+    else:
+        # calculate the number of wavs for each preocess
+        if args.audio_in.endswith(".scp"):
+            f_scp = open(args.audio_in)
+            wavs = f_scp.readlines()
+        else:
+            wavs = [args.audio_in]
+        for wav in wavs:
+            wav_splits = wav.strip().split()
+            wav_name = wav_splits[0] if len(wav_splits) > 1 else "demo"
+            wav_path = wav_splits[1] if len(wav_splits) > 1 else wav_splits[0]
+            audio_type = os.path.splitext(wav_path)[-1].lower()
+            if audio_type not in SUPPORT_AUDIO_TYPE_SETS:
+                raise NotImplementedError(
+                    f'Not supported audio type: {audio_type}')
 
-        process_list = []
-        chunk_begin=0
-        for i in range(args.test_thread_num):
-            now_chunk_size= chunk_size
-            if remain_wavs>0:
-                now_chunk_size=chunk_size+1
-                remain_wavs=remain_wavs-1
-            # process i handle wavs at chunk_begin and size of now_chunk_size
-            p = Process(target=one_thread,args=(i,chunk_begin,now_chunk_size))
-            chunk_begin=chunk_begin+now_chunk_size
-            p.start()
-            process_list.append(p)
+        total_len = len(wavs)
+        if total_len >= args.test_thread_num:
+            chunk_size = int(total_len / args.test_thread_num)
+            remain_wavs = total_len - chunk_size * args.test_thread_num
+        else:
+            chunk_size = 1
+            remain_wavs = 0
 
-        for i in process_list:
-            p.join()
-        
-        print('end')
+        process_list = []
+        chunk_begin = 0
+        for i in range(args.test_thread_num):
+            now_chunk_size = chunk_size
+            if remain_wavs > 0:
+                now_chunk_size = chunk_size + 1
+                remain_wavs = remain_wavs - 1
+            # process i handle wavs at chunk_begin and size of now_chunk_size
+            p = Process(target=one_thread, args=(i, chunk_begin, now_chunk_size))
+            chunk_begin = chunk_begin + now_chunk_size
+            p.start()
+            process_list.append(p)
        for i in process_list:
            p.join()
        print('end')
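Note (not part of the patch): the functional change above is that record_from_scp() now accepts raw .pcm input alongside .wav, extensions are validated against SUPPORT_AUDIO_TYPE_SETS before any worker process starts, and the byte-stride chunking is unchanged. Below is a minimal standalone sketch of that loading and chunking behaviour; "demo.wav" and the chunk_size / chunk_interval values are illustrative assumptions, not values taken from the patch.

import os
import wave

SUPPORT_AUDIO_TYPE_SETS = ['.wav', '.pcm']


def load_audio_bytes(path):
    """Return 16 kHz / 16-bit mono samples as raw bytes, as the client sends them."""
    audio_type = os.path.splitext(path)[-1].lower()
    if audio_type not in SUPPORT_AUDIO_TYPE_SETS:
        raise NotImplementedError(f'Not supported audio type: {audio_type}')
    if audio_type == '.pcm':
        # Raw PCM has no header, so the file content is sent as-is.
        with open(path, "rb") as f:
            return f.read()
    # WAV is read through the wave module so the RIFF header is not sent as audio.
    with wave.open(path, "rb") as wav_file:
        return bytes(wav_file.readframes(wav_file.getnframes()))


chunk_size = [5, 10, 5]   # assumed --chunk_size
chunk_interval = 10       # assumed --chunk_interval
# Same stride formula as the patch: 60 ms * chunk_size[1] / chunk_interval,
# at 16 kHz with 2 bytes per sample -> 1920 bytes per send with these values.
stride = int(60 * chunk_size[1] / chunk_interval / 1000 * 16000 * 2)

audio_bytes = load_audio_bytes("demo.wav")  # hypothetical input file
chunk_num = (len(audio_bytes) - 1) // stride + 1
chunks = [audio_bytes[i * stride:(i + 1) * stride] for i in range(chunk_num)]
print(chunk_num, "chunks of up to", stride, "bytes")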