mirror of
https://github.com/modelscope/FunASR
synced 2025-09-15 14:48:36 +08:00
* add hotword for deploy_tools * Support wfst decoder and contextual biasing (#1039) * Support wfst decoder and contextual biasing * Turn on fstbin compilation --------- Co-authored-by: gongbo.gb <gongbo.gb@alibaba-inc.com> * mv funasr/runtime runtime * Fix crash caused by OOV in hotwords list * funasr infer * funasr infer * funasr infer * funasr infer * funasr infer * fix some bugs about fst hotword; support wfst for websocket server and clients; mv runtime out of funasr; modify relative docs * del onnxruntime/include/gflags * update tensor.h * update run_server.sh * update deploy tools * update deploy tools * update websocket-server * update funasr-wss-server * Remove self loop propagation * Update websocket_protocol_zh.md * Update websocket_protocol_zh.md * update hotword protocol * author zhaomingwork: change hotwords for h5 and java * update hotword protocol * catch exception for json_fst_hws * update hotword on message * update onnx benchmark for ngram&hotword * update docs * update funasr-wss-serve * add NONE for LM_DIR * update docs * update run_server.sh * add whats-new * modify whats-new * update whats-new * update whats-new * Support decoder option for beam searching * update benchmark_onnx_cpp * Support decoder option for websocket * fix bug of CompileHotwordEmbedding * update html client * update docs --------- Co-authored-by: gongbo.gb <35997837+aibulamusi@users.noreply.github.com> Co-authored-by: gongbo.gb <gongbo.gb@alibaba-inc.com> Co-authored-by: 游雁 <zhifu.gzf@alibaba-inc.com>
107 lines
3.7 KiB
C#
107 lines
3.7 KiB
C#
using System.Collections.Concurrent;
|
|
using NAudio.Wave;
|
|
using NAudio.CoreAudioApi;
|
|
|
|
namespace AliFsmnVadSharp
{
    /// <summary>
    /// Captures microphone audio with NAudio and publishes raw PCM chunks
    /// through the static thread-safe <see cref="voicebuff"/> queue for
    /// downstream consumers (e.g. the VAD pipeline).
    /// Default capture format: 16 kHz, 16-bit, mono (see static fields).
    /// </summary>
    class WaveCollect
    {
        // Path of the temporary WAV file created on StartRec().
        private string fileName = string.Empty;

        private WaveInEvent? waveSource = null;

        private WaveFileWriter? waveFile = null;

        // Capture parameters; callers may adjust these before StartRec().
        public static int wave_buffer_milliseconds = 600;

        public static int wave_buffer_collectbits = 16;

        public static int wave_buffer_collectchannels = 1;

        public static int wave_buffer_collectfrequency = 16000;

        // Shared producer/consumer queue of captured PCM chunks.
        public static readonly ConcurrentQueue<byte[]> voicebuff = new ConcurrentQueue<byte[]>();

        /// <summary>
        /// Starts recording from the default input device. Clears any stale
        /// queued chunks, then streams new PCM data into <see cref="voicebuff"/>.
        /// </summary>
        public void StartRec()
        {
            // Enumerate active capture endpoints (diagnostic output only).
            var captureDevices = new MMDeviceEnumerator().EnumerateAudioEndPoints(DataFlow.Capture, DeviceState.Active);
            foreach (var device in captureDevices)
            {
                Console.WriteLine("Device Name: " + device.FriendlyName);
                using (var capture = new WasapiLoopbackCapture(device))
                {
                    // Report the device's native format.
                    Console.WriteLine("Device Channels:" + capture.WaveFormat.Channels);
                    Console.WriteLine("Device SampleRate:" + capture.WaveFormat.SampleRate);
                    Console.WriteLine("Device BitsPerSample:" + capture.WaveFormat.BitsPerSample);
                }
            }

            // Drop chunks left over from a previous recording session.
            while (voicebuff.TryDequeue(out _))
            {
            }

            waveSource = new WaveInEvent();
            waveSource.BufferMilliseconds = wave_buffer_milliseconds;
            waveSource.WaveFormat = new WaveFormat(wave_buffer_collectfrequency, wave_buffer_collectbits, wave_buffer_collectchannels); // 16bit,16KHz,Mono recording format
            waveSource.DataAvailable += new EventHandler<WaveInEventArgs>(waveSource_DataAvailable);
            // BUGFIX: the RecordingStopped handler existed but was never
            // subscribed, so cleanup only ever ran via an explicit StopRec().
            waveSource.RecordingStopped += new EventHandler<StoppedEventArgs>(waveSource_RecordingStopped);
            SetFileName(AppDomain.CurrentDomain.BaseDirectory + "tmp.wav");
            waveFile = new WaveFileWriter(fileName, waveSource.WaveFormat);
            waveSource.StartRecording();
        }

        /// <summary>
        /// Stops recording and releases the capture device and WAV writer.
        /// Safe to call when recording was never started.
        /// </summary>
        public void StopRec()
        {
            if (waveSource != null)
            {
                waveSource.StopRecording();
                waveSource.Dispose();
                waveSource = null;
            }

            // BUGFIX: dispose the file writer even when waveSource is already
            // null — previously it could leak if StopRec ran twice.
            if (waveFile != null)
            {
                waveFile.Dispose();
                waveFile = null;
            }
        }

        /// <summary>
        /// Sets the output WAV file path used by the next recording.
        /// </summary>
        public void SetFileName(string fileName)
        {
            this.fileName = fileName;
        }

        /// <summary>
        /// DataAvailable callback: copies the recorded bytes into the queue.
        /// </summary>
        private void waveSource_DataAvailable(object sender, WaveInEventArgs e)
        {
            if (waveFile != null)
            {
                if (e.Buffer != null && e.BytesRecorded > 0)
                {
                    // BUGFIX: NAudio reuses the callback buffer and it may be
                    // larger than BytesRecorded; copy exactly the valid bytes
                    // instead of enqueueing the shared buffer reference, which
                    // could be overwritten before the consumer reads it.
                    byte[] chunk = new byte[e.BytesRecorded];
                    Array.Copy(e.Buffer, chunk, e.BytesRecorded);
                    voicebuff.Enqueue(chunk);
                    //waveFile.Write(e.Buffer, 0, e.BytesRecorded);
                    waveFile.Flush();
                }
            }
        }

        /// <summary>
        /// Dequeues the next captured PCM chunk, or returns null when the
        /// queue is empty (nullable annotation matches actual behavior).
        /// </summary>
        public static byte[]? Wavedata_Dequeue()
        {
            voicebuff.TryDequeue(out byte[]? datas);
            return datas;
        }

        /// <summary>
        /// RecordingStopped callback: releases device and writer resources.
        /// </summary>
        private void waveSource_RecordingStopped(object sender, StoppedEventArgs e)
        {
            if (waveSource != null)
            {
                waveSource.Dispose();
                waveSource = null;
            }

            if (waveFile != null)
            {
                waveFile.Dispose();
                waveFile = null;
            }
        }
    }
}
|