Compare commits

...

7 Commits

Author SHA1 Message Date
Vignesh Skanda
b166c00722
Merge f99e5fc706 into 5115a066c9 2025-08-14 16:14:59 +08:00
majic31
5115a066c9
fix #2587: Resolve VAD multithreading issue (#2613)
* Fix crash in ASR tasks when lm is set to none in #2237

* fix #2587: Resolve VAD multithreading issue

* Update funasr/models/fsmn_vad_streaming/model.py

ok

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

---------

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-14 16:08:44 +08:00
Kyle He
82a07e2f6e
fix: funasr_onnx pass run_options to ort (#2632) 2025-08-14 16:07:54 +08:00
topjgc
68dcef510a
Update SECURITY.md (#2639) 2025-08-14 16:07:24 +08:00
topjgc
4675344c13
Create SECURITY.md (#2638) 2025-08-14 15:56:12 +08:00
ming030890
b3fb4c0acd
Allow one to set a custom progress callback (#2609)
* Allow one to set a custom progress callback

so that they can show their own progress bar

* Uncomment an existing test

* restore indentation

---------

Co-authored-by: Tony Mak <tony@Tonys-MacBook-Air-1802.local>
2025-08-05 17:48:10 +08:00
Vignesh Skanda
f99e5fc706
Update README.md 2024-10-19 22:41:02 +05:30
6 changed files with 73 additions and 11 deletions


@@ -76,7 +76,7 @@
 ```text
 python>=3.8
 torch>=1.13
-torchaudio
+torchaudio==v2.4.0
 ```
 - Install for pypi
SECURITY.md (new file, 11 additions)

@@ -0,0 +1,11 @@
+# Security Policy
+
+## Reporting a Vulnerability
+
+If you believe you have discovered a security vulnerability, please report it to us through the following portal:
+
+👉[Report Security Issue](https://yundun.console.aliyun.com/?p=xznew#/taskmanagement/tasks/detail/153)
+
+> **Note:** This channel is strictly for reporting security-related issues. Non-security vulnerabilities or general bug reports will not be addressed here.
+
+We sincerely appreciate your responsible disclosure and your contribution to helping us keep our project secure.


@@ -301,14 +301,27 @@ class AutoModel:
         res = self.model(*args, **kwargs)
         return res

-    def generate(self, input, input_len=None, **cfg):
+    def generate(self, input, input_len=None, progress_callback=None, **cfg):
         if self.vad_model is None:
-            return self.inference(input, input_len=input_len, **cfg)
+            return self.inference(
+                input, input_len=input_len, progress_callback=progress_callback, **cfg
+            )
         else:
-            return self.inference_with_vad(input, input_len=input_len, **cfg)
+            return self.inference_with_vad(
+                input, input_len=input_len, progress_callback=progress_callback, **cfg
+            )

-    def inference(self, input, input_len=None, model=None, kwargs=None, key=None, **cfg):
+    def inference(
+        self,
+        input,
+        input_len=None,
+        model=None,
+        kwargs=None,
+        key=None,
+        progress_callback=None,
+        **cfg,
+    ):
         kwargs = self.kwargs if kwargs is None else kwargs
         if "cache" in kwargs:
             kwargs.pop("cache")

@@ -365,6 +378,11 @@ class AutoModel:
             if pbar:
                 pbar.update(end_idx - beg_idx)
                 pbar.set_description(description)
+            if progress_callback:
+                try:
+                    progress_callback(end_idx, num_samples)
+                except Exception as e:
+                    logging.error(f"progress_callback error: {e}")
             time_speech_total += batch_data_time
             time_escape_total += time_escape
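
For context, a minimal sketch of how the new hook can be driven from user code. This is an illustration only: the model name and audio path are placeholders, not taken from this changeset.

```python
from funasr import AutoModel

# Hypothetical usage sketch; "paraformer-zh" and "audio.wav" are placeholders.
model = AutoModel(model="paraformer-zh")

def on_progress(done, total):
    # Matches the progress_callback(end_idx, num_samples) call added above:
    # `done` is the number of inputs processed so far, `total` the overall count.
    print(f"{done}/{total} inputs processed")

res = model.generate(input="audio.wav", progress_callback=on_progress)
```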


@@ -652,10 +652,11 @@ class FsmnVADStreaming(nn.Module):
         key: list = None,
         tokenizer=None,
         frontend=None,
-        cache: dict = {},
+        cache: dict = None,
         **kwargs,
     ):
+        if cache is None:
+            cache = {}
         if len(cache) == 0:
             self.init_cache(cache, **kwargs)
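
The root cause here is Python's mutable default argument: a dict default is evaluated once at function definition and then shared by every subsequent call, so concurrent VAD requests were mutating the same cache. A minimal, FunASR-independent illustration of the pitfall and the fix:

```python
def broken(cache: dict = {}):
    # One dict object, created at definition time, shared by every call.
    cache["hits"] = cache.get("hits", 0) + 1
    return cache["hits"]

def fixed(cache: dict = None):
    if cache is None:
        cache = {}  # fresh dict per call, as in the patched signature above
    cache["hits"] = cache.get("hits", 0) + 1
    return cache["hits"]

print(broken(), broken())  # 1 2 -- state leaks between calls
print(fixed(), fixed())    # 1 1 -- each call starts clean
```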


@@ -221,10 +221,10 @@ class OrtInferSession:
             RuntimeWarning,
         )

-    def __call__(self, input_content: List[Union[np.ndarray, np.ndarray]]) -> np.ndarray:
+    def __call__(self, input_content: List[Union[np.ndarray, np.ndarray]], run_options=None) -> np.ndarray:
         input_dict = dict(zip(self.get_input_names(), input_content))
         try:
-            return self.session.run(self.get_output_names(), input_dict)
+            return self.session.run(self.get_output_names(), input_dict, run_options)
         except Exception as e:
             raise ONNXRuntimeError("ONNXRuntime inferece failed.") from e
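
For context, a hedged sketch of what forwarding run_options enables for callers; the model path and feed name are placeholders, not part of this changeset:

```python
import numpy as np
import onnxruntime as ort

# Hypothetical example: "model.onnx" and the "input" feed name are assumptions.
sess = ort.InferenceSession("model.onnx")

run_opts = ort.RunOptions()
run_opts.log_severity_level = 3  # errors only for this particular run
# Ask ORT to shrink the CPU memory arena after the run completes.
run_opts.add_run_config_entry("memory.enable_memory_arena_shrinkage", "cpu:0")

feed = {"input": np.zeros((1, 16000), dtype=np.float32)}
outputs = sess.run(None, feed, run_opts)
```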


@@ -22,7 +22,39 @@ class TestAutoModel(unittest.TestCase):
         kwargs["spk_kwargs"] = {"cb_kwargs": {"merge_thr": merge_thr}}
         model = AutoModel(**kwargs)
         self.assertEqual(model.cb_model.model_config['merge_thr'], merge_thr)
-        # res = model.generate(input="/test.wav",
+        # res = model.generate(input="/test.wav",
         #                      batch_size_s=300)

+    def test_progress_callback_called(self):
+        class DummyModel:
+            def __init__(self):
+                self.param = torch.nn.Parameter(torch.zeros(1))
+
+            def parameters(self):
+                return iter([self.param])
+
+            def eval(self):
+                pass
+
+            def inference(self, data_in=None, **kwargs):
+                results = [{"text": str(d)} for d in data_in]
+                return results, {"batch_data_time": 1}
+
+        am = AutoModel.__new__(AutoModel)
+        am.model = DummyModel()
+        am.kwargs = {"batch_size": 2, "disable_pbar": True}
+
+        progress = []
+        res = AutoModel.inference(
+            am,
+            ["a", "b", "c"],
+            progress_callback=lambda idx, total: progress.append((idx, total)),
+        )
+
+        self.assertEqual(len(progress), 2)
+        self.assertEqual(progress, [(2, 3), (3, 3)])

 if __name__ == '__main__':
-    unittest.main()
+    unittest.main()