FunASR 1.0 bugfix, audio sample input for the VAD model (#1333)

* funasr1.0.5

* funasr1.0.5 audio samples input
zhifu gao 2024-01-31 22:40:19 +08:00 committed by GitHub
parent 6cdbcf4b0a
commit dec1c875b2
3 changed files with 23 additions and 5 deletions


@@ -15,8 +15,26 @@ model = AutoModel(model="iic/speech_seaco_paraformer_large_asr_nat-zh-cn-16k-com
# spk_model_revision="v2.0.2",
)
# example1
res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
hotword='达摩院 魔搭',
# sentence_timestamp=True, # return sentence level information when spk_model is not given
)
print(res)
print(res)
# example2
import os
import torchaudio
wav_file = os.path.join(model.model_path, "example/asr_example.wav")
input_tensor, sample_rate = torchaudio.load(wav_file)
input_tensor = input_tensor.mean(0)  # average channels to a mono waveform
res = model.generate(input=[input_tensor], batch_size_s=300, is_final=True)
# example3
import soundfile
import os
wav_file = os.path.join(model.model_path, "example/asr_example.wav")
speech, sample_rate = soundfile.read(wav_file)
res = model.generate(input=[speech], batch_size_s=300, is_final=True)
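The examples above pass audio samples directly to the ASR model; the sketch below shows the analogous call for the VAD model referenced in the commit title. It is not part of this diff: the "fsmn-vad" model id, the bundled "example/vad_example.wav" path, and the generate() arguments are assumptions.

import os
import soundfile
from funasr import AutoModel

vad_model = AutoModel(model="fsmn-vad")  # assumed VAD model id
wav_file = os.path.join(vad_model.model_path, "example/vad_example.wav")  # assumed bundled example
speech, sample_rate = soundfile.read(wav_file)
# pass the raw waveform (numpy array) instead of a file path or URL
res = vad_model.generate(input=[speech], is_final=True)
print(res)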


@@ -228,7 +228,7 @@ class AutoModel:
data_batch = data_list[beg_idx:end_idx]
key_batch = key_list[beg_idx:end_idx]
batch = {"data_in": data_batch, "key": key_batch}
if (end_idx - beg_idx) == 1 and isinstance(data_batch[0], torch.Tensor): # fbank
if (end_idx - beg_idx) == 1 and kwargs.get("data_type", None) == "fbank": # fbank
batch["data_in"] = data_batch[0]
batch["data_lengths"] = input_len


@@ -439,13 +439,13 @@ class Transformer(nn.Module):
token = tokenizer.ids2tokens(token_int)
text = tokenizer.tokens2text(token)
# text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
result_i = {"key": key[i], "token": token, "text": text}
text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
result_i = {"key": key[i], "token": token, "text": text_postprocessed}
results.append(result_i)
if ibest_writer is not None:
ibest_writer["token"][key[i]] = " ".join(token)
ibest_writer["text"][key[i]] = text
ibest_writer["text"][key[i]] = text_postprocessed
return results, meta_data
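
Usage note: this hunk routes the decoded tokens through postprocess_utils.sentence_postprocess before storing and writing the text. A minimal illustrative call; the token list is hypothetical and the import path is an assumption based on funasr's layout:

from funasr.utils import postprocess_utils

token = ["欢", "迎", "使", "用", "fun", "asr"]  # hypothetical decoder output tokens
text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)
print(text_postprocessed)  # merged, display-ready text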