@@ -27,15 +27,18 @@ print(rec_result)
 inference_pipeline = pipeline(
     task=Tasks.auto_speech_recognition,
     model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online',
-    model_revision='v1.0.6',
+    model_revision='v1.0.7',
+    update_model=False,
     mode='paraformer_streaming'
 )
 import soundfile
 speech, sample_rate = soundfile.read("example/asr_example.wav")
 
-chunk_size = [5, 10, 5]  # [5, 10, 5] 600ms, [8, 8, 4] 480ms
-param_dict = {"cache": dict(), "is_final": False, "chunk_size": chunk_size}
+chunk_size = [0, 10, 5]  # [0, 10, 5] 600ms, [0, 8, 4] 480ms
+encoder_chunk_look_back = 4  # number of chunks to look back at for encoder self-attention
+decoder_chunk_look_back = 1  # number of encoder chunks to look back at for decoder cross-attention
+param_dict = {"cache": dict(), "is_final": False, "chunk_size": chunk_size,
+              "encoder_chunk_look_back": encoder_chunk_look_back, "decoder_chunk_look_back": decoder_chunk_look_back}
 chunk_stride = chunk_size[1] * 960  # samples per chunk: 600 ms or 480 ms at 16 kHz
 # first chunk, 600ms
 speech_chunk = speech[0:chunk_stride]

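For orientation: each unit of chunk_size counts one 60 ms frame group at 16 kHz, so [0, 10, 5] gives 10 * 60 ms = 600 ms per chunk and a stride of 10 * 960 = 9600 samples. A minimal, hedged sketch of pushing that first chunk through the pipeline built above (the audio_in/param_dict calling convention is taken from the other examples in this diff):

# Sketch only: decode the first 600 ms chunk; param_dict carries the
# decoder cache so later chunks continue from this state.
rec_result = inference_pipeline(audio_in=speech_chunk, param_dict=param_dict)
print(rec_result)  # partial hypothesis for the first chunk
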
@@ -55,7 +58,7 @@ from modelscope.utils.constant import Tasks
 inference_pipeline = pipeline(
     task=Tasks.auto_speech_recognition,
     model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online',
-    model_revision='v1.0.6',
+    model_revision='v1.0.7',
+    update_model=False,
     mode="paraformer_fake_streaming"
 )

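Fake-streaming mode runs the same online model over a whole utterance in one pass, which is a quick way to exercise the v1.0.7 revision without a chunk loop. A minimal sketch, assuming the pipeline above and the example wav used throughout:

# One-shot decode in fake-streaming mode; no cache bookkeeping is needed
# when the full utterance is passed at once.
rec_result = inference_pipeline(audio_in="example/asr_example.wav")
print(rec_result)
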
@@ -4,7 +4,7 @@ from modelscope.utils.constant import Tasks
 inference_pipeline = pipeline(
     task=Tasks.auto_speech_recognition,
     model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online',
-    model_revision='v1.0.6',
+    model_revision='v1.0.7',
+    update_model=False,
     mode="paraformer_fake_streaming"
 )

@@ -14,7 +14,7 @@ os.environ["MODELSCOPE_CACHE"] = "./"
 inference_pipeline = pipeline(
     task=Tasks.auto_speech_recognition,
     model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online',
-    model_revision='v1.0.6',
+    model_revision='v1.0.7',
+    update_model=False,
     mode="paraformer_streaming"
 )

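Worth noting: with model_revision pinned and update_model=False, the pipeline reuses the snapshot already downloaded under MODELSCOPE_CACHE instead of re-syncing it on every run. A small sketch of the cache setup this hunk's context line refers to:

import os

# Keep the pinned v1.0.7 snapshot under the working directory rather than
# the default user-level ModelScope cache.
os.environ["MODELSCOPE_CACHE"] = "./"
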
@@ -24,9 +24,12 @@ speech, sample_rate = soundfile.read(os.path.join(model_dir, "example/asr_exampl
 speech_length = speech.shape[0]
 
 sample_offset = 0
-chunk_size = [0, 8, 4]  # [5, 10, 5] 600ms, [8, 8, 4] 480ms
+chunk_size = [0, 10, 5]  # [0, 10, 5] 600ms, [0, 8, 4] 480ms
+encoder_chunk_look_back = 4  # number of chunks to look back at for encoder self-attention
+decoder_chunk_look_back = 1  # number of encoder chunks to look back at for decoder cross-attention
 stride_size = chunk_size[1] * 960
-param_dict = {"cache": dict(), "is_final": False, "chunk_size": chunk_size, "encoder_chunk_look_back": 4, "decoder_chunk_look_back": 1}
+param_dict = {"cache": dict(), "is_final": False, "chunk_size": chunk_size,
+              "encoder_chunk_look_back": encoder_chunk_look_back, "decoder_chunk_look_back": decoder_chunk_look_back}
 final_result = ""
 
 for sample_offset in range(0, speech_length, min(stride_size, speech_length - sample_offset)):

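The hunk ends at the loop header, so for completeness here is a hedged sketch of the body the surrounding script implies (the range step is simplified to a fixed stride): slice the next chunk, flag the last one as final so the cached state is flushed, and accumulate the recognized text.

# Sketch of the implied loop body; "is_final" on the last chunk tells the
# model to flush its cache and emit the remaining hypothesis.
for sample_offset in range(0, speech_length, stride_size):
    param_dict["is_final"] = sample_offset + stride_size >= speech_length
    speech_chunk = speech[sample_offset:sample_offset + stride_size]
    rec_result = inference_pipeline(audio_in=speech_chunk, param_dict=param_dict)
    if "text" in rec_result:
        final_result += rec_result["text"]
print(final_result)
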
@@ -14,7 +14,7 @@ def modelscope_finetune(params):
     ds_dict = MsDataset.load(params.data_path)
     kwargs = dict(
         model=params.model,
-        model_revision='v1.0.6',
+        model_revision='v1.0.7',
+        update_model=False,
         data_dir=ds_dict,
         dataset_type=params.dataset_type,

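For context, these kwargs are normally handed to a ModelScope trainer. A hedged sketch of the surrounding finetune call, assuming the build_trainer entry point used by the ModelScope ASR recipes (the trainer name is an assumption, not part of this diff):

from modelscope.metainfo import Trainers
from modelscope.trainers import build_trainer

# Assumed wiring: build the ASR trainer from the kwargs above and run
# finetuning on the loaded MsDataset.
trainer = build_trainer(Trainers.speech_asr_trainer, default_args=kwargs)
trainer.train()
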
@@ -11,7 +11,7 @@ def modelscope_infer(args):
         model=args.model,
         output_dir=args.output_dir,
         batch_size=args.batch_size,
-        model_revision='v1.0.6',
+        model_revision='v1.0.7',
+        update_model=False,
         mode="paraformer_fake_streaming",
         param_dict={"decoding_model": args.decoding_mode, "hotword": args.hotword_txt}

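For illustration only: args.hotword_txt points at a plain-text hotword list, which in the FunASR examples of this vintage is one customization phrase per line. A hypothetical way to produce such a file (the file name and contents are made up):

# Hypothetical hotword list: one phrase per line (format assumed from the
# FunASR hotword examples; not defined by this diff).
with open("hotword.txt", "w", encoding="utf-8") as f:
    f.write("魔搭\n达摩院\n")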