Mirror of https://github.com/modelscope/FunASR (synced 2025-09-15 14:48:36 +08:00)

Commit e422c6197b (parent 135a74fcf3): update repo
egs/librispeech_100h/conf/decode_asr_transformer.yaml (new file, +6 lines)
@@ -0,0 +1,6 @@
beam_size: 10
penalty: 0.0
maxlenratio: 0.0
minlenratio: 0.0
ctc_weight: 0.5
lm_weight: 0.7
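
The six keys above are beam-search hyperparameters. As an illustration only (not part of this commit), the sketch below parses the file with PyYAML and combines attention, CTC, and LM log-probabilities the way ESPnet-style joint CTC/attention decoding with shallow LM fusion conventionally does; the scoring formula is an assumption about how this recipe consumes ctc_weight and lm_weight.

import yaml

# Parse the decode config shown above (inlined here so the sketch is self-contained).
cfg = yaml.safe_load("""
beam_size: 10
penalty: 0.0
maxlenratio: 0.0
minlenratio: 0.0
ctc_weight: 0.5
lm_weight: 0.7
""")

def joint_score(att_logp, ctc_logp, lm_logp):
    # Hybrid hypothesis score: interpolate attention and CTC by ctc_weight,
    # then add the external LM score scaled by lm_weight (shallow fusion).
    w = cfg["ctc_weight"]
    return (1.0 - w) * att_logp + w * ctc_logp + cfg["lm_weight"] * lm_logp

print(joint_score(att_logp=-4.2, ctc_logp=-5.0, lm_logp=-3.1))  # -6.77
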
egs/librispeech_100h/conf/train_asr_conformer.yaml (new file, +80 lines)
@@ -0,0 +1,80 @@
encoder: conformer
encoder_conf:
    output_size: 512
    attention_heads: 8
    linear_units: 2048
    num_blocks: 12
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    attention_dropout_rate: 0.1
    input_layer: conv2d
    normalize_before: true
    macaron_style: true
    rel_pos_type: latest
    pos_enc_layer_type: rel_pos
    selfattention_layer_type: rel_selfattn
    activation_type: swish
    use_cnn_module: true
    cnn_module_kernel: 31

decoder: transformer
decoder_conf:
    attention_heads: 8
    linear_units: 2048
    num_blocks: 6
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    self_attention_dropout_rate: 0.1
    src_attention_dropout_rate: 0.1

model_conf:
    ctc_weight: 0.3
    lsm_weight: 0.1
    length_normalized_loss: false

accum_grad: 2
max_epoch: 50
patience: none
init: none
best_model_criterion:
- - valid
  - acc
  - max
keep_nbest_models: 10

optim: adam
optim_conf:
    lr: 0.0025
    weight_decay: 0.000001
scheduler: warmuplr
scheduler_conf:
    warmup_steps: 40000

specaug: specaug
specaug_conf:
    apply_time_warp: true
    time_warp_window: 5
    time_warp_mode: bicubic
    apply_freq_mask: true
    freq_mask_width_range:
    - 0
    - 27
    num_freq_mask: 2
    apply_time_mask: true
    time_mask_width_ratio_range:
    - 0.
    - 0.05
    num_time_mask: 10

dataset_conf:
    shuffle: True
    shuffle_conf:
        shuffle_size: 1024
        sort_size: 500
    batch_conf:
        batch_type: token
        batch_size: 10000
    num_workers: 8

log_interval: 50
normalize: None
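
In the file above, optim/optim_conf and scheduler/scheduler_conf pair Adam (lr 0.0025, weight_decay 1e-6) with a warmuplr scheduler over 40000 steps. The sketch below shows the learning-rate curve such a Noam-style scheduler conventionally produces (linear warmup to the base rate, then inverse-square-root decay); the exact formula is an assumption for illustration, not code from this commit.

def warmup_lr(step, base_lr=0.0025, warmup_steps=40000):
    # Linear ramp up to base_lr at `warmup_steps`, then decay proportional to 1/sqrt(step).
    step = max(step, 1)
    return base_lr * warmup_steps ** 0.5 * min(step ** -0.5, step * warmup_steps ** -1.5)

print(warmup_lr(1000))    # 6.25e-05  (still warming up)
print(warmup_lr(40000))   # 0.0025    (peak = base lr)
print(warmup_lr(160000))  # 0.00125   (decayed by 1/sqrt(4))
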
egs/librispeech_100h/conf/train_asr_conformer_uttnorm.yaml (new file, +80 lines)
@@ -0,0 +1,80 @@
encoder: conformer
encoder_conf:
    output_size: 512
    attention_heads: 8
    linear_units: 2048
    num_blocks: 12
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    attention_dropout_rate: 0.1
    input_layer: conv2d
    normalize_before: true
    macaron_style: true
    rel_pos_type: latest
    pos_enc_layer_type: rel_pos
    selfattention_layer_type: rel_selfattn
    activation_type: swish
    use_cnn_module: true
    cnn_module_kernel: 31

decoder: transformer
decoder_conf:
    attention_heads: 8
    linear_units: 2048
    num_blocks: 6
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    self_attention_dropout_rate: 0.1
    src_attention_dropout_rate: 0.1

model_conf:
    ctc_weight: 0.3
    lsm_weight: 0.1
    length_normalized_loss: false

accum_grad: 2
max_epoch: 50
patience: none
init: none
best_model_criterion:
- - valid
  - acc
  - max
keep_nbest_models: 10

optim: adam
optim_conf:
    lr: 0.0025
    weight_decay: 0.000001
scheduler: warmuplr
scheduler_conf:
    warmup_steps: 40000

specaug: specaug
specaug_conf:
    apply_time_warp: true
    time_warp_window: 5
    time_warp_mode: bicubic
    apply_freq_mask: true
    freq_mask_width_range:
    - 0
    - 27
    num_freq_mask: 2
    apply_time_mask: true
    time_mask_width_ratio_range:
    - 0.
    - 0.05
    num_time_mask: 10

dataset_conf:
    shuffle: True
    shuffle_conf:
        shuffle_size: 1024
        sort_size: 500
    batch_conf:
        batch_type: token
        batch_size: 10000
    num_workers: 8

log_interval: 50
normalize: utterance_mvn
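
This file matches train_asr_conformer.yaml above except for its last line: normalize: utterance_mvn instead of normalize: None. As a hedged illustration of what utterance-level mean-variance normalization does (plain numpy, not the FunASR implementation), each utterance's feature matrix is standardized by its own per-dimension mean and standard deviation before it reaches the encoder:

import numpy as np

def utterance_mvn(feats, eps=1e-10):
    # feats: (num_frames, feat_dim) filterbank features for a single utterance.
    mean = feats.mean(axis=0, keepdims=True)
    std = feats.std(axis=0, keepdims=True)
    return (feats - mean) / np.maximum(std, eps)

feats = np.random.randn(200, 80) * 3.0 + 5.0   # 200 frames of 80-dim features
normed = utterance_mvn(feats)
print(normed.mean(axis=0)[:3], normed.std(axis=0)[:3])  # ~0 mean, ~1 std per dimension
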