嘉渊 2023-07-05 21:55:40 +08:00
parent 964a50d246
commit a43b3da4be
4 changed files with 29 additions and 14 deletions

View File

@@ -6,9 +6,7 @@ on:
       - main
   push:
     branches:
-      - dev_wjm
       - dev_jy
-      - dev_wjm_infer
 jobs:
   build:

View File

@@ -5,7 +5,6 @@ on:
       - main
   push:
     branches:
-      - dev_wjm
       - main
       - dev_lyh

View File

@@ -34,20 +34,27 @@ decoder_conf:
     self_attention_dropout_rate: 0.
     src_attention_dropout_rate: 0.
+# frontend related
+frontend: wav_frontend
+frontend_conf:
+    fs: 16000
+    window: hamming
+    n_mels: 80
+    frame_length: 25
+    frame_shift: 10
+    lfr_m: 1
+    lfr_n: 1
 # hybrid CTC/attention
 model_conf:
     ctc_weight: 0.3
     lsm_weight: 0.1     # label smoothing option
     length_normalized_loss: false
-# minibatch related
-batch_type: numel
-batch_bins: 25000000
 # optimization related
 accum_grad: 1
 grad_clip: 5
-max_epoch: 60
+max_epoch: 180
 val_scheduler_criterion:
     - valid
     - acc
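The hunk above adds a wav_frontend block: 80-dimensional mel filterbank features from 25 ms Hamming windows at a 10 ms shift on 16 kHz audio, with lfr_m: 1 and lfr_n: 1 leaving low-frame-rate stacking disabled. Below is a minimal sketch, assuming torchaudio is available, of roughly the feature extraction those parameters describe; it is an illustration, not the project's own wav_frontend implementation. The remaining hunks of this config file follow after it.

import torchaudio
import torchaudio.compliance.kaldi as kaldi

# "example.wav" is a hypothetical 16 kHz input file, not a path from this repo.
waveform, fs = torchaudio.load("example.wav")
assert fs == 16000  # frontend_conf sets fs: 16000

feats = kaldi.fbank(
    waveform,
    num_mel_bins=80,         # n_mels: 80
    frame_length=25.0,       # frame_length: 25 (ms)
    frame_shift=10.0,        # frame_shift: 10 (ms)
    window_type="hamming",   # window: hamming
    sample_frequency=16000,  # fs: 16000
)
# With lfr_m: 1 and lfr_n: 1 there is no frame stacking or subsampling here,
# so the encoder sees one 80-dim frame every 10 ms.
print(feats.shape)  # (num_frames, 80)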
@@ -65,10 +72,6 @@ scheduler: warmuplr
 scheduler_conf:
     warmup_steps: 35000
-num_workers: 4              # num of workers of data loader
-use_amp: true               # automatic mixed precision
-unused_parameters: false    # set as true if some params are unused in DDP
 specaug: specaug
 specaug_conf:
     apply_time_warp: true
@@ -84,3 +87,18 @@ specaug_conf:
     - 0.
     - 0.05
     num_time_mask: 10
+dataset_conf:
+    data_names: speech,text
+    data_types: sound,text
+    shuffle: True
+    shuffle_conf:
+        shuffle_size: 2048
+        sort_size: 500
+    batch_conf:
+        batch_type: token
+        batch_size: 25000
+    num_workers: 8
+log_interval: 50
+normalize: None
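This file also swaps the old top-level minibatch settings (batch_type: numel, batch_bins: 25000000) for a dataset_conf block whose batch_conf uses token-based dynamic batching: utterances are packed into a batch until their summed length reaches batch_size: 25000. The following is a minimal sketch of that packing logic under those assumptions; the names (token_batches, utterances, length) are hypothetical and this is not the repository's actual data loader.

from typing import Iterable, List, Tuple

def token_batches(
    utterances: Iterable[Tuple[str, int]],  # (utterance id, length in frames/tokens)
    batch_size: int = 25000,                # token budget per batch, as in batch_conf
) -> List[List[str]]:
    batches, current, current_tokens = [], [], 0
    for utt_id, length in utterances:
        # Start a new batch once adding this utterance would exceed the budget.
        if current and current_tokens + length > batch_size:
            batches.append(current)
            current, current_tokens = [], 0
        current.append(utt_id)
        current_tokens += length
    if current:
        batches.append(current)
    return batches

# Longer utterances yield smaller batches, so per-batch memory stays roughly constant.
print(token_batches([("a", 12000), ("b", 9000), ("c", 8000), ("d", 500)]))
# [['a', 'b'], ['c', 'd']]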

View File

@@ -3,8 +3,8 @@
 . ./path.sh || exit 1;
 # machines configuration
-CUDA_VISIBLE_DEVICES="0,1"
-gpu_num=2
+CUDA_VISIBLE_DEVICES="0,1,2,3"
+gpu_num=4
 count=1
 gpu_inference=true    # Whether to perform gpu decoding, set false for cpu decoding
 # for gpu decoding, inference_nj=ngpu*njob; for cpu decoding, inference_nj=njob
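The last file raises the training setup from two to four visible GPUs (gpu_num=4). For decoding, the script's own comment gives the job count as inference_nj = ngpu * njob on GPU and njob on CPU; here is a tiny sketch of that arithmetic, with njob as an illustrative value that run.sh sets elsewhere.

def inference_nj(gpu_inference: bool, ngpu: int, njob: int) -> int:
    # Mirrors the comment above: GPU decoding fans out njob jobs per GPU.
    return ngpu * njob if gpu_inference else njob

print(inference_nj(True, ngpu=4, njob=2))   # 8 decoding jobs across 4 GPUs
print(inference_nj(False, ngpu=4, njob=2))  # 2 CPU decoding jobs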