diff --git a/egs/librispeech/conformer/conf/decode_asr_transformer.yaml b/egs/librispeech/conformer/conf/decode_asr_transformer_ctc0.3_beam5.yaml
similarity index 68%
rename from egs/librispeech/conformer/conf/decode_asr_transformer.yaml
rename to egs/librispeech/conformer/conf/decode_asr_transformer_ctc0.3_beam5.yaml
index 2a0cb6fe6..8f7c75d1c 100644
--- a/egs/librispeech/conformer/conf/decode_asr_transformer.yaml
+++ b/egs/librispeech/conformer/conf/decode_asr_transformer_ctc0.3_beam5.yaml
@@ -1,6 +1,6 @@
-beam_size: 10
+beam_size: 5
 penalty: 0.0
 maxlenratio: 0.0
 minlenratio: 0.0
 ctc_weight: 0.3
-lm_weight: 0.7
+lm_weight: 0.0
diff --git a/egs/librispeech_100h/conformer/conf/decode_asr_transformer.yaml b/egs/librispeech/conformer/conf/decode_asr_transformer_ctc0.3_beam60.yaml
similarity index 68%
rename from egs/librispeech_100h/conformer/conf/decode_asr_transformer.yaml
rename to egs/librispeech/conformer/conf/decode_asr_transformer_ctc0.3_beam60.yaml
index 2a0cb6fe6..0ebb9afc9 100644
--- a/egs/librispeech_100h/conformer/conf/decode_asr_transformer.yaml
+++ b/egs/librispeech/conformer/conf/decode_asr_transformer_ctc0.3_beam60.yaml
@@ -1,6 +1,6 @@
-beam_size: 10
+beam_size: 60
 penalty: 0.0
 maxlenratio: 0.0
 minlenratio: 0.0
 ctc_weight: 0.3
-lm_weight: 0.7
+lm_weight: 0.0
diff --git a/egs/librispeech/conformer/run.sh b/egs/librispeech/conformer/run.sh
index b942dd236..b44dad38d 100755
--- a/egs/librispeech/conformer/run.sh
+++ b/egs/librispeech/conformer/run.sh
@@ -53,8 +53,8 @@ test_sets="test_clean test_other dev_clean dev_other"
 asr_config=conf/train_asr_conformer.yaml
 model_dir="baseline_$(basename "${asr_config}" .yaml)_${lang}_${token_type}_${tag}"
 
-inference_config=conf/decode_asr_transformer.yaml
-#inference_config=conf/decode_asr_transformer_beam60_ctc0.3.yaml
+inference_config=conf/decode_asr_transformer_ctc0.3_beam5.yaml
+#inference_config=conf/decode_asr_transformer_ctc0.3_beam60.yaml
 inference_asr_model=valid.acc.ave_10best.pb
 
 # you can set gpu num for decoding here
diff --git a/egs/librispeech_100h/conformer/conf/decode_asr_transformer_ctc0.3_beam1.yaml b/egs/librispeech_100h/conformer/conf/decode_asr_transformer_ctc0.3_beam1.yaml
new file mode 100644
index 000000000..edc6bab0b
--- /dev/null
+++ b/egs/librispeech_100h/conformer/conf/decode_asr_transformer_ctc0.3_beam1.yaml
@@ -0,0 +1,6 @@
+beam_size: 1
+penalty: 0.0
+maxlenratio: 0.0
+minlenratio: 0.0
+ctc_weight: 0.3
+lm_weight: 0.0
diff --git a/egs/librispeech_100h/conformer/conf/decode_asr_transformer_ctc0.3_beam20.yaml b/egs/librispeech_100h/conformer/conf/decode_asr_transformer_ctc0.3_beam20.yaml
new file mode 100644
index 000000000..b2b042779
--- /dev/null
+++ b/egs/librispeech_100h/conformer/conf/decode_asr_transformer_ctc0.3_beam20.yaml
@@ -0,0 +1,6 @@
+beam_size: 20
+penalty: 0.0
+maxlenratio: 0.0
+minlenratio: 0.0
+ctc_weight: 0.3
+lm_weight: 0.0
diff --git a/egs/librispeech_100h/conformer/run.sh b/egs/librispeech_100h/conformer/run.sh
index d1a20bca2..98cb90b3a 100755
--- a/egs/librispeech_100h/conformer/run.sh
+++ b/egs/librispeech_100h/conformer/run.sh
@@ -53,8 +53,8 @@ test_sets="test_clean test_other dev_clean dev_other"
 asr_config=conf/train_asr_conformer.yaml
 model_dir="baseline_$(basename "${asr_config}" .yaml)_${lang}_${token_type}_${tag}"
 
-inference_config=conf/decode_asr_transformer.yaml
-#inference_config=conf/decode_asr_transformer_beam60_ctc0.3.yaml
+inference_config=conf/decode_asr_transformer_ctc0.3_beam1.yaml
+#inference_config=conf/decode_asr_transformer_ctc0.3_beam20.yaml
 inference_asr_model=valid.acc.ave_10best.pb
 
 # you can set gpu num for decoding here
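For reference, the decode configs above differ only in beam_size; the remaining keys keep the recipe's joint CTC/attention decoding setup with the external language model disabled. Below is an annotated copy of the beam-20 variant; the comments are editorial, assume ESPnet-style key semantics, and are not part of the committed file.

# conf/decode_asr_transformer_ctc0.3_beam20.yaml (annotated sketch, not the committed file)
beam_size: 20     # number of hypotheses kept at each decoding step
penalty: 0.0      # per-token insertion penalty added to the hypothesis score
maxlenratio: 0.0  # 0.0 = derive the maximum output length from the encoder output length
minlenratio: 0.0  # no minimum output length constraint
ctc_weight: 0.3   # weight of the CTC score in joint CTC/attention decoding
lm_weight: 0.0    # external LM shallow-fusion weight; 0.0 disables the LM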