#!/usr/bin/env bash

# Set bash to 'debug' mode: it will exit on
# -e 'error', -u 'undefined variable', and -o pipefail 'error in pipeline'.
set -e
set -u
set -o pipefail

log() {
    local fname=${BASH_SOURCE[1]##*/}
    echo -e "$(date '+%Y-%m-%dT%H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}
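# A log line looks like (hypothetical timestamp):
#   2024-01-01T00:00:00 (run.sh:123:main) Stage 1: ...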
min() {
    local a b
    a=$1
    for b in "$@"; do
        if [ "${b}" -le "${a}" ]; then
            a="${b}"
        fi
    done
    echo "${a}"
}
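# e.g. `min 16 4 8` prints "4"; used below to cap the number of decoding jobs
# at the number of keys to decode.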
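# SECONDS is a bash builtin that counts elapsed seconds; reset it here so the
# final log can report the total runtime.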
SECONDS=0

# General configuration
stage=1              # Processing starts from the specified stage.
stop_stage=10000     # Processing stops at the specified stage.
skip_data_prep=false # Skip data preparation stages.
skip_train=false     # Skip training stages.
skip_eval=false      # Skip decoding and evaluation stages.
skip_upload=true     # Skip packing and uploading stages.
ngpu=1               # The number of gpus ("0" uses cpu, otherwise use gpu).
num_nodes=1          # The number of nodes.
nj=16                # The number of parallel jobs.
inference_nj=16      # The number of parallel jobs in decoding.
gpu_inference=false  # Whether to perform gpu decoding.
njob_infer=4         # The number of decoding jobs per gpu.
dumpdir=dump2        # Directory to dump features.
expdir=exp           # Directory to save experiments.
python=python3       # Specify python to execute espnet commands.
device=0             # Which gpus are used for local training/decoding, e.g. "0,1".

# Data preparation related
local_data_opts= # The options given to local/data.sh.

# Speed perturbation related
speed_perturb_factors= # perturbation factors, e.g. "0.9 1.0 1.1" (separated by space).

# Feature extraction related
feats_type=raw       # Feature type (raw or fbank_pitch).
audio_format=flac    # Audio format: wav, flac, wav.ark, flac.ark (only in feats_type=raw).
fs=16000             # Sampling rate.
min_wav_duration=0.1 # Minimum duration in seconds.
max_wav_duration=20  # Maximum duration in seconds.

# Tokenization related
token_type=bpe      # Tokenization type (char or bpe).
nbpe=30             # The size of the BPE vocabulary.
bpemode=unigram     # Mode of BPE (unigram or bpe).
oov="<unk>"         # Out-of-vocabulary symbol.
blank="<blank>"     # CTC blank symbol.
sos_eos="<sos/eos>" # sos and eos symbol.
bpe_input_sentence_size=100000000 # Size of input sentence for BPE.
bpe_nlsyms=        # Non-linguistic symbols list, separated by a comma, for BPE.
bpe_char_cover=1.0 # Character coverage when modeling BPE.

# Language model related
use_lm=true       # Use language model for ASR decoding.
lm_tag=           # Suffix to the result dir for language model training.
lm_exp=           # Specify the directory path for the LM experiment.
                  # If this option is specified, lm_tag is ignored.
lm_stats_dir=     # Specify the directory path for LM statistics.
lm_config=        # Config for language model training.
lm_args=          # Arguments for language model training, e.g., "--max_epoch 10".
                  # Note that it will overwrite args in the lm config.
use_word_lm=false # Whether to use word language model.
num_splits_lm=1   # Number of splits for the LM corpus.
# shellcheck disable=SC2034
word_vocab_size=10000 # Size of word vocabulary.

# ASR model related
asr_tag=       # Suffix to the result dir for asr model training.
asr_exp=       # Specify the directory path for the ASR experiment.
               # If this option is specified, asr_tag is ignored.
sa_asr_exp=    # Specify the directory path for the SA-ASR experiment.
asr_stats_dir= # Specify the directory path for ASR statistics.
asr_config=    # Config for asr model training.
sa_asr_config= # Config for sa-asr model training.
asr_args=      # Arguments for asr model training, e.g., "--max_epoch 10".
               # Note that it will overwrite args in the asr config.
feats_normalize=global_mvn # Normalization layer type.
num_splits_asr=1           # Number of splits for the ASR corpus.

# Decoding related
inference_tag=    # Suffix to the result dir for decoding.
inference_config= # Config for decoding.
inference_args=   # Arguments for decoding, e.g., "--lm_weight 0.1".
                  # Note that it will overwrite args in the inference config.
sa_asr_inference_tag=  # Suffix to the result dir for SA-ASR decoding.
sa_asr_inference_args= # Arguments for SA-ASR decoding.

inference_lm=valid.loss.ave.pb       # Language model path for decoding.
inference_asr_model=valid.acc.ave.pb # ASR model path for decoding.
                                     # e.g.
                                     # inference_asr_model=train.loss.best.pth
                                     # inference_asr_model=3epoch.pth
                                     # inference_asr_model=valid.acc.best.pth
                                     # inference_asr_model=valid.loss.ave.pth
inference_sa_asr_model=valid.acc_spk.ave.pb # SA-ASR model path for decoding.
download_model= # Download a model from Model Zoo and use it for decoding.

# [Task dependent] Set the datadir name created by local/data.sh
train_set=      # Name of training set.
valid_set=      # Name of validation set used for monitoring/tuning network training.
test_sets=      # Names of test sets. Multiple items (e.g., both dev and eval sets) can be specified.
bpe_train_text= # Text file path of bpe training set.
lm_train_text=  # Text file path of language model training set.
lm_dev_text=    # Text file path of language model development set.
lm_test_text=   # Text file path of language model evaluation set.
nlsyms_txt=none # Non-linguistic symbol list if existing.
cleaner=none    # Text cleaner.
g2p=none        # g2p method (needed if token_type=phn).
lang=zh         # The language type of corpus.
score_opts=       # The options given to sclite scoring.
local_score_opts= # The options given to local/score.sh.

help_message=$(cat << EOF
Usage: $0 --train-set "<train_set_name>" --valid-set "<valid_set_name>" --test_sets "<test_set_names>"

Options:
    # General configuration
    --stage          # Processing starts from the specified stage (default="${stage}").
    --stop_stage     # Processing stops at the specified stage (default="${stop_stage}").
    --skip_data_prep # Skip data preparation stages (default="${skip_data_prep}").
    --skip_train     # Skip training stages (default="${skip_train}").
    --skip_eval      # Skip decoding and evaluation stages (default="${skip_eval}").
    --skip_upload    # Skip packing and uploading stages (default="${skip_upload}").
    --ngpu           # The number of gpus ("0" uses cpu, otherwise use gpu, default="${ngpu}").
    --num_nodes      # The number of nodes (default="${num_nodes}").
    --nj             # The number of parallel jobs (default="${nj}").
    --inference_nj   # The number of parallel jobs in decoding (default="${inference_nj}").
    --gpu_inference  # Whether to perform gpu decoding (default="${gpu_inference}").
    --dumpdir        # Directory to dump features (default="${dumpdir}").
    --expdir         # Directory to save experiments (default="${expdir}").
    --python         # Specify python to execute espnet commands (default="${python}").
    --device         # Which gpus are used for local training (default="${device}").

    # Data preparation related
    --local_data_opts # The options given to local/data.sh (default="${local_data_opts}").

    # Speed perturbation related
    --speed_perturb_factors # speed perturbation factors, e.g. "0.9 1.0 1.1" (separated by space, default="${speed_perturb_factors}").

    # Feature extraction related
    --feats_type       # Feature type (raw, fbank_pitch or extracted, default="${feats_type}").
    --audio_format     # Audio format: wav, flac, wav.ark, flac.ark (only in feats_type=raw, default="${audio_format}").
    --fs               # Sampling rate (default="${fs}").
    --min_wav_duration # Minimum duration in seconds (default="${min_wav_duration}").
    --max_wav_duration # Maximum duration in seconds (default="${max_wav_duration}").

    # Tokenization related
    --token_type              # Tokenization type (char or bpe, default="${token_type}").
    --nbpe                    # The size of the BPE vocabulary (default="${nbpe}").
    --bpemode                 # Mode of BPE (unigram or bpe, default="${bpemode}").
    --oov                     # Out-of-vocabulary symbol (default="${oov}").
    --blank                   # CTC blank symbol (default="${blank}").
    --sos_eos                 # sos and eos symbol (default="${sos_eos}").
    --bpe_input_sentence_size # Size of input sentence for BPE (default="${bpe_input_sentence_size}").
    --bpe_nlsyms              # Non-linguistic symbol list for sentencepiece, separated by a comma (default="${bpe_nlsyms}").
    --bpe_char_cover          # Character coverage when modeling BPE (default="${bpe_char_cover}").

    # Language model related
    --lm_tag          # Suffix to the result dir for language model training (default="${lm_tag}").
    --lm_exp          # Specify the directory path for the LM experiment.
                      # If this option is specified, lm_tag is ignored (default="${lm_exp}").
    --lm_stats_dir    # Specify the directory path for LM statistics (default="${lm_stats_dir}").
    --lm_config       # Config for language model training (default="${lm_config}").
    --lm_args         # Arguments for language model training (default="${lm_args}").
                      # e.g., --lm_args "--max_epoch 10"
                      # Note that it will overwrite args in the lm config.
    --use_word_lm     # Whether to use word language model (default="${use_word_lm}").
    --word_vocab_size # Size of word vocabulary (default="${word_vocab_size}").
    --num_splits_lm   # Number of splits for the LM corpus (default="${num_splits_lm}").

    # ASR model related
    --asr_tag        # Suffix to the result dir for asr model training (default="${asr_tag}").
    --asr_exp        # Specify the directory path for the ASR experiment.
                     # If this option is specified, asr_tag is ignored (default="${asr_exp}").
    --asr_stats_dir  # Specify the directory path for ASR statistics (default="${asr_stats_dir}").
    --asr_config     # Config for asr model training (default="${asr_config}").
    --asr_args       # Arguments for asr model training (default="${asr_args}").
                     # e.g., --asr_args "--max_epoch 10"
                     # Note that it will overwrite args in the asr config.
    --feats_normalize # Normalization layer type (default="${feats_normalize}").
    --num_splits_asr  # Number of splits for the ASR corpus (default="${num_splits_asr}").

    # Decoding related
    --inference_tag       # Suffix to the result dir for decoding (default="${inference_tag}").
    --inference_config    # Config for decoding (default="${inference_config}").
    --inference_args      # Arguments for decoding (default="${inference_args}").
                          # e.g., --inference_args "--lm_weight 0.1"
                          # Note that it will overwrite args in the inference config.
    --inference_lm        # Language model path for decoding (default="${inference_lm}").
    --inference_asr_model # ASR model path for decoding (default="${inference_asr_model}").
    --download_model      # Download a model from Model Zoo and use it for decoding (default="${download_model}").

    # [Task dependent] Set the datadir name created by local/data.sh
    --train_set        # Name of training set (required).
    --valid_set        # Name of validation set used for monitoring/tuning network training (required).
    --test_sets        # Names of test sets.
                       # Multiple items (e.g., both dev and eval sets) can be specified (required).
    --bpe_train_text   # Text file path of bpe training set.
    --lm_train_text    # Text file path of language model training set.
    --lm_dev_text      # Text file path of language model development set (default="${lm_dev_text}").
    --lm_test_text     # Text file path of language model evaluation set (default="${lm_test_text}").
    --nlsyms_txt       # Non-linguistic symbol list if existing (default="${nlsyms_txt}").
    --cleaner          # Text cleaner (default="${cleaner}").
    --g2p              # g2p method (default="${g2p}").
    --lang             # The language type of corpus (default="${lang}").
    --score_opts       # The options given to sclite scoring (default="${score_opts}").
    --local_score_opts # The options given to local/score.sh (default="${local_score_opts}").
EOF
)

log "$0 $*"
# Save command-line args for logging (they will be lost after utils/parse_options.sh)
run_args=$(python -m funasr.utils.cli_utils $0 "$@")
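# utils/parse_options.sh parses "--name value" command-line flags into the
# corresponding shell variables, e.g. "--stage 3 --ngpu 2" sets stage=3 and ngpu=2.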
. utils/parse_options.sh

if [ $# -ne 0 ]; then
    log "${help_message}"
    log "Error: No positional arguments are required."
    exit 2
fi

. ./path.sh


# Check required arguments
[ -z "${train_set}" ] && { log "${help_message}"; log "Error: --train_set is required"; exit 2; };
[ -z "${valid_set}" ] && { log "${help_message}"; log "Error: --valid_set is required"; exit 2; };
[ -z "${test_sets}" ] && { log "${help_message}"; log "Error: --test_sets is required"; exit 2; };

# Check feature type
|
|
if [ "${feats_type}" = raw ]; then
|
|
data_feats=${dumpdir}/raw
|
|
elif [ "${feats_type}" = fbank_pitch ]; then
|
|
data_feats=${dumpdir}/fbank_pitch
|
|
elif [ "${feats_type}" = fbank ]; then
|
|
data_feats=${dumpdir}/fbank
|
|
elif [ "${feats_type}" == extracted ]; then
|
|
data_feats=${dumpdir}/extracted
|
|
else
|
|
log "${help_message}"
|
|
log "Error: not supported: --feats_type ${feats_type}"
|
|
exit 2
|
|
fi

# Use the same text as ASR for bpe training if not specified.
[ -z "${bpe_train_text}" ] && bpe_train_text="${data_feats}/${train_set}/text"
# Use the same text as ASR for lm training if not specified.
[ -z "${lm_train_text}" ] && lm_train_text="${data_feats}/${train_set}/text"
# Use the same text as ASR for lm development if not specified.
[ -z "${lm_dev_text}" ] && lm_dev_text="${data_feats}/${valid_set}/text"
# Use the text of the 1st evaldir if lm_test is not specified.
[ -z "${lm_test_text}" ] && lm_test_text="${data_feats}/${test_sets%% *}/text"

# Check tokenization type
if [ "${lang}" != noinfo ]; then
    token_listdir=data/${lang}_token_list
else
    token_listdir=data/token_list
fi
bpedir="${token_listdir}/bpe_${bpemode}${nbpe}"
bpeprefix="${bpedir}"/bpe
bpemodel="${bpeprefix}".model
bpetoken_list="${bpedir}"/tokens.txt
chartoken_list="${token_listdir}"/char/tokens.txt
# NOTE: keep for future development.
# shellcheck disable=SC2034
wordtoken_list="${token_listdir}"/word/tokens.txt

if [ "${token_type}" = bpe ]; then
|
|
token_list="${bpetoken_list}"
|
|
elif [ "${token_type}" = char ]; then
|
|
token_list="${chartoken_list}"
|
|
bpemodel=none
|
|
elif [ "${token_type}" = word ]; then
|
|
token_list="${wordtoken_list}"
|
|
bpemodel=none
|
|
else
|
|
log "Error: not supported --token_type '${token_type}'"
|
|
exit 2
|
|
fi
|
|
if ${use_word_lm}; then
|
|
log "Error: Word LM is not supported yet"
|
|
exit 2
|
|
|
|
lm_token_list="${wordtoken_list}"
|
|
lm_token_type=word
|
|
else
|
|
lm_token_list="${token_list}"
|
|
lm_token_type="${token_type}"
|
|
fi
|
|
|
|
|
|
# Set tag for naming of model directory
if [ -z "${asr_tag}" ]; then
    if [ -n "${asr_config}" ]; then
        asr_tag="$(basename "${asr_config}" .yaml)_${feats_type}"
    else
        asr_tag="train_${feats_type}"
    fi
    if [ "${lang}" != noinfo ]; then
        asr_tag+="_${lang}_${token_type}"
    else
        asr_tag+="_${token_type}"
    fi
    if [ "${token_type}" = bpe ]; then
        asr_tag+="${nbpe}"
    fi
    # Add info of overwritten args
    if [ -n "${asr_args}" ]; then
        asr_tag+="$(echo "${asr_args}" | sed -e "s/--/\_/g" -e "s/[ |=/]//g")"
    fi
    if [ -n "${speed_perturb_factors}" ]; then
        asr_tag+="_sp"
    fi
fi
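# For example, with the defaults above, a hypothetical asr_config=conf/train_asr.yaml
# and --asr_args "--max_epoch 10", the tag becomes "train_asr_raw_zh_bpe30_max_epoch10".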
if [ -z "${lm_tag}" ]; then
    if [ -n "${lm_config}" ]; then
        lm_tag="$(basename "${lm_config}" .yaml)"
    else
        lm_tag="train"
    fi
    if [ "${lang}" != noinfo ]; then
        lm_tag+="_${lang}_${lm_token_type}"
    else
        lm_tag+="_${lm_token_type}"
    fi
    if [ "${lm_token_type}" = bpe ]; then
        lm_tag+="${nbpe}"
    fi
    # Add info of overwritten args
    if [ -n "${lm_args}" ]; then
        lm_tag+="$(echo "${lm_args}" | sed -e "s/--/\_/g" -e "s/[ |=/]//g")"
    fi
fi

# The directory used for collect-stats mode
if [ -z "${asr_stats_dir}" ]; then
    if [ "${lang}" != noinfo ]; then
        asr_stats_dir="${expdir}/asr_stats_${feats_type}_${lang}_${token_type}"
    else
        asr_stats_dir="${expdir}/asr_stats_${feats_type}_${token_type}"
    fi
    if [ "${token_type}" = bpe ]; then
        asr_stats_dir+="${nbpe}"
    fi
    if [ -n "${speed_perturb_factors}" ]; then
        asr_stats_dir+="_sp"
    fi
fi
if [ -z "${lm_stats_dir}" ]; then
    if [ "${lang}" != noinfo ]; then
        lm_stats_dir="${expdir}/lm_stats_${lang}_${lm_token_type}"
    else
        lm_stats_dir="${expdir}/lm_stats_${lm_token_type}"
    fi
    if [ "${lm_token_type}" = bpe ]; then
        lm_stats_dir+="${nbpe}"
    fi
fi
# The directory used for training commands
if [ -z "${asr_exp}" ]; then
    asr_exp="${expdir}/asr_${asr_tag}"
fi
if [ -z "${lm_exp}" ]; then
    lm_exp="${expdir}/lm_${lm_tag}"
fi

if [ -z "${inference_tag}" ]; then
|
|
if [ -n "${inference_config}" ]; then
|
|
inference_tag="$(basename "${inference_config}" .yaml)"
|
|
else
|
|
inference_tag=inference
|
|
fi
|
|
# Add overwritten arg's info
|
|
if [ -n "${inference_args}" ]; then
|
|
inference_tag+="$(echo "${inference_args}" | sed -e "s/--/\_/g" -e "s/[ |=]//g")"
|
|
fi
|
|
if "${use_lm}"; then
|
|
inference_tag+="_lm_$(basename "${lm_exp}")_$(echo "${inference_lm}" | sed -e "s/\//_/g" -e "s/\.[^.]*$//g")"
|
|
fi
|
|
inference_tag+="_asr_model_$(echo "${inference_asr_model}" | sed -e "s/\//_/g" -e "s/\.[^.]*$//g")"
|
|
fi
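# For example, with the defaults above (use_lm=true, inference_lm=valid.loss.ave.pb,
# inference_asr_model=valid.acc.ave.pb), the tag ends with
# "_lm_<lm_exp_name>_valid.loss.ave_asr_model_valid.acc.ave": the sed calls replace
# "/" with "_" and strip the trailing file extension.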

if [ -z "${sa_asr_inference_tag}" ]; then
    if [ -n "${inference_config}" ]; then
        sa_asr_inference_tag="$(basename "${inference_config}" .yaml)"
    else
        sa_asr_inference_tag=sa_asr_inference
    fi
    # Add info of overwritten args
    if [ -n "${sa_asr_inference_args}" ]; then
        sa_asr_inference_tag+="$(echo "${sa_asr_inference_args}" | sed -e "s/--/\_/g" -e "s/[ |=]//g")"
    fi
    if "${use_lm}"; then
        sa_asr_inference_tag+="_lm_$(basename "${lm_exp}")_$(echo "${inference_lm}" | sed -e "s/\//_/g" -e "s/\.[^.]*$//g")"
    fi
    sa_asr_inference_tag+="_asr_model_$(echo "${inference_sa_asr_model}" | sed -e "s/\//_/g" -e "s/\.[^.]*$//g")"
fi
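
# run.pl executes all jobs on the local machine; on a grid, these could be
# swapped for Kaldi's queue.pl or slurm.pl with matching options.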
train_cmd="run.pl"
cuda_cmd="run.pl"
decode_cmd="run.pl"

# ========================== Main stages start from here. ==========================

if ! "${skip_data_prep}"; then

    if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
        if [ "${feats_type}" = raw ]; then
            log "Stage 1: Format wav.scp: data/ -> ${data_feats}"

            # ====== Recreating "wav.scp" ======
            # Kaldi-style wav.scp can describe a file path with a unix pipe, like "cat /some/path |",
            # which shouldn't be used in the training process.
            # "format_wav_scp.sh" dumps such pipe-style wavs to real audio files
            # and can also change the audio format and sampling rate.
            # If nothing is needed, format_wav_scp.sh does nothing:
            # i.e. the input file format and rate are the same as the output.

            # Note: ${test_sets} is intentionally unquoted so that multiple test sets are iterated.
            for dset in ${test_sets}; do
                _suf=""

                local/copy_data_dir.sh --validate_opts --non-print data/"${dset}" "${data_feats}${_suf}/${dset}"

                rm -f ${data_feats}${_suf}/${dset}/{segments,wav.scp,reco2file_and_channel,reco2dur}
                _opts=
                if [ -e data/"${dset}"/segments ]; then
                    # "segments" is used for splitting the wav files written in wav.scp
                    # into utterances. The file format of segments:
                    #   <segment_id> <record_id> <start_time> <end_time>
                    # e.g. "call-861225-A-0050-0065 call-861225-A 5.0 6.5"
                    # where the times are written in seconds.
                    _opts+="--segments data/${dset}/segments "
                fi
                # shellcheck disable=SC2086
                local/format_wav_scp.sh --nj "${nj}" --cmd "${train_cmd}" \
                    --audio-format "${audio_format}" --fs "${fs}" ${_opts} \
                    "data/${dset}/wav.scp" "${data_feats}${_suf}/${dset}"

                echo "${feats_type}" > "${data_feats}${_suf}/${dset}/feats_type"
            done
        else
            log "Error: not supported: --feats_type ${feats_type}"
            exit 2
        fi
    fi

    if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
        log "Stage 2: Generate speaker profiles by spectral clustering"
        mkdir -p "profile_log"
        # Note: ${test_sets} is intentionally unquoted so that multiple test sets are iterated.
        for dset in ${test_sets}; do
            # Generate cluster_profile with spectral clustering directly (for inference, without oracle information).
            python local/gen_cluster_profile_infer.py "${data_feats}/${dset}" "data/${dset}" 0.996 0.815 &> "profile_log/gen_cluster_profile_infer_${dset}.log"
            log "Successfully generated cluster profile for ${dset} (${data_feats}/${dset}/cluster_profile_infer.scp)"
        done
    fi

else
    log "Skip the stages for data preparation"
fi


# ========================== Data preparation is done here. ==========================

if ! "${skip_eval}"; then

    if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
        log "Stage 3: Decoding SA-ASR (cluster profile): training_dir=${sa_asr_exp}"

        if ${gpu_inference}; then
            _cmd="${cuda_cmd}"
            inference_nj=$((ngpu * njob_infer))
            _ngpu=1
        else
            _cmd="${decode_cmd}"
            inference_nj=${njob_infer}
            _ngpu=0
        fi

        _opts=
        if [ -n "${inference_config}" ]; then
            _opts+="--config ${inference_config} "
        fi
        if "${use_lm}"; then
            if "${use_word_lm}"; then
                _opts+="--word_lm_train_config ${lm_exp}/config.yaml "
                _opts+="--word_lm_file ${lm_exp}/${inference_lm} "
            else
                _opts+="--lm_train_config ${lm_exp}/config.yaml "
                _opts+="--lm_file ${lm_exp}/${inference_lm} "
            fi
        fi

        # Generate run.sh that can resume decoding from this stage
        log "Generate '${sa_asr_exp}/${sa_asr_inference_tag}.cluster/run.sh'. You can resume the process from stage 3 using this script"
        mkdir -p "${sa_asr_exp}/${sa_asr_inference_tag}.cluster"; echo "${run_args} --stage 3 \"\$@\"; exit \$?" > "${sa_asr_exp}/${sa_asr_inference_tag}.cluster/run.sh"; chmod +x "${sa_asr_exp}/${sa_asr_inference_tag}.cluster/run.sh"

        for dset in ${test_sets}; do
            _data="${data_feats}/${dset}"
            _dir="${sa_asr_exp}/${sa_asr_inference_tag}.cluster/${dset}"
            _logdir="${_dir}/logdir"
            mkdir -p "${_logdir}"

            _feats_type="$(<${_data}/feats_type)"
            if [ "${_feats_type}" = raw ]; then
                _scp=wav.scp
                if [[ "${audio_format}" == *ark* ]]; then
                    _type=kaldi_ark
                else
                    _type=sound
                fi
            else
                _scp=feats.scp
                _type=kaldi_ark
            fi

            # 1. Split the key file
            key_file=${_data}/${_scp}
            split_scps=""
            _nj=$(min "${inference_nj}" "$(<${key_file} wc -l)")
            for n in $(seq "${_nj}"); do
                split_scps+=" ${_logdir}/keys.${n}.scp"
            done
            # shellcheck disable=SC2086
            utils/split_scp.pl "${key_file}" ${split_scps}
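            # e.g. with _nj=4 this yields keys.1.scp ... keys.4.scp of roughly
            # equal size, one key list per parallel decoding job.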

            # 2. Submit decoding jobs
            log "Decoding started... log: '${_logdir}/asr_inference.*.log'"
            # shellcheck disable=SC2086
            ${_cmd} --gpu "${_ngpu}" --max-jobs-run "${_nj}" JOB=1:"${_nj}" "${_logdir}"/asr_inference.JOB.log \
                python -m funasr.bin.asr_inference_launch \
                    --batch_size 1 \
                    --mc True \
                    --nbest 1 \
                    --ngpu "${_ngpu}" \
                    --njob ${njob_infer} \
                    --gpuid_list ${device} \
                    --data_path_and_name_and_type "${_data}/${_scp},speech,${_type}" \
                    --data_path_and_name_and_type "${_data}/cluster_profile_infer.scp,profile,npy" \
                    --key_file "${_logdir}"/keys.JOB.scp \
                    --allow_variable_data_keys true \
                    --asr_train_config "${sa_asr_exp}"/config.yaml \
                    --asr_model_file "${sa_asr_exp}"/"${inference_sa_asr_model}" \
                    --output_dir "${_logdir}"/output.JOB \
                    --mode sa_asr \
                    ${_opts}
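
            # Each job writes its hypotheses under "${_logdir}/output.JOB/1best_recog/";
            # the next step merges them into per-dataset result files.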
            # 3. Concatenate the output files from each job
            for f in token token_int score text text_id; do
                for i in $(seq "${_nj}"); do
                    cat "${_logdir}/output.${i}/1best_recog/${f}"
                done | LC_ALL=C sort -k1 >"${_dir}/${f}"
            done
        done
    fi

    if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
        log "Stage 4: Generate SA-ASR results (cluster profile)"

        for dset in ${test_sets}; do
            _dir="${sa_asr_exp}/${sa_asr_inference_tag}.cluster/${dset}"

            python local/process_text_spk_merge.py ${_dir}
        done
    fi

else
    log "Skip the evaluation stages"
fi


log "Successfully finished. [elapsed=${SECONDS}s]"