diff --git a/examples/aishell/llm_asr_nar/conf/template.yaml b/examples/aishell/llm_asr_nar/conf/template.yaml
deleted file mode 100644
index d52963575..000000000
--- a/examples/aishell/llm_asr_nar/conf/template.yaml
+++ /dev/null
@@ -1,93 +0,0 @@
-# This is an example that demonstrates how to configure a model file.
-# You can modify the configuration according to your own requirements.
-
-# to print the register_table:
-# from funasr.register import tables
-# tables.print()
-
-# network architecture
-model: LLMASRNAR
-model_conf:
-    lsm_weight: 0.1 # label smoothing option
-    length_normalized_loss: true
-
-# encoder
-encoder: Paraformer
-encoder_conf:
-    hub: funasr
-    init_param_path: "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
-    freeze: false
-
-llm: Vicuna
-llm_conf:
-    hub: hf
-    init_param_path: "/nfs/maziyang.mzy/models/vicuna-7b-v1.5"
-    freeze: true
-
-adaptor: Linear
-adaptor_conf:
-    downsample_rate: 1
-    llm_dim: 4096
-    encoder_dim: 512
-
-# frontend related
-frontend: WavFrontend
-frontend_conf:
-    fs: 16000
-    window: hamming
-    n_mels: 80
-    frame_length: 25
-    frame_shift: 10
-    lfr_m: 7
-    lfr_n: 6
-    cmvn_file: "/root/.cache/modelscope/hub/iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/am.mvn"
-
-specaug: SpecAugLFR
-specaug_conf:
-    apply_time_warp: false
-    time_warp_window: 5
-    time_warp_mode: bicubic
-    apply_freq_mask: true
-    freq_mask_width_range:
-    - 0
-    - 30
-    lfr_rate: 6
-    num_freq_mask: 1
-    apply_time_mask: true
-    time_mask_width_range:
-    - 0
-    - 12
-    num_time_mask: 1
-
-train_conf:
-    accum_grad: 1
-    grad_clip: 5
-    max_epoch: 150
-    keep_nbest_models: 10
-    log_interval: 10
-
-optim: adamw
-optim_conf:
-    lr: 0.0001
-    weight_decay: 0.000001
-scheduler: warmuplr
-scheduler_conf:
-    warmup_steps: 1500
-
-dataset: AudioLLMDataset
-dataset_conf:
-    index_ds: IndexDSJsonl
-    batch_sampler: RankFullLocalShuffleBatchSampler
-    batch_type: example # example or length
-    batch_size: 8 # if batch_type is example, batch_size is the number of samples; if length, batch_size is source_token_len+target_token_len
-    max_token_length: 2048 # filter samples if source_token_len+target_token_len > max_token_length
-    buffer_size: 500
-    shuffle: True
-    num_workers: 4
-    preprocessor_text: TextPreprocessRemovePunctuation
-
-tokenizer: HuggingfaceTokenizer
-tokenizer_conf:
-    unk_symbol:
-    init_param_path: "/nfs/maziyang.mzy/models/vicuna-7b-v1.5"
-
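
For context, the deleted template wires registered FunASR components together by name (`model`, `encoder`, `llm`, `adaptor`, `frontend`, `specaug`, `tokenizer`), each paired with a matching `*_conf` block. Below is a minimal sketch of inspecting such a template before training, assuming PyYAML and an installed `funasr`; `tables.print()` comes from the template's own comment, while the load-and-check steps are purely illustrative, not the project's training entry point.

```python
import yaml

from funasr.register import tables

# Print the register table to see which names are valid for each field
# (LLMASRNAR, Paraformer, Vicuna, ...), as the template's comment suggests.
tables.print()

# Load the YAML and sanity-check a few fields before launching training.
# This path is the file deleted in this diff; point it at your own config.
with open("examples/aishell/llm_asr_nar/conf/template.yaml") as f:
    cfg = yaml.safe_load(f)

assert cfg["model"] == "LLMASRNAR"
assert cfg["dataset_conf"]["batch_type"] in ("example", "length")
print(cfg["adaptor_conf"])  # {'downsample_rate': 1, 'llm_dim': 4096, 'encoder_dim': 512}
```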