* update train recipe

* v1.0.8

* llm

* update trainer
zhifu gao 2024-02-21 19:22:59 +08:00 committed by GitHub
parent 9a6c6ab5ea
commit cdca62d933
7 changed files with 24 additions and 22 deletions

View File

@@ -6,7 +6,7 @@
 #git clone https://www.modelscope.cn/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch.git ${local_path}
 ## generate jsonl from wav.scp and text.txt
-#python funasr/datasets/audio_datasets/scp2jsonl.py \
+#python -m funasr.datasets.audio_datasets.scp2jsonl \
 #++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
 #++data_type_list='["source", "target"]' \
 #++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
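Note: scp2jsonl pairs a kaldi-style wav.scp with a text file keyed by the same utterance ids. A minimal sketch of the two inputs referenced above (utterance ids and paths are illustrative, not from this commit):

# wav.scp: <utt_id> <wav_path>
ID0001 /data/wav/ID0001.wav
ID0002 /data/wav/ID0002.wav

# text.txt: <utt_id> <transcript>
ID0001 hello world
ID0002 good morning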

View File

@@ -72,13 +72,6 @@ def parse_context_length(data_list: list, data_type: str):
 @hydra.main(config_name=None, version_base=None)
 def main_hydra(cfg: DictConfig):
-    """
-    python funasr/datasets/audio_datasets/scp2jsonl.py \
-    ++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
-    ++data_type_list='["source", "target"]' \
-    ++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
-    """
     kwargs = OmegaConf.to_container(cfg, resolve=True)
@@ -90,6 +83,13 @@ def main_hydra(cfg: DictConfig):
     gen_jsonl_from_wav_text_list(scp_file_list, data_type_list=data_type_list, jsonl_file_out=jsonl_file_out)
+"""
+python -m funasr.datasets.audio_datasets.scp2jsonl \
+++scp_file_list='["/Users/zhifu/funasr1.0/test_local/wav.scp", "/Users/zhifu/funasr1.0/test_local/text.txt"]' \
+++data_type_list='["source", "target"]' \
+++jsonl_file_out=/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl
+"""
 if __name__ == "__main__":
     main_hydra()
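For reference, the same conversion can be driven from Python without going through hydra. A minimal sketch, assuming gen_jsonl_from_wav_text_list is importable from this module and keeps the call signature shown above (paths are illustrative):

from funasr.datasets.audio_datasets.scp2jsonl import gen_jsonl_from_wav_text_list

# pair wav.scp (source) with text.txt (target), as in the command in the
# docstring, and write a single jsonl manifest
scp_file_list = [
    "/Users/zhifu/funasr1.0/test_local/wav.scp",
    "/Users/zhifu/funasr1.0/test_local/text.txt",
]
gen_jsonl_from_wav_text_list(
    scp_file_list,
    data_type_list=["source", "target"],
    jsonl_file_out="/Users/zhifu/funasr1.0/test_local/audio_datasets.jsonl",
)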

View File

@@ -4,7 +4,7 @@ import torch.nn.functional as F
 try:
     from rotary_embedding_torch import RotaryEmbedding
 except:
-    print("Please install rotary_embedding_torch by: \n pip install -U rotary_embedding_torch")
+    print("If you want to use mossformer, please install rotary_embedding_torch by: \n pip install -U rotary_embedding_torch")
 from funasr.models.transformer.layer_norm import GlobalLayerNorm, CumulativeLayerNorm, ScaleNorm
 from funasr.models.transformer.embedding import ScaledSinuEmbedding
 from funasr.models.transformer.mossformer import FLASH_ShareA_FFConvM
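Aside: the except branch only prints the hint, so a missing package surfaces later when RotaryEmbedding is first used. A minimal alternative sketch that defers the error to the point of use (not what this file does; build_rotary is a hypothetical helper):

try:
    from rotary_embedding_torch import RotaryEmbedding
except ImportError:
    RotaryEmbedding = None  # defer the failure until the embedding is requested

def build_rotary(dim: int):
    if RotaryEmbedding is None:
        raise ImportError(
            "mossformer needs rotary_embedding_torch: pip install -U rotary_embedding_torch"
        )
    return RotaryEmbedding(dim=dim)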

View File

@@ -455,7 +455,9 @@ class Paraformer(torch.nn.Module):
             speech, speech_lengths = data_in, data_lengths
             if len(speech.shape) < 3:
                 speech = speech[None, :, :]
-            if speech_lengths is None:
+            if speech_lengths is not None:
+                speech_lengths = speech_lengths.squeeze(-1)
+            else:
                 speech_lengths = speech.shape[1]
         else:
             # extract fbank feats
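The new branch accepts lengths passed as a (batch, 1) tensor and squeezes them to (batch,) before they are used downstream, while keeping the old fallback when no lengths are given. A minimal sketch of the shape handling with dummy tensors (shapes are illustrative):

import torch

speech = torch.randn(2, 100, 80)              # (batch, time, feat)
speech_lengths = torch.tensor([[100], [73]])  # (batch, 1)

if speech_lengths is not None:
    speech_lengths = speech_lengths.squeeze(-1)  # -> tensor([100, 73]), shape (batch,)
else:
    speech_lengths = speech.shape[1]             # fall back to the padded time length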

View File

@@ -181,7 +181,7 @@ class Trainer:
             time2 = time.perf_counter()
             time_escaped = (time2 - time1)/3600.0
-            print(f"\ntime_escaped_epoch: {time_escaped:.3f} hours, estimated to finish {self.max_epoch} epoch: {(self.max_epoch-epoch)*time_escaped:.3f}\n")
+            print(f"\nrank: {self.local_rank}, time_escaped_epoch: {time_escaped:.3f} hours, estimated to finish {self.max_epoch} epoch: {(self.max_epoch-epoch)*time_escaped:.3f}\n")
             if self.rank == 0:
                 average_checkpoints(self.output_dir, self.avg_nbest_model)
@@ -302,17 +302,14 @@ class Trainer:
                 )
                 pbar.set_description(description)
                 if self.writer:
-                    self.writer.add_scalar(f'rank{self.local_rank}_Loss/train', loss.item(),
-                                           epoch*len(self.dataloader_train) + batch_idx)
+                    self.writer.add_scalar(f'rank{self.local_rank}_Loss/train', loss.item(), self.batch_total)
                     self.writer.add_scalar(f'rank{self.local_rank}_lr/train', lr, self.batch_total)
                     for key, var in stats.items():
-                        self.writer.add_scalar(f'rank{self.local_rank}_{key}/train', var.item(),
-                                               epoch * len(self.dataloader_train) + batch_idx)
+                        self.writer.add_scalar(f'rank{self.local_rank}_{key}/train', var.item(), self.batch_total)
                     for key, var in speed_stats.items():
-                        self.writer.add_scalar(f'rank{self.local_rank}_{key}/train', eval(var),
-                                               epoch * len(self.dataloader_train) + batch_idx)
+                        self.writer.add_scalar(f'rank{self.local_rank}_{key}/train', eval(var), self.batch_total)
             # if batch_idx == 2:
             # break
         pbar.close()
     def _validate_epoch(self, epoch):
@@ -356,7 +353,10 @@ class Trainer:
             if (batch_idx+1) % self.log_interval == 0 or (batch_idx+1) == len(self.dataloader_val):
                 pbar.update(self.log_interval)
+                time_now = datetime.now()
+                time_now = time_now.strftime("%Y-%m-%d %H:%M:%S")
                 description = (
+                    f"{time_now}, "
                     f"rank: {self.local_rank}, "
                     f"validation epoch: {epoch}/{self.max_epoch}, "
                     f"step: {batch_idx+1}/{len(self.dataloader_val)}, "

View File

@@ -1 +1 @@
-1.0.7
+1.0.8