Mirror of https://github.com/modelscope/FunASR, synced 2025-09-15 14:48:36 +08:00
commit ed22e34d65
parent a70f5b3edf

    finetune
@@ -173,11 +173,11 @@ def main(**kwargs):
     except:
         writer = None
 
-    if use_ddp or use_fsdp:
-        context = Join([model])
-    else:
-        context = nullcontext()
+    # if use_ddp or use_fsdp:
+    #     context = Join([model])
+    # else:
+    #     context = nullcontext()
+    context = nullcontext()
 
     for epoch in range(trainer.start_epoch, trainer.max_epoch + 1):
         time1 = time.perf_counter()
         with context:
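This first hunk replaces the `Join([model])` wrapper with an unconditional `nullcontext()`. For reference, a minimal sketch of what the removed branch did, assuming a DDP-wrapped model (`epoch_context` and its arguments are illustrative names, and `Join` expects `Joinable` instances such as DDP modules):

from contextlib import nullcontext

from torch.distributed.algorithms.join import Join

def epoch_context(model, use_ddp: bool, use_fsdp: bool):
    # Join lets ranks that exhaust their data early "shadow" the
    # collective calls of the remaining ranks, so uneven per-rank
    # batch counts do not deadlock DDP's gradient all-reduce.
    if use_ddp or use_fsdp:
        return Join([model])
    # A single process needs no such coordination.
    return nullcontext()

With the branch commented out, uneven inputs are instead handled explicitly by the `iterator_stop` flag that the following hunks enable.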
@@ -250,14 +250,14 @@ class Trainer:
         optim.zero_grad()
         speed_stats = {}
         time5 = time.perf_counter()
-        # iterator_stop = torch.tensor(0).to(self.device)
+        iterator_stop = torch.tensor(0).to(self.device)
 
         dataloader_train.batch_sampler.set_epoch(epoch)
         for batch_idx, batch in enumerate(dataloader_train):
-            # if self.use_ddp or self.use_fsdp:
-            #     dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
-            #     if iterator_stop > 0:
-            #         break
+            if self.use_ddp or self.use_fsdp:
+                dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
+                if iterator_stop > 0:
+                    break
             self.batch_total += 1
             time1 = time.perf_counter()
             speed_stats["data_load"] = f"{time1-time5:0.3f}"
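This hunk switches on the manual replacement for `Join`: a one-element `iterator_stop` tensor that every rank all-reduces before consuming a batch. While every rank still has data the summed flag stays 0; once any rank raises it (see the hunk at line 364 below), the peers see a positive sum on their next check and break on the same step, keeping the collective calls paired across ranks. A self-contained sketch of the check, where `distributed` stands in for `self.use_ddp or self.use_fsdp`:

import torch
import torch.distributed as dist

def stop_requested(iterator_stop: torch.Tensor, distributed: bool) -> bool:
    # Call exactly once per step on every rank: all_reduce is a
    # collective, so each call here must pair with one on every peer.
    if distributed:
        dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
        return bool(iterator_stop.item() > 0)
    return False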
@@ -340,7 +340,7 @@ class Trainer:
 
             speed_stats["total_time"] = total_time
             lr = scheduler.get_last_lr()[0]
-            batch_num_epoch = -1
+            batch_num_epoch = 1
             if hasattr(dataloader_train, "__len__"):
                 batch_num_epoch = len(dataloader_train)
             self.log(epoch, batch_idx,
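The `-1` to `1` change concerns the fallback taken when the dataloader has no `__len__` (for example a streaming or iterable dataset). Assuming `self.log` divides by or subtracts against `batch_num_epoch` for progress and ETA figures, a fallback of 1 keeps them non-negative where -1 would flip their sign; a purely hypothetical illustration:

# Hypothetical progress arithmetic of the kind self.log might perform;
# with batch_num_epoch = -1 both values would come out negative.
progress = (batch_idx + 1) / batch_num_epoch
batches_left = batch_num_epoch - (batch_idx + 1)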
@@ -364,14 +364,16 @@ class Trainer:
             if (batch_idx+1) % self.save_checkpoint_interval == 0:
                 self.save_checkpoint(epoch, model=model, optim=optim, scheduler=scheduler, scaler=scaler, step=batch_idx+1)
 
-        # else:
-        #     if self.use_ddp or self.use_fsdp:
-        #         iterator_stop.fill_(1)
-        #         dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
+        else:
+            if self.use_ddp or self.use_fsdp:
+                iterator_stop.fill_(1)
+                dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
 
         if self.use_ddp or self.use_fsdp:
             dist.barrier()
 
+        iterator_stop = torch.tensor(0).to(self.device)
 
 
     def validate_epoch(self,
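The re-enabled `else:` belongs to the `for batch_idx, ...` loop (Python's for/else), so its body runs only on a rank whose iterator ended without a `break`: that rank fills the flag with 1 and issues the all_reduce that its peers will match at the top of their next iteration. The barrier then aligns all ranks at the epoch boundary, and the freshly created `iterator_stop` re-arms the flag at 0 for the next epoch. A compact sketch of the whole protocol (the validation hunks below follow the same shape), with illustrative names:

import torch
import torch.distributed as dist

def run_epoch(batches, device, distributed: bool):
    iterator_stop = torch.tensor(0, device=device)
    for batch in batches:
        if distributed:
            dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
            if iterator_stop > 0:
                break  # a peer ran out of data on this step
        ...  # per-batch work (forward/backward/step) would go here
    else:
        # Reached only when this rank's iterator ended without a break:
        # raise the flag so the peers' next check turns positive.
        if distributed:
            iterator_stop.fill_(1)
            dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
    if distributed:
        dist.barrier()  # align all ranks before the next epoch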
@@ -397,7 +399,7 @@ class Trainer:
 
         speed_stats = {}
         time5 = time.perf_counter()
-        # iterator_stop = torch.tensor(0).to(self.device)
+        iterator_stop = torch.tensor(0).to(self.device)
         dataloader_val.batch_sampler.set_epoch(epoch)
         for batch_idx, batch in enumerate(dataloader_val):
             # if self.use_ddp or self.use_fsdp:
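Note the asymmetry in the validation loop: `iterator_stop` is armed before iterating `dataloader_val`, but the per-batch check on the last line of this hunk stays commented out; only the end-of-loop signalling (the hunk at line 455 below) is re-enabled.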
@@ -442,7 +444,7 @@ class Trainer:
             self.val_loss_avg = val_loss_avg.detach().cpu().item() / self.world_size
             self.val_acc_avg = val_acc_avg.detach().cpu().item() / self.world_size
 
-            batch_num_epoch = -1
+            batch_num_epoch = 1
             if hasattr(dataloader_val, "__len__"):
                 batch_num_epoch = len(dataloader_val)
             self.log(epoch, batch_idx,
@@ -455,16 +457,17 @@ class Trainer:
                      tag="val",
                      )
 
-        # else:
-        #     if self.use_ddp or self.use_fsdp:
-        #         iterator_stop.fill_(1)
-        #         dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
+        else:
+            if self.use_ddp or self.use_fsdp:
+                iterator_stop.fill_(1)
+                dist.all_reduce(iterator_stop, dist.ReduceOp.SUM)
 
         self.val_acc_list.append(self.val_acc_avg)
         model.train()
 
         if self.use_ddp or self.use_fsdp:
             dist.barrier()
+            iterator_stop = torch.tensor(0).to(self.device)
 
 
     def log(self,