Mirror of https://github.com/modelscope/FunASR (synced 2025-09-15 14:48:36 +08:00)
Merge pull request #1161 from alibaba-damo-academy/dev_lhn
fix loss normalization for ddp training
This commit is contained in commit 202ab8a2c9.
@@ -442,7 +442,7 @@ class UniASR(FunASRModel):
         stats["loss"] = torch.clone(loss.detach())

         # force_gatherable: to-device and to-tensor if scalar for DataParallel
         if self.length_normalized_loss:
-            batch_size = (text_lengths + 1).sum().type_as(batch_size)
+            batch_size = int((text_lengths + 1).sum())
         loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
         return loss, stats, weight
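Context for the change: the third value returned through force_gatherable becomes the weight the trainer uses when averaging the length-normalized loss across workers, and the in-code comment notes that force_gatherable moves values to the device and converts plain scalars (such as the new Python int batch_size) into tensors. Below is a minimal, illustrative sketch of that weighted averaging under DDP; the helper reduce_weighted_loss and the synthetic values are assumptions for illustration, not FunASR's actual trainer code.

import torch
import torch.distributed as dist


def reduce_weighted_loss(loss: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
    """Combine per-rank mean losses into a global mean, weighted by token count.

    Hypothetical helper: the real trainer performs equivalent bookkeeping,
    but the call sites and names differ.
    """
    # Pack loss*weight and weight so one collective sums both across ranks.
    packed = torch.stack([loss.detach() * weight, weight])
    if dist.is_available() and dist.is_initialized():
        dist.all_reduce(packed, op=dist.ReduceOp.SUM)
    total_loss, total_weight = packed
    return total_loss / total_weight


if __name__ == "__main__":
    # Synthetic single-rank example mirroring the patched code path.
    text_lengths = torch.tensor([7, 5, 9])      # target lengths in this batch
    loss = torch.tensor(2.34)                   # length-normalized loss for the batch
    batch_size = int((text_lengths + 1).sum())  # plain Python int, as in the fix
    # force_gatherable would convert the int to a scalar tensor on loss.device;
    # here the same conversion is done by hand.
    weight = torch.as_tensor(float(batch_size), device=loss.device)
    print(reduce_weighted_loss(loss, weight))   # equals loss on a single rank

On a single rank the weighted division is a no-op, so the printed value equals the input loss; under multi-rank DDP, ranks with more target tokens contribute proportionally more to the global mean.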