Mirror of https://github.com/modelscope/FunASR, synced 2025-09-15 14:48:36 +08:00

Commit 189b51d42b ("update"), parent b7e34131d2
@@ -180,7 +180,6 @@ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
                 --input_size $feats_dim \
                 --ngpu $gpu_num \
                 --num_worker_count $count \
-                --multiprocessing_distributed true \
                 --dist_init_method $init_method \
                 --dist_world_size $world_size \
                 --dist_rank $rank \
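The dist_* flags carry the rendezvous parameters for PyTorch distributed training. A minimal sketch of how flags with these names typically map onto torch.distributed.init_process_group (the argument parsing and wiring below are an illustrative assumption, not FunASR's actual entry point):

import argparse

import torch
import torch.distributed as dist


def build_args():
    # Flag names mirror the train.py invocation in the run script above.
    parser = argparse.ArgumentParser()
    parser.add_argument("--ngpu", type=int, default=1)
    parser.add_argument("--dist_init_method", type=str, default="env://")
    parser.add_argument("--dist_world_size", type=int, default=1)
    parser.add_argument("--dist_rank", type=int, default=0)
    return parser.parse_args()


if __name__ == "__main__":
    args = build_args()
    if args.dist_world_size > 1:
        # $init_method in the script is typically a file:// or tcp:// URL
        # shared by all ranks for rendezvous.
        dist.init_process_group(
            backend="nccl" if torch.cuda.is_available() else "gloo",
            init_method=args.dist_init_method,
            world_size=args.dist_world_size,
            rank=args.dist_rank,
        )
        print(f"rank {dist.get_rank()} of {dist.get_world_size()} ready")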
@@ -77,6 +77,12 @@ def get_parser():
         help="Whether to use the find_unused_parameters in "
         "torch.nn.parallel.DistributedDataParallel ",
     )
+    parser.add_argument(
+        "--gpu_id",
+        type=int,
+        default=0,
+        help="local gpu id.",
+    )
 
     # cudnn related
     parser.add_argument(
@@ -399,6 +405,7 @@ if __name__ == '__main__':
     torch.backends.cudnn.deterministic = args.cudnn_deterministic
 
     # ddp init
+    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
     args.distributed = args.dist_world_size > 1
     distributed_option = build_distributed(args)
 
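Setting CUDA_VISIBLE_DEVICES before the first CUDA call pins each training process to one physical GPU, which that process then sees as cuda:0. A standalone sketch of the pattern (a hypothetical script, not the surrounding FunASR code; --gpu_id matches the flag added above):

import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument("--gpu_id", type=int, default=0, help="local gpu id.")
args = parser.parse_args()

# Must happen before CUDA is initialized: afterwards only the chosen
# physical device is visible, and it is re-indexed as device 0.
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)

import torch  # imported after masking, before any CUDA initialization

if torch.cuda.is_available():
    device = torch.device("cuda:0")  # physical GPU args.gpu_id
    x = torch.ones(2, 2, device=device)
    print(f"pinned to physical GPU {args.gpu_id}; tensor on {x.device}")

With this masking in place each per-GPU process can target device 0 unconditionally, which fits a one-process-per-GPU launch style in the run script above.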
@@ -62,7 +62,7 @@ class SequenceIterFactory(AbsIterFactory):
         # sampler
         dataset_conf = args.dataset_conf
         batch_sampler = LengthBatchSampler(
-            batch_bins=dataset_conf["batch_size"],
+            batch_bins=dataset_conf["batch_size"] * args.ngpu,
             shape_files=shape_files,
             sort_in_batch=dataset_conf["sort_in_batch"] if hasattr(dataset_conf, "sort_in_batch") else "descending",
             sort_batch=dataset_conf["sort_batch"] if hasattr(dataset_conf, "sort_batch") else "ascending",
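Multiplying batch_bins by ngpu makes the sampler build batches sized for the global step, so that after a batch is sharded across ngpu data-parallel workers each GPU still sees the configured batch_size. A toy illustration of that arithmetic (illustrative only; FunASR's LengthBatchSampler does length-aware binning rather than this naive round-robin):

def shard_for_rank(global_batch, ngpu, rank):
    # Round-robin shard of one global batch across data-parallel ranks:
    # with batch_bins = batch_size * ngpu, each rank gets ~batch_size items.
    return global_batch[rank::ngpu]


batch_size, ngpu = 4, 2
global_batch = list(range(batch_size * ngpu))  # one sampler batch
for rank in range(ngpu):
    print(f"rank {rank}: {shard_for_rank(global_batch, ngpu, rank)}")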