def __call__(self, global_step, backbone: paddle.nn.Layer, partial_fc: PartialFC = None):
    # Save the backbone weights and the Partial-FC shard once training has passed warmup.
    if global_step > 100 and self.rank == 0:
        paddle.save(backbone.state_dict(),
                    os.path.join(self.output, "backbone.pdparams"))
    if global_step > 100 and partial_fc is not None:
        partial_fc.save_params()
def __call__(self, global_step, backbone: torch.nn.Module, partial_fc: PartialFC = None):
    # Only rank 0 writes the (DDP-wrapped) backbone; every rank saves its own Partial-FC shard.
    if global_step > 100 and self.rank == 0:
        torch.save(backbone.module.state_dict(),
                   os.path.join(self.output, "backbone.pth"))
    if global_step > 100 and partial_fc is not None:
        partial_fc.save_params()
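# Illustrative sketch, not part of the original scripts: reloading the "backbone.pth" file written
# by the PyTorch callback above, mirroring the resume branch used by the main() functions below.
# get_model is the same helper used later in this file; the network name ("r50"), the embedding
# size (512) and the output directory are assumptions for the example.
import os
import torch
from backbones import get_model

backbone = get_model("r50", fp16=False, num_features=512)
ckpt = os.path.join("./work_dir", "backbone.pth")
if os.path.exists(ckpt):
    backbone.load_state_dict(torch.load(ckpt, map_location="cpu"))
    backbone.eval()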
def main(args):
    # Distributed setup: one process per GPU, NCCL backend, env:// rendezvous.
    dist.init_process_group(backend='nccl', init_method='env://')
    local_rank = args.local_rank
    torch.cuda.set_device(local_rank)
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    if not os.path.exists(cfg.output) and rank == 0:
        os.makedirs(cfg.output)
    else:
        time.sleep(2)

    log_root = logging.getLogger()
    init_logging(log_root, rank, cfg.output)

    trainset = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
    train_sampler = torch.utils.data.distributed.DistributedSampler(trainset, shuffle=True)
    train_loader = DataLoaderX(
        local_rank=local_rank, dataset=trainset, batch_size=cfg.batch_size,
        sampler=train_sampler, num_workers=0, pin_memory=True, drop_last=True)

    dropout = 0.4 if cfg.dataset == "webface" else 0
    backbone = eval("backbones.{}".format(args.network))(
        False, dropout=dropout, fp16=cfg.fp16).to(local_rank)

    if args.resume:
        try:
            backbone_pth = os.path.join(cfg.output, "backbone.pth")
            backbone.load_state_dict(
                torch.load(backbone_pth, map_location=torch.device(local_rank)))
            if rank == 0:
                logging.info("backbone resume successfully!")
        except (FileNotFoundError, KeyError, IndexError, RuntimeError):
            logging.info("resume fail, backbone init successfully!")

    # Make sure every rank starts from identical backbone weights.
    for ps in backbone.parameters():
        dist.broadcast(ps, 0)
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[local_rank])
    backbone.train()

    margin_softmax = eval("losses.{}".format(args.loss))()
    module_partial_fc = PartialFC(
        rank=rank, local_rank=local_rank, world_size=world_size, resume=args.resume,
        batch_size=cfg.batch_size, margin_softmax=margin_softmax,
        num_classes=cfg.num_classes, sample_rate=cfg.sample_rate,
        embedding_size=cfg.embedding_size, prefix=cfg.output)

    # Linear learning-rate scaling: the base lr is defined for a global batch of 512.
    opt_backbone = torch.optim.SGD(
        params=[{'params': backbone.parameters()}],
        lr=cfg.lr / 512 * cfg.batch_size * world_size,
        momentum=0.9, weight_decay=cfg.weight_decay)
    opt_pfc = torch.optim.SGD(
        params=[{'params': module_partial_fc.parameters()}],
        lr=cfg.lr / 512 * cfg.batch_size * world_size,
        momentum=0.9, weight_decay=cfg.weight_decay)

    scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_backbone, lr_lambda=cfg.lr_func)
    scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_pfc, lr_lambda=cfg.lr_func)

    start_epoch = 0
    total_step = int(len(trainset) / cfg.batch_size / world_size * cfg.num_epoch)
    if rank == 0:
        logging.info("Total Step is: %d" % total_step)

    callback_verification = CallBackVerification(2000, rank, cfg.val_targets, cfg.rec)
    callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, None)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

    loss = AverageMeter()
    global_step = 0
    grad_scaler = MaxClipGradScaler(
        cfg.batch_size, 128 * cfg.batch_size, growth_interval=100) if cfg.fp16 else None

    for epoch in range(start_epoch, cfg.num_epoch):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            global_step += 1
            features = F.normalize(backbone(img))
            # Partial FC computes the margin-softmax loss and returns the gradient w.r.t. features.
            x_grad, loss_v = module_partial_fc.forward_backward(label, features, opt_pfc)
            if cfg.fp16:
                features.backward(grad_scaler.scale(x_grad))
                grad_scaler.unscale_(opt_backbone)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                grad_scaler.step(opt_backbone)
                grad_scaler.update()
            else:
                features.backward(x_grad)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                opt_backbone.step()
            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, cfg.fp16, grad_scaler)
            callback_verification(global_step, backbone)
        callback_checkpoint(global_step, backbone, module_partial_fc)
        scheduler_backbone.step()
        scheduler_pfc.step()
    dist.destroy_process_group()
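# Illustrative sketch, not part of the original script: the SGD optimizers above follow the linear
# learning-rate scaling rule, lr = base_lr / 512 * batch_size * world_size, so the effective lr
# grows with the global batch size. The numbers below are hypothetical example values.
base_lr = 0.1        # cfg.lr (example)
batch_size = 128     # cfg.batch_size per GPU (example)
world_size = 8       # number of GPUs (example)
scaled_lr = base_lr / 512 * batch_size * world_size
print(scaled_lr)     # 0.1 / 512 * 128 * 8 = 0.2, i.e. the base lr scaled for a global batch of 1024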
def main(args):
    # Single-card PaddlePaddle training: no distributed setup.
    world_size = 1
    rank = 0
    if not os.path.exists(args.output):
        os.makedirs(args.output)
    else:
        time.sleep(2)

    writer = LogWriter(logdir=args.logdir)

    trainset = CommonDataset(root_dir=cfg.data_dir, label_file=cfg.file_list, is_bin=args.is_bin)
    train_loader = DataLoader(
        dataset=trainset, batch_size=args.batch_size, shuffle=True,
        drop_last=True, num_workers=0)

    backbone = eval("backbones.{}".format(args.network))()
    backbone.train()

    clip_by_norm = ClipGradByNorm(5.0)
    margin_softmax = eval("losses.{}".format(args.loss))()
    module_partial_fc = PartialFC(
        rank=0, world_size=1, resume=0, batch_size=args.batch_size,
        margin_softmax=margin_softmax, num_classes=cfg.num_classes,
        sample_rate=cfg.sample_rate, embedding_size=args.embedding_size,
        prefix=args.output)

    # Linear warmup into the user-supplied lr schedule; the end lr follows the
    # linear scaling rule (base lr defined for a batch of 512).
    scheduler_backbone_decay = paddle.optimizer.lr.LambdaDecay(
        learning_rate=args.lr, lr_lambda=cfg.lr_func, verbose=True)
    scheduler_backbone = paddle.optimizer.lr.LinearWarmup(
        learning_rate=scheduler_backbone_decay, warmup_steps=cfg.warmup_epoch,
        start_lr=0, end_lr=args.lr / 512 * args.batch_size, verbose=True)
    opt_backbone = paddle.optimizer.Momentum(
        parameters=backbone.parameters(), learning_rate=scheduler_backbone,
        momentum=0.9, weight_decay=args.weight_decay, grad_clip=clip_by_norm)

    scheduler_pfc_decay = paddle.optimizer.lr.LambdaDecay(
        learning_rate=args.lr, lr_lambda=cfg.lr_func, verbose=True)
    scheduler_pfc = paddle.optimizer.lr.LinearWarmup(
        learning_rate=scheduler_pfc_decay, warmup_steps=cfg.warmup_epoch,
        start_lr=0, end_lr=args.lr / 512 * args.batch_size, verbose=True)
    opt_pfc = paddle.optimizer.Momentum(
        parameters=module_partial_fc.parameters(), learning_rate=scheduler_pfc,
        momentum=0.9, weight_decay=args.weight_decay, grad_clip=clip_by_norm)

    start_epoch = 0
    total_step = int(len(trainset) / args.batch_size / world_size * cfg.num_epoch)
    if rank == 0:
        print("Total Step is: %d" % total_step)

    callback_verification = CallBackVerification(2000, rank, cfg.val_targets, cfg.data_dir)
    callback_logging = CallBackLogging(10, rank, total_step, args.batch_size, world_size, writer)
    callback_checkpoint = CallBackModelCheckpoint(rank, args.output, args.network)

    loss = AverageMeter()
    global_step = 0
    for epoch in range(start_epoch, cfg.num_epoch):
        for step, (img, label) in enumerate(train_loader):
            label = label.flatten()
            global_step += 1
            features = F.normalize(backbone(img))
            x_grad, loss_v = module_partial_fc.forward_backward(label, features, opt_pfc)
            # Backpropagate the feature gradient returned by Partial FC through the backbone.
            (features.multiply(x_grad)).backward()
            opt_backbone.step()
            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.clear_gradients()
            opt_pfc.clear_gradients()
            lr_backbone_value = opt_backbone._global_learning_rate().numpy()[0]
            lr_pfc_value = opt_pfc._global_learning_rate().numpy()[0]
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, lr_backbone_value, lr_pfc_value)
            callback_verification(global_step, backbone)
        callback_checkpoint(global_step, backbone, module_partial_fc)
        scheduler_backbone.step()
        scheduler_pfc.step()
    writer.close()
def main(args):
    # Single-card PaddlePaddle training: no distributed setup.
    world_size = 1
    rank = 0
    local_rank = args.local_rank

    if not os.path.exists(cfg.output):
        os.makedirs(cfg.output)
    else:
        time.sleep(2)

    writer = LogWriter(logdir=cfg.logdir)

    trainset = MXFaceDataset(root_dir=cfg.rec)
    train_loader = DataLoader(
        dataset=trainset, batch_size=cfg.batch_size, shuffle=True,
        drop_last=True, num_workers=0)

    dropout = 0.4 if cfg.dataset == "webface" else 0
    backbone = eval("backbones.{}".format(args.network))(
        False, dropout=dropout, fp16=cfg.fp16)
    backbone.train()

    clip_by_norm = ClipGradByNorm(5.0)
    margin_softmax = eval("losses.{}".format(args.loss))()
    module_partial_fc = PartialFC(
        rank=0, local_rank=0, world_size=1, resume=0, batch_size=cfg.batch_size,
        margin_softmax=margin_softmax, num_classes=cfg.num_classes,
        sample_rate=cfg.sample_rate, embedding_size=cfg.embedding_size,
        prefix=cfg.output)

    # Linear learning-rate scaling: the base lr is defined for a global batch of 512.
    scheduler_backbone = paddle.optimizer.lr.LambdaDecay(
        learning_rate=cfg.lr / 512 * cfg.batch_size, lr_lambda=cfg.lr_func, verbose=True)
    opt_backbone = paddle.optimizer.SGD(
        parameters=backbone.parameters(), learning_rate=scheduler_backbone,
        weight_decay=cfg.weight_decay, grad_clip=clip_by_norm)
    scheduler_pfc = paddle.optimizer.lr.LambdaDecay(
        learning_rate=cfg.lr / 512 * cfg.batch_size, lr_lambda=cfg.lr_func, verbose=True)
    opt_pfc = paddle.optimizer.SGD(
        parameters=module_partial_fc.parameters(), learning_rate=scheduler_pfc,
        weight_decay=cfg.weight_decay, grad_clip=clip_by_norm)

    start_epoch = 0
    total_step = int(len(trainset) / cfg.batch_size / world_size * cfg.num_epoch)
    if rank == 0:
        print("Total Step is: %d" % total_step)

    callback_verification = CallBackVerification(2000, rank, cfg.val_targets, cfg.rec)
    callback_logging = CallBackLogging(100, rank, total_step, cfg.batch_size, world_size, writer)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

    loss = AverageMeter()
    global_step = 0
    grad_scaler = MaxClipGradScaler(
        cfg.batch_size, 128 * cfg.batch_size, growth_interval=100) if cfg.fp16 else None

    for epoch in range(start_epoch, cfg.num_epoch):
        for step, (img, label) in enumerate(train_loader):
            label = label.flatten()
            global_step += 1
            features = F.normalize(backbone(img))
            x_grad, loss_v = module_partial_fc.forward_backward(label, features, opt_pfc)
            if cfg.fp16:
                scaled = grad_scaler.scale(x_grad)
                (features.multiply(scaled)).backward()
                grad_scaler._unscale(opt_backbone)
                grad_scaler.minimize(opt_backbone, scaled)
            else:
                (features.multiply(x_grad)).backward()
                opt_backbone.step()
            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.clear_gradients()
            opt_pfc.clear_gradients()
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, cfg.fp16, grad_scaler)
            callback_verification(global_step, backbone)
        callback_checkpoint(global_step, backbone, module_partial_fc)
        scheduler_backbone.step()
        scheduler_pfc.step()
    writer.close()
def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    cfg = get_config(args.config)

    # Prefer an env:// style launch (torchrun / torch.distributed.launch); fall back to a
    # single-process group when WORLD_SIZE / RANK are not set.
    try:
        world_size = int(os.environ['WORLD_SIZE'])
        rank = int(os.environ['RANK'])
        dist.init_process_group('nccl')
    except KeyError:
        world_size = 1
        rank = 0
        dist.init_process_group(backend='nccl', init_method="tcp://127.0.0.1:12584",
                                rank=rank, world_size=world_size)

    local_rank = args.local_rank
    torch.cuda.set_device(local_rank)
    os.makedirs(cfg.output, exist_ok=True)
    init_logging(rank, cfg.output)

    if cfg.rec == "synthetic":
        train_set = SyntheticDataset(local_rank=local_rank)
    else:
        train_set = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_set, shuffle=True)
    train_loader = DataLoaderX(
        local_rank=local_rank, dataset=train_set, batch_size=cfg.batch_size,
        sampler=train_sampler, num_workers=2, pin_memory=True, drop_last=True)

    backbone = get_model(cfg.network, dropout=0.0, fp16=cfg.fp16,
                         num_features=cfg.embedding_size).to(local_rank)

    if cfg.resume:
        try:
            backbone_pth = os.path.join(cfg.output, "backbone.pth")
            backbone.load_state_dict(
                torch.load(backbone_pth, map_location=torch.device(local_rank)))
            if rank == 0:
                logging.info("backbone resume successfully!")
        except (FileNotFoundError, KeyError, IndexError, RuntimeError):
            if rank == 0:
                logging.info("resume fail, backbone init successfully!")

    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[local_rank])
    backbone.train()

    if cfg.loss == 'magface':
        margin_softmax = losses.get_loss(cfg.loss, lambda_g=cfg.lambda_g)
    elif cfg.loss == 'mag_cosface':
        margin_softmax = losses.get_loss(cfg.loss)
    else:
        margin_softmax = losses.get_loss(cfg.loss, s=cfg.s, m1=cfg.m1, m2=cfg.m2, m3=cfg.m3)

    module_partial_fc = PartialFC(
        rank=rank, local_rank=local_rank, world_size=world_size, resume=cfg.resume,
        batch_size=cfg.batch_size, margin_softmax=margin_softmax,
        num_classes=cfg.num_classes, sample_rate=cfg.sample_rate,
        embedding_size=cfg.embedding_size, prefix=cfg.output)

    opt_backbone = torch.optim.SGD(
        params=[{'params': backbone.parameters()}],
        lr=cfg.lr / 512 * cfg.batch_size * world_size,
        momentum=0.9, weight_decay=cfg.weight_decay)
    opt_pfc = torch.optim.SGD(
        params=[{'params': module_partial_fc.parameters()}],
        lr=cfg.lr / 512 * cfg.batch_size * world_size,
        momentum=0.9, weight_decay=cfg.weight_decay)

    num_image = len(train_set)
    total_batch_size = cfg.batch_size * world_size
    cfg.warmup_step = num_image // total_batch_size * cfg.warmup_epoch
    cfg.total_step = num_image // total_batch_size * cfg.num_epoch

    def lr_step_func(current_step):
        # Linear warmup, then a 10x decay at every milestone in cfg.decay_epoch.
        cfg.decay_step = [x * num_image // total_batch_size for x in cfg.decay_epoch]
        if current_step < cfg.warmup_step:
            return current_step / cfg.warmup_step
        else:
            return 0.1 ** len([m for m in cfg.decay_step if m <= current_step])

    scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_backbone, lr_lambda=lr_step_func)
    scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_pfc, lr_lambda=lr_step_func)

    for key, value in cfg.items():
        num_space = 25 - len(key)
        logging.info(": " + key + " " * num_space + str(value))

    val_target = cfg.val_targets
    callback_verification = CallBackVerification(2000, rank, val_target, cfg.rec)
    callback_logging = CallBackLogging(50, rank, cfg.total_step, cfg.batch_size, world_size, None)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

    loss = AverageMeter()
    start_epoch = 0
    global_step = 0
    grad_amp = MaxClipGradScaler(
        cfg.batch_size, 128 * cfg.batch_size, growth_interval=100) if cfg.fp16 else None

    for epoch in range(start_epoch, cfg.num_epoch):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            global_step += 1
            x = backbone(img)
            features = F.normalize(x)
            x_grad, loss_v = module_partial_fc.forward_backward(label, features, opt_pfc, x)
            if cfg.fp16:
                features.backward(grad_amp.scale(x_grad))
                grad_amp.unscale_(opt_backbone)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                grad_amp.step(opt_backbone)
                grad_amp.update()
            else:
                features.backward(x_grad)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                opt_backbone.step()
            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, cfg.fp16,
                             scheduler_backbone.get_last_lr()[0], grad_amp)
            callback_verification(global_step, backbone)
            # The lr lambda depends on the global step, so step the schedulers every iteration.
            scheduler_backbone.step()
            scheduler_pfc.step()
        callback_checkpoint(global_step, backbone, module_partial_fc)
    callback_verification('last', backbone)
    dist.destroy_process_group()
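# Illustrative sketch, not part of the original script, of the schedule produced by lr_step_func
# above: the returned factor ramps linearly during warmup and then drops by 10x at every decay
# milestone. warmup_step and decay_step below are hypothetical example values.
warmup_step = 1000
decay_step = [4000, 8000]

def lr_step_func_example(current_step):
    if current_step < warmup_step:
        return current_step / warmup_step
    return 0.1 ** len([m for m in decay_step if m <= current_step])

print(lr_step_func_example(500))    # 0.5   (halfway through warmup)
print(lr_step_func_example(2000))   # 1.0   (after warmup, before any milestone)
print(lr_step_func_example(5000))   # 0.1   (after the first milestone)
print(lr_step_func_example(9000))   # 0.01  (after both milestones)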
def main(args):
    # rank and world_size are assumed to be initialized at module scope from the
    # distributed environment (torch.distributed), as in the earlier main() above.
    torch.cuda.set_device(args.local_rank)
    cfg = get_config(args.config)

    os.makedirs(cfg.output, exist_ok=True)
    init_logging(rank, cfg.output)
    summary_writer = (SummaryWriter(log_dir=os.path.join(cfg.output, "tensorboard"))
                      if rank == 0 else None)

    train_loader = get_dataloader(cfg.rec, local_rank=args.local_rank,
                                  batch_size=cfg.batch_size, dali=cfg.dali)

    backbone = get_model(cfg.network, dropout=0.0, fp16=cfg.fp16,
                         num_features=cfg.embedding_size).cuda()
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[args.local_rank])
    backbone.train()

    if cfg.loss == "arcface":
        margin_loss = ArcFace()
    elif cfg.loss == "cosface":
        margin_loss = CosFace()
    else:
        raise

    module_partial_fc = PartialFC(margin_loss, cfg.embedding_size, cfg.num_classes,
                                  cfg.sample_rate, cfg.fp16)
    module_partial_fc.train().cuda()
    # TODO the params of partial fc must be last in the params list
    opt = torch.optim.SGD(
        params=[{"params": backbone.parameters()},
                {"params": module_partial_fc.parameters()}],
        lr=cfg.lr, momentum=0.9, weight_decay=cfg.weight_decay)

    total_batch_size = cfg.batch_size * world_size
    cfg.warmup_step = cfg.num_image // total_batch_size * cfg.warmup_epoch
    cfg.total_step = cfg.num_image // total_batch_size * cfg.num_epoch
    lr_scheduler = PolyScheduler(optimizer=opt, base_lr=cfg.lr,
                                 max_steps=cfg.total_step, warmup_steps=cfg.warmup_step)

    for key, value in cfg.items():
        num_space = 25 - len(key)
        logging.info(": " + key + " " * num_space + str(value))

    callback_verification = CallBackVerification(
        val_targets=cfg.val_targets, rec_prefix=cfg.rec, summary_writer=summary_writer)
    callback_logging = CallBackLogging(
        frequent=cfg.frequent, total_step=cfg.total_step,
        batch_size=cfg.batch_size, writer=summary_writer)

    loss_am = AverageMeter()
    start_epoch = 0
    global_step = 0
    amp = torch.cuda.amp.grad_scaler.GradScaler(growth_interval=100)

    for epoch in range(start_epoch, cfg.num_epoch):
        if isinstance(train_loader, DataLoader):
            train_loader.sampler.set_epoch(epoch)
        for _, (img, local_labels) in enumerate(train_loader):
            global_step += 1
            local_embeddings = backbone(img)
            # Partial FC now returns the loss directly; a single optimizer updates both modules.
            loss: torch.Tensor = module_partial_fc(local_embeddings, local_labels, opt)

            if cfg.fp16:
                amp.scale(loss).backward()
                amp.unscale_(opt)
                torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
                amp.step(opt)
                amp.update()
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
                opt.step()
            opt.zero_grad()
            lr_scheduler.step()

            with torch.no_grad():
                loss_am.update(loss.item(), 1)
                callback_logging(global_step, loss_am, epoch, cfg.fp16,
                                 lr_scheduler.get_last_lr()[0], amp)
                if global_step % cfg.verbose == 0 and global_step > 200:
                    callback_verification(global_step, backbone)

        # End of epoch: every rank saves its Partial-FC shard; rank 0 saves the backbone.
        path_pfc = os.path.join(cfg.output, "softmax_fc_gpu_{}.pt".format(rank))
        torch.save(module_partial_fc.state_dict(), path_pfc)
        if rank == 0:
            path_module = os.path.join(cfg.output, "model.pt")
            torch.save(backbone.module.state_dict(), path_module)
        if cfg.dali:
            train_loader.reset()

    if rank == 0:
        path_module = os.path.join(cfg.output, "model.pt")
        torch.save(backbone.module.state_dict(), path_module)
    distributed.destroy_process_group()
def main(args):
    # rank and world_size are assumed to be initialized at module scope from the
    # distributed environment (torch.distributed), as in the earlier main() above.
    seed = 2333
    seed = seed + rank
    torch.manual_seed(seed)
    np.random.seed(seed)

    torch.cuda.set_device(args.local_rank)
    cfg = get_config(args.config)

    os.makedirs(cfg.output, exist_ok=True)
    init_logging(rank, cfg.output)
    summary_writer = (SummaryWriter(log_dir=os.path.join(cfg.output, "tensorboard"))
                      if rank == 0 else None)

    train_loader = get_dataloader(cfg.rec, local_rank=args.local_rank,
                                  batch_size=cfg.batch_size, dali=cfg.dali)

    backbone = get_model(cfg.network, dropout=0.0, fp16=cfg.fp16,
                         num_features=cfg.embedding_size).cuda()
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[args.local_rank],
        bucket_cap_mb=16, find_unused_parameters=True)
    backbone.train()
    # FIXME using gradient checkpoint if there are some unused parameters will cause error
    backbone._set_static_graph()

    margin_loss = CombinedMarginLoss(64, cfg.margin_list[0], cfg.margin_list[1],
                                     cfg.margin_list[2], cfg.interclass_filtering_threshold)

    if cfg.optimizer == "sgd":
        module_partial_fc = PartialFC(margin_loss, cfg.embedding_size, cfg.num_classes,
                                      cfg.sample_rate, cfg.fp16)
        module_partial_fc.train().cuda()
        opt = torch.optim.SGD(
            params=[{"params": backbone.parameters()},
                    {"params": module_partial_fc.parameters()}],
            lr=cfg.lr, momentum=0.9, weight_decay=cfg.weight_decay)
    elif cfg.optimizer == "adamw":
        module_partial_fc = PartialFCAdamW(margin_loss, cfg.embedding_size, cfg.num_classes,
                                           cfg.sample_rate, cfg.fp16)
        module_partial_fc.train().cuda()
        opt = torch.optim.AdamW(
            params=[{"params": backbone.parameters()},
                    {"params": module_partial_fc.parameters()}],
            lr=cfg.lr, weight_decay=cfg.weight_decay)
    else:
        raise

    cfg.total_batch_size = cfg.batch_size * world_size
    cfg.warmup_step = cfg.num_image // cfg.total_batch_size * cfg.warmup_epoch
    cfg.total_step = cfg.num_image // cfg.total_batch_size * cfg.num_epoch
    lr_scheduler = PolyScheduler(optimizer=opt, base_lr=cfg.lr,
                                 max_steps=cfg.total_step, warmup_steps=cfg.warmup_step)

    for key, value in cfg.items():
        num_space = 25 - len(key)
        logging.info(": " + key + " " * num_space + str(value))

    callback_verification = CallBackVerification(
        val_targets=cfg.val_targets, rec_prefix=cfg.rec, summary_writer=summary_writer)
    callback_logging = CallBackLogging(
        frequent=cfg.frequent, total_step=cfg.total_step,
        batch_size=cfg.batch_size, writer=summary_writer)

    loss_am = AverageMeter()
    start_epoch = 0
    global_step = 0
    amp = torch.cuda.amp.grad_scaler.GradScaler(growth_interval=100)

    for epoch in range(start_epoch, cfg.num_epoch):
        if isinstance(train_loader, DataLoader):
            train_loader.sampler.set_epoch(epoch)
        for _, (img, local_labels) in enumerate(train_loader):
            global_step += 1
            local_embeddings = backbone(img)
            loss: torch.Tensor = module_partial_fc(local_embeddings, local_labels, opt)

            if cfg.fp16:
                amp.scale(loss).backward()
                amp.unscale_(opt)
                torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
                amp.step(opt)
                amp.update()
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
                opt.step()
            opt.zero_grad()
            lr_scheduler.step()

            with torch.no_grad():
                loss_am.update(loss.item(), 1)
                callback_logging(global_step, loss_am, epoch, cfg.fp16,
                                 lr_scheduler.get_last_lr()[0], amp)
                if global_step % cfg.verbose == 0 and global_step > 200:
                    callback_verification(global_step, backbone)

        # End of epoch: every rank saves its Partial-FC shard; rank 0 saves the backbone.
        path_pfc = os.path.join(cfg.output, "softmax_fc_gpu_{}.pt".format(rank))
        torch.save(module_partial_fc.state_dict(), path_pfc)
        if rank == 0:
            path_module = os.path.join(cfg.output, "model.pt")
            torch.save(backbone.module.state_dict(), path_module)
        if cfg.dali:
            train_loader.reset()

    if rank == 0:
        path_module = os.path.join(cfg.output, "model.pt")
        torch.save(backbone.module.state_dict(), path_module)
        from torch2onnx import convert_onnx
        convert_onnx(backbone.module.cpu().eval(), path_module,
                     os.path.join(cfg.output, "model.onnx"))
    distributed.destroy_process_group()
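# Illustrative sketch, not part of the original script: comparing two face embeddings produced by a
# trained backbone. Embeddings are L2-normalized (as done with F.normalize in the training loops
# above), so cosine similarity reduces to a dot product. The random tensors are placeholders for
# embeddings of real aligned 112x112 face crops; 512 is an assumed embedding size.
import torch
import torch.nn.functional as F

emb_a = F.normalize(torch.randn(1, 512), dim=1)   # embedding of face A (placeholder)
emb_b = F.normalize(torch.randn(1, 512), dim=1)   # embedding of face B (placeholder)
cosine = (emb_a * emb_b).sum(dim=1)               # in [-1, 1]; larger means more similar
print(cosine.item())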