def main(args): cfg = get_config(args.config) logging.basicConfig(level=logging.NOTSET) logging.info(args.model_path) backbone = get_model(cfg.network, dropout=0.0, num_features=cfg.embedding_size).to("cuda") val_callback = CallBackVerification(1, 0, cfg.val_targets, cfg.ofrecord_path) state_dict = flow.load(args.model_path) new_parameters = dict() for key, value in state_dict.items(): if "num_batches_tracked" not in key: if key == "fc.weight": continue new_key = key.replace("backbone.", "") new_parameters[new_key] = value backbone.load_state_dict(new_parameters) infer_graph = EvalGraph(backbone, cfg) val_callback(1000, backbone, infer_graph)
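# A minimal, framework-agnostic sketch of the checkpoint filtering done above
# (the helper name is hypothetical): drop "num_batches_tracked" buffers and the
# "fc.weight" head, and strip the "backbone." prefix so keys match the bare backbone.
def remap_backbone_keys(state_dict):
    new_parameters = {}
    for key, value in state_dict.items():
        if "num_batches_tracked" in key or key == "fc.weight":
            continue
        new_parameters[key.replace("backbone.", "")] = value
    return new_parameters

# Example: {"backbone.conv1.weight": w, "fc.weight": fc} -> {"conv1.weight": w}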
def __init__(self, cfg, placement, load_path, world_size, rank): self.placement = placement self.load_path = load_path self.cfg = cfg self.world_size = world_size self.rank = rank # model self.backbone = get_model(cfg.network, dropout=0.0, num_features=cfg.embedding_size).to("cuda") self.train_module = Train_Module(cfg, self.backbone, self.placement, world_size).to("cuda") if cfg.resume: if load_path is not None: self.load_state_dict() else: logging.info("Model resume failed! load path is None ") # optimizer self.optimizer = make_optimizer(cfg, self.train_module) # data self.train_data_loader = make_data_loader(cfg, 'train', self.cfg.graph, self.cfg.synthetic) # loss if cfg.loss == "cosface": self.margin_softmax = flow.nn.CombinedMarginLoss(1, 0., 0.4).to("cuda") else: self.margin_softmax = flow.nn.CombinedMarginLoss(1, 0.5, 0.).to("cuda") self.of_cross_entropy = CrossEntropyLoss_sbp() # lr_scheduler self.decay_step = self.cal_decay_step() self.scheduler = flow.optim.lr_scheduler.MultiStepLR( optimizer=self.optimizer, milestones=self.decay_step, gamma=0.1) # log self.callback_logging = CallBackLogging(50, rank, cfg.total_step, cfg.batch_size, world_size, None) # val self.callback_verification = CallBackVerification( 600, rank, cfg.val_targets, cfg.ofrecord_path, is_consistent=cfg.graph) # save checkpoint self.callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output) self.losses = AverageMeter() self.start_epoch = 0 self.global_step = 0
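# A small pure-Python sketch of the schedule configured above: MultiStepLR multiplies
# the base learning rate by gamma=0.1 each time a milestone in decay_step is passed.
# The milestone values themselves come from cal_decay_step (not shown); the numbers in
# the example are illustrative only.
def multistep_lr(base_lr, step, milestones, gamma=0.1):
    return base_lr * gamma ** len([m for m in milestones if m <= step])

# e.g. with milestones [100000, 160000] and base_lr 0.1:
# multistep_lr(0.1, 50000, [100000, 160000])  -> 0.1
# multistep_lr(0.1, 120000, [100000, 160000]) -> 0.01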
def main(args): cfg = get_config(args.config) logging.basicConfig(level=logging.NOTSET) logging.info(args.model_path) val_infer = Validator(cfg) val_callback = CallBackVerification(1, cfg.val_targets, cfg.eval_ofrecord_path, image_nums=cfg.val_image_num) val_infer.load_checkpoint(args.model_path) val_callback(1000, val_infer.get_symbol_val_fn)
def main(args): ''' For the CallBackVerification class, you can set your val_dataset, e.g. ["lfw"], or use several targets such as ["lfw", "cplfw", "calfw"]. For the callback_verification function, 12000 must be divisible by the batch_size, because each validation dataset has 12000 samples. ''' backbone = eval("backbones.{}".format(args.network))() model_params = args.network + '.pdparams' print('INFO: ' + args.network + ' chosen! ' + model_params + ' loaded!') state_dict = paddle.load(os.path.join(args.checkpoint, model_params)) backbone.set_state_dict(state_dict) callback_verification = CallBackVerification(1, 0, ["lfw", "cfp_fp", "agedb_30"], "MS1M_v2") callback_verification(1, backbone, batch_size=50)
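# The docstring's batch-size constraint, written out as an explicit check (the helper
# name is hypothetical): each verification set here holds 12000 samples, so batch_size
# must divide it evenly.
VAL_SET_LEN = 12000  # length of each verification dataset, per the docstring

def check_val_batch_size(batch_size, val_set_len=VAL_SET_LEN):
    assert val_set_len % batch_size == 0, (
        "batch_size=%d must evenly divide %d" % (batch_size, val_set_len))

check_val_batch_size(50)  # ok: 12000 % 50 == 0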
def main(args): ''' For the CallBackVerification class, you can set your val_dataset, e.g. ["lfw"], or use several targets such as ["lfw", "cplfw", "calfw"]; my CPU is slow, so I use ["lfw"] as the only val_dataset. For the callback_verification function, 12000 must be divisible by the batch_size, because each validation dataset has 12000 samples. ''' backbone = eval("backbones.{}".format(args.network))() res_num = re.sub(r"\D", "", args.network) for candidate_model in candidate_models: if res_num in candidate_model: model_params = candidate_model break print('INFO: ' + args.network + ' chosen! ' + model_params + ' loaded!') state_dict = paddle.load(model_params) backbone.set_state_dict(state_dict) callback_verification = CallBackVerification(1, 0, ["lfw", "cplfw", "calfw"], "../faces_emore") callback_verification(1, backbone, batch_size=50)
def main(args): dist.init_process_group(backend='nccl', init_method='env://') local_rank = args.local_rank torch.cuda.set_device(local_rank) rank = dist.get_rank() world_size = dist.get_world_size() if not os.path.exists(cfg.output) and rank == 0: os.makedirs(cfg.output) else: time.sleep(2) log_root = logging.getLogger() init_logging(log_root, rank, cfg.output) trainset = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank) train_sampler = torch.utils.data.distributed.DistributedSampler( trainset, shuffle=True) train_loader = DataLoaderX(local_rank=local_rank, dataset=trainset, batch_size=cfg.batch_size, sampler=train_sampler, num_workers=0, pin_memory=True, drop_last=True) dropout = 0.4 if cfg.dataset == "webface" else 0 backbone = eval("backbones.{}".format(args.network))( False, dropout=dropout, fp16=cfg.fp16).to(local_rank) if args.resume: try: backbone_pth = os.path.join(cfg.output, "backbone.pth") backbone.load_state_dict( torch.load(backbone_pth, map_location=torch.device(local_rank))) if rank == 0: logging.info("backbone resume successfully!") except (FileNotFoundError, KeyError, IndexError, RuntimeError): logging.info("resume fail, backbone init successfully!") for ps in backbone.parameters(): dist.broadcast(ps, 0) backbone = torch.nn.parallel.DistributedDataParallel( module=backbone, broadcast_buffers=False, device_ids=[local_rank]) backbone.train() margin_softmax = eval("losses.{}".format(args.loss))() module_partial_fc = PartialFC(rank=rank, local_rank=local_rank, world_size=world_size, resume=args.resume, batch_size=cfg.batch_size, margin_softmax=margin_softmax, num_classes=cfg.num_classes, sample_rate=cfg.sample_rate, embedding_size=cfg.embedding_size, prefix=cfg.output) opt_backbone = torch.optim.SGD(params=[{ 'params': backbone.parameters() }], lr=cfg.lr / 512 * cfg.batch_size * world_size, momentum=0.9, weight_decay=cfg.weight_decay) opt_pfc = torch.optim.SGD(params=[{ 'params': module_partial_fc.parameters() }], lr=cfg.lr / 512 * cfg.batch_size * world_size, momentum=0.9, weight_decay=cfg.weight_decay) scheduler_backbone = torch.optim.lr_scheduler.LambdaLR( optimizer=opt_backbone, lr_lambda=cfg.lr_func) scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_pfc, lr_lambda=cfg.lr_func) start_epoch = 0 total_step = int( len(trainset) / cfg.batch_size / world_size * cfg.num_epoch) if rank == 0: logging.info("Total Step is: %d" % total_step) callback_verification = CallBackVerification(2000, rank, cfg.val_targets, cfg.rec) callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, None) callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output) loss = AverageMeter() global_step = 0 grad_scaler = MaxClipGradScaler( cfg.batch_size, 128 * cfg.batch_size, growth_interval=100) if cfg.fp16 else None for epoch in range(start_epoch, cfg.num_epoch): train_sampler.set_epoch(epoch) for step, (img, label) in enumerate(train_loader): global_step += 1 features = F.normalize(backbone(img)) x_grad, loss_v = module_partial_fc.forward_backward( label, features, opt_pfc) if cfg.fp16: features.backward(grad_scaler.scale(x_grad)) grad_scaler.unscale_(opt_backbone) clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2) grad_scaler.step(opt_backbone) grad_scaler.update() else: features.backward(x_grad) clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2) opt_backbone.step() opt_pfc.step() module_partial_fc.update() opt_backbone.zero_grad() opt_pfc.zero_grad() loss.update(loss_v, 1) callback_logging(global_step, loss, epoch, cfg.fp16,
grad_scaler) callback_verification(global_step, backbone) callback_checkpoint(global_step, backbone, module_partial_fc) scheduler_backbone.step() scheduler_pfc.step() dist.destroy_process_group()
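# The linear learning-rate scaling used for both optimizers above, written out as
# standalone arithmetic: the configured cfg.lr is interpreted as the rate for a
# reference global batch of 512.
def scaled_lr(base_lr, batch_size, world_size, reference_batch=512):
    return base_lr / reference_batch * batch_size * world_size

# e.g. scaled_lr(0.1, 128, 8) == 0.2  (global batch 1024 -> 2x the reference lr)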
def main(args): # dist world_size = int(os.environ['WORLD_SIZE']) local_rank = args.local_rank rank = int(os.environ['RANK']) dist_url = "tcp://{}:{}".format(os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"]) dist.init_process_group(backend='nccl', init_method=dist_url, rank=rank, world_size=world_size) torch.cuda.set_device(local_rank) # logging if not os.path.exists(cfg.output) and rank == 0: os.makedirs(cfg.output) else: time.sleep(2) log_root = logging.getLogger() init_logging(log_root, rank, cfg.output) # data trainset = MXFaceDataset(root_dir=cfg.rec) train_sampler = torch.utils.data.distributed.DistributedSampler(trainset) train_loader = DataLoader(trainset, cfg.batch_size, shuffle=False, num_workers=8, pin_memory=True, sampler=train_sampler, drop_last=True) # backbone and DDP f_ = open(args.pruned_info) cfg_ = [int(x) for x in f_.read().split()] f_.close() backbone = backbones.__dict__[args.network](dropout=cfg.dropout, fp16=cfg.fp16, cfg=cfg_) if args.resume: try: backbone_pth = os.path.join(cfg.output, "backbone.pth") backbone.load_state_dict( torch.load(backbone_pth, map_location=torch.device(local_rank))) if rank == 0: logging.info("backbone resume successfully!") except (FileNotFoundError, KeyError, IndexError, RuntimeError): logging.info("resume fail, backbone init successfully!") backbone = backbone.cuda() backbone = torch.nn.SyncBatchNorm.convert_sync_batchnorm(backbone) backbone = DDP(module=backbone, device_ids=[local_rank]) # fc and loss margin_softmax = losses.__dict__[args.loss]() module_partial_fc = PartialFC(rank=rank, local_rank=local_rank, world_size=world_size, resume=args.resume, batch_size=cfg.batch_size, margin_softmax=margin_softmax, num_classes=cfg.num_classes, sample_rate=cfg.sample_rate, embedding_size=cfg.embedding_size, prefix=cfg.output) # optimizer opt_backbone = torch.optim.SGD(params=[{ 'params': backbone.parameters() }], lr=cfg.lr / 512 * cfg.batch_size * world_size, momentum=0.9, weight_decay=cfg.weight_decay) opt_pfc = torch.optim.SGD(params=[{ 'params': module_partial_fc.parameters() }], lr=cfg.lr / 512 * cfg.batch_size * world_size, momentum=0.9, weight_decay=cfg.weight_decay) scheduler_backbone = torch.optim.lr_scheduler.LambdaLR( optimizer=opt_backbone, lr_lambda=cfg.lr_func) scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_pfc, lr_lambda=cfg.lr_func) # train and valid start_epoch = 0 total_step = int( len(trainset) / cfg.batch_size / world_size * cfg.num_epoch) if rank == 0: logging.info("Total Step is: %d" % total_step) callback_verification = CallBackVerification(2000, rank, cfg.val_targets, cfg.rec) callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, None) callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output) loss = AverageMeter() global_step = 0 grad_scaler = MaxClipGradScaler( cfg.batch_size, 128 * cfg.batch_size, growth_interval=100) if cfg.fp16 else None for epoch in range(start_epoch, cfg.num_epoch): train_sampler.set_epoch(epoch) for step, (img, label) in enumerate(train_loader): img = img.cuda() label = label.cuda() global_step += 1 features = F.normalize(backbone(img)) x_grad, loss_v = module_partial_fc.forward_backward( label, features, opt_pfc) if cfg.fp16: features.backward(grad_scaler.scale(x_grad)) grad_scaler.unscale_(opt_backbone) clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2) grad_scaler.step(opt_backbone) grad_scaler.update() else: features.backward(x_grad) clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2) opt_backbone.step()
opt_pfc.step() module_partial_fc.update() opt_backbone.zero_grad() opt_pfc.zero_grad() loss.update(loss_v, 1) lr = opt_backbone.state_dict()['param_groups'][0]['lr'] callback_logging(global_step, loss, epoch, cfg.fp16, grad_scaler, lr) callback_verification(global_step, backbone) callback_checkpoint(global_step, backbone, module_partial_fc) scheduler_backbone.step() scheduler_pfc.step() # release dist dist.destroy_process_group()
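# A sketch of the pruned-network config file read above: a plain text file of
# whitespace-separated integers, passed to the backbone constructor as cfg
# (presumably per-layer channel widths; that interpretation is an assumption).
def load_pruned_cfg(path):
    with open(path) as f_:
        return [int(x) for x in f_.read().split()]

# e.g. a file containing "32 64 64 128" -> [32, 64, 64, 128]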
def main(args): cfg = get_config(args.config) if not cfg.tf32: torch.backends.cuda.matmul.allow_tf32 = False torch.backends.cudnn.allow_tf32 = False try: world_size = int(os.environ['WORLD_SIZE']) rank = int(os.environ['RANK']) dist_url = "tcp://{}:{}".format(os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"]) except KeyError: world_size = 1 rank = 0 dist_url = "tcp://127.0.0.1:12584" dist.init_process_group(backend='nccl', init_method=dist_url, rank=rank, world_size=world_size) local_rank = args.local_rank torch.cuda.set_device(local_rank) if not os.path.exists(cfg.output) and rank==0: os.makedirs(cfg.output) else: time.sleep(2) log_root = logging.getLogger() init_logging(log_root, rank, cfg.output) if rank==0: logging.info(args) logging.info(cfg) train_set = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank) train_sampler = torch.utils.data.distributed.DistributedSampler( train_set, shuffle=True) train_loader = DataLoaderX( local_rank=local_rank, dataset=train_set, batch_size=cfg.batch_size, sampler=train_sampler, num_workers=2, pin_memory=True, drop_last=True) dropout = 0.4 if cfg.dataset == "webface" else 0 backbone = get_model(cfg.network, dropout=dropout, fp16=cfg.fp16).to(local_rank) backbone_onnx = get_model(cfg.network, dropout=dropout, fp16=False) if args.resume: try: backbone_pth = os.path.join(cfg.output, "backbone.pth") backbone.load_state_dict(torch.load(backbone_pth, map_location=torch.device(local_rank))) if rank==0: logging.info("backbone resume successfully!") except (FileNotFoundError, KeyError, IndexError, RuntimeError): logging.info("resume fail, backbone init successfully!") for ps in backbone.parameters(): dist.broadcast(ps, 0) backbone = torch.nn.parallel.DistributedDataParallel( module=backbone, broadcast_buffers=False, device_ids=[local_rank]) backbone.train() cfg_vpl = cfg.vpl vpl_momentum = cfg_vpl['momentum'] if vpl_momentum: backbone_w = get_model(cfg.network, dropout=dropout, fp16=cfg.fp16).to(local_rank) backbone_w.train() for param_b, param_w in zip(backbone.module.parameters(), backbone_w.parameters()): param_w.data.copy_(param_b.data) param_w.requires_grad = False margin_softmax = losses.get_loss(cfg.loss) module_fc = VPL( rank=rank, local_rank=local_rank, world_size=world_size, resume=args.resume, batch_size=cfg.batch_size, margin_softmax=margin_softmax, num_classes=cfg.num_classes, sample_rate=cfg.sample_rate, embedding_size=cfg.embedding_size, prefix=cfg.output, cfg = cfg_vpl) #print('AAA') opt_backbone = torch.optim.SGD( params=[{'params': backbone.parameters()}], lr=cfg.lr / 512 * cfg.batch_size * world_size, momentum=0.9, weight_decay=cfg.weight_decay) opt_pfc = torch.optim.SGD( params=[{'params': module_fc.parameters()}], lr=cfg.lr / 512 * cfg.batch_size * world_size, momentum=0.9, weight_decay=cfg.weight_decay) #print('AAA') scheduler_backbone = torch.optim.lr_scheduler.LambdaLR( optimizer=opt_backbone, lr_lambda=cfg.lr_func) scheduler_pfc = torch.optim.lr_scheduler.LambdaLR( optimizer=opt_pfc, lr_lambda=cfg.lr_func) start_epoch = 0 total_step = int(len(train_set) / cfg.batch_size / world_size * cfg.num_epoch) if rank==0: logging.info("Total Step is: %d" % total_step) #for epoch in range(start_epoch, cfg.num_epoch): # _lr = cfg.lr_func(epoch) # logging.info('%d:%f'%(epoch, _lr)) callback_verification = CallBackVerification(10000, rank, cfg.val_targets, cfg.rec) callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, None) callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output) loss = AverageMeter() 
global_step = 0 grad_amp = MaxClipGradScaler(cfg.batch_size, 128 * cfg.batch_size, growth_interval=100) if cfg.fp16 else None use_batch_shuffle = True alpha = 0.999 for epoch in range(start_epoch, cfg.num_epoch): train_sampler.set_epoch(epoch) for step, (img, label) in enumerate(train_loader): global_step += 1 #img = img.to(memory_format=torch.channels_last) features = F.normalize(backbone(img)) feature_w = None if vpl_momentum: with torch.no_grad(): for param_b, param_w in zip(backbone.module.parameters(), backbone_w.parameters()): param_w.data = param_w.data * alpha + param_b.data * (1. - alpha) if use_batch_shuffle: img_w, idx_unshuffle = batch_shuffle_ddp(img, rank, world_size) feature_w = F.normalize(backbone_w(img_w)) if use_batch_shuffle: feature_w = batch_unshuffle_ddp(feature_w, idx_unshuffle, rank, world_size) feature_w = feature_w.detach() x_grad, loss_v = module_fc.forward_backward(label, features, opt_pfc, feature_w) if cfg.fp16: features.backward(grad_amp.scale(x_grad)) grad_amp.unscale_(opt_backbone) clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2) grad_amp.step(opt_backbone) grad_amp.update() else: features.backward(x_grad) clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2) opt_backbone.step() opt_pfc.step() module_fc.update() opt_backbone.zero_grad() opt_pfc.zero_grad() loss.update(loss_v, 1) callback_logging(global_step, loss, epoch, cfg.fp16, grad_amp) callback_verification(global_step, backbone) callback_checkpoint(global_step, backbone, module_fc, backbone_onnx) scheduler_backbone.step() scheduler_pfc.step() dist.destroy_process_group()
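# The momentum (EMA) update applied to backbone_w above, isolated for clarity: each
# weight of the momentum encoder drifts toward the trained backbone with decay
# alpha = 0.999. Pure-Python sketch over plain floats.
def ema_update(w, b, alpha=0.999):
    return w * alpha + b * (1.0 - alpha)

# e.g. ema_update(0.0, 1.0) == 0.001; repeated updates move w slowly toward b.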
def main(args): cfg = get_config(args.config) cfg.device_num_per_node = args.device_num_per_node cfg.total_batch_size = cfg.batch_size * cfg.device_num_per_node * cfg.num_nodes cfg.steps_per_epoch = math.ceil(cfg.num_image / cfg.total_batch_size) cfg.total_step = cfg.num_epoch * cfg.steps_per_epoch cfg.lr_steps = (np.array(cfg.decay_epoch) * cfg.steps_per_epoch).tolist() lr_scales = [0.1, 0.01, 0.001, 0.0001] cfg.lr_scales = lr_scales[:len(cfg.lr_steps)] cfg.output = os.path.join("work_dir", cfg.output, cfg.loss) world_size = cfg.num_nodes os.makedirs(cfg.output, exist_ok=True) log_root = logging.getLogger() init_logging(log_root, cfg.output) flow.config.gpu_device_num(cfg.device_num_per_node) logging.info("gpu num: %d" % cfg.device_num_per_node) if cfg.num_nodes > 1: assert cfg.num_nodes <= len( cfg.node_ips ), "The number of nodes should not be greater than length of node_ips list." flow.env.ctrl_port(12138) nodes = [] for ip in cfg.node_ips: addr_dict = {} addr_dict["addr"] = ip nodes.append(addr_dict) flow.env.machine(nodes) flow.env.log_dir(cfg.output) for key, value in cfg.items(): num_space = 35 - len(key) logging.info(": " + key + " " * num_space + str(value)) train_func = make_train_func(cfg) val_infer = Validator(cfg) callback_verification = CallBackVerification(3000, cfg.val_targets, cfg.eval_ofrecord_path) callback_logging = CallBackLogging(50, cfg.total_step, cfg.total_batch_size, world_size, None) if cfg.resume and os.path.exists(cfg.model_load_dir): logging.info("Loading model from {}".format(cfg.model_load_dir)) variables = flow.checkpoint.get(cfg.model_load_dir) flow.load_variables(variables) start_epoch = 0 global_step = 0 lr = cfg.lr for epoch in range(start_epoch, cfg.num_epoch): for steps in range(cfg.steps_per_epoch): train_func().async_get( callback_logging.metric_cb(global_step, epoch, lr)) callback_verification(global_step, val_infer.get_symbol_val_fn) global_step += 1 if epoch in cfg.decay_epoch: lr *= 0.1 logging.info("lr_steps: %d" % global_step) logging.info("lr change to %f" % lr) # snapshot path = os.path.join(cfg.output, "snapshot_" + str(epoch)) flow.checkpoint.save(path) logging.info("oneflow Model Saved in '{}'".format(path))
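# The step bookkeeping computed above, as standalone arithmetic (the helper name and
# example numbers are illustrative only): decay epochs are converted into global-step
# milestones via steps_per_epoch.
import math

def lr_milestones(num_image, batch_size_per_gpu, gpus_per_node, num_nodes, decay_epoch):
    total_batch_size = batch_size_per_gpu * gpus_per_node * num_nodes
    steps_per_epoch = math.ceil(num_image / total_batch_size)
    return [e * steps_per_epoch for e in decay_epoch]

# e.g. lr_milestones(1000000, 128, 8, 1, [10, 16, 22]) -> [9770, 15632, 21494]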
def main(args): world_size = 1 rank = 0 if not os.path.exists(args.output): os.makedirs(args.output) else: time.sleep(2) writer = LogWriter(logdir=args.logdir) trainset = CommonDataset(root_dir=cfg.data_dir, label_file=cfg.file_list, is_bin=args.is_bin) train_loader = DataLoader( dataset=trainset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=0) backbone = eval("backbones.{}".format(args.network))() backbone.train() clip_by_norm = ClipGradByNorm(5.0) margin_softmax = eval("losses.{}".format(args.loss))() module_partial_fc = PartialFC( rank=0, world_size=1, resume=0, batch_size=args.batch_size, margin_softmax=margin_softmax, num_classes=cfg.num_classes, sample_rate=cfg.sample_rate, embedding_size=args.embedding_size, prefix=args.output) scheduler_backbone_decay = paddle.optimizer.lr.LambdaDecay( learning_rate=args.lr, lr_lambda=cfg.lr_func, verbose=True) scheduler_backbone = paddle.optimizer.lr.LinearWarmup( learning_rate=scheduler_backbone_decay, warmup_steps=cfg.warmup_epoch, start_lr=0, end_lr=args.lr / 512 * args.batch_size, verbose=True) opt_backbone = paddle.optimizer.Momentum( parameters=backbone.parameters(), learning_rate=scheduler_backbone, momentum=0.9, weight_decay=args.weight_decay, grad_clip=clip_by_norm) scheduler_pfc_decay = paddle.optimizer.lr.LambdaDecay( learning_rate=args.lr, lr_lambda=cfg.lr_func, verbose=True) scheduler_pfc = paddle.optimizer.lr.LinearWarmup( learning_rate=scheduler_pfc_decay, warmup_steps=cfg.warmup_epoch, start_lr=0, end_lr=args.lr / 512 * args.batch_size, verbose=True) opt_pfc = paddle.optimizer.Momentum( parameters=module_partial_fc.parameters(), learning_rate=scheduler_pfc, momentum=0.9, weight_decay=args.weight_decay, grad_clip=clip_by_norm) start_epoch = 0 total_step = int( len(trainset) / args.batch_size / world_size * cfg.num_epoch) if rank == 0: print("Total Step is: %d" % total_step) callback_verification = CallBackVerification(2000, rank, cfg.val_targets, cfg.data_dir) callback_logging = CallBackLogging(10, rank, total_step, args.batch_size, world_size, writer) callback_checkpoint = CallBackModelCheckpoint(rank, args.output, args.network) loss = AverageMeter() global_step = 0 for epoch in range(start_epoch, cfg.num_epoch): for step, (img, label) in enumerate(train_loader): label = label.flatten() global_step += 1 sys.stdout.flush() features = F.normalize(backbone(img)) x_grad, loss_v = module_partial_fc.forward_backward( label, features, opt_pfc) sys.stdout.flush() (features.multiply(x_grad)).backward() sys.stdout.flush() opt_backbone.step() opt_pfc.step() module_partial_fc.update() opt_backbone.clear_gradients() opt_pfc.clear_gradients() sys.stdout.flush() lr_backbone_value = opt_backbone._global_learning_rate().numpy()[0] lr_pfc_value = opt_pfc._global_learning_rate().numpy()[0] loss.update(loss_v, 1) callback_logging(global_step, loss, epoch, lr_backbone_value, lr_pfc_value) sys.stdout.flush() callback_verification(global_step, backbone) callback_checkpoint(global_step, backbone, module_partial_fc) scheduler_backbone.step() scheduler_pfc.step() writer.close()
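# A rough sketch of the schedule composed above: LinearWarmup ramps the lr from
# start_lr to end_lr over warmup_steps, after which the wrapped LambdaDecay takes
# over. This is a pure-Python approximation; the exact paddle stepping semantics
# (per-epoch here, since scheduler.step() is called once per epoch) may differ.
def warmup_then_lambda(step, warmup_steps, start_lr, end_lr, base_lr, lr_lambda):
    if step < warmup_steps:
        return start_lr + (end_lr - start_lr) * step / warmup_steps
    return base_lr * lr_lambda(step)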
def main(args): world_size = 1 rank = 0 local_rank = args.local_rank if not os.path.exists(cfg.output): os.makedirs(cfg.output) else: time.sleep(2) writer = LogWriter(logdir=cfg.logdir) trainset = MXFaceDataset(root_dir=cfg.rec) train_loader = DataLoader(dataset=trainset, batch_size=cfg.batch_size, shuffle=True, drop_last=True, num_workers=0) dropout = 0.4 if cfg.dataset == "webface" else 0 backbone = eval("backbones.{}".format(args.network))(False, dropout=dropout, fp16=False) backbone.train() clip_by_norm = ClipGradByNorm(5.0) margin_softmax = eval("losses.{}".format(args.loss))() module_partial_fc = PartialFC(rank=0, local_rank=0, world_size=1, resume=0, batch_size=cfg.batch_size, margin_softmax=margin_softmax, num_classes=cfg.num_classes, sample_rate=cfg.sample_rate, embedding_size=cfg.embedding_size, prefix=cfg.output) scheduler_backbone = paddle.optimizer.lr.LambdaDecay(learning_rate=cfg.lr / 512 * cfg.batch_size, lr_lambda=cfg.lr_func, verbose=True) opt_backbone = paddle.optimizer.SGD(parameters=backbone.parameters(), learning_rate=scheduler_backbone, weight_decay=cfg.weight_decay, grad_clip=clip_by_norm) scheduler_pfc = paddle.optimizer.lr.LambdaDecay(learning_rate=cfg.lr / 512 * cfg.batch_size, lr_lambda=cfg.lr_func, verbose=True) opt_pfc = paddle.optimizer.SGD(parameters=module_partial_fc.parameters(), learning_rate=scheduler_pfc, weight_decay=cfg.weight_decay, grad_clip=clip_by_norm) start_epoch = 0 total_step = int( len(trainset) / cfg.batch_size / world_size * cfg.num_epoch) if rank == 0: print("Total Step is: %d" % total_step) callback_verification = CallBackVerification(2000, rank, cfg.val_targets, cfg.rec) callback_logging = CallBackLogging(100, rank, total_step, cfg.batch_size, world_size, writer) callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output) loss = AverageMeter() global_step = 0 grad_scaler = MaxClipGradScaler( cfg.batch_size, 128 * cfg.batch_size, growth_interval=100) if cfg.fp16 else None for epoch in range(start_epoch, cfg.num_epoch): for step, (img, label) in enumerate(train_loader): label = label.flatten() global_step += 1 features = F.normalize(backbone(img)) x_grad, loss_v = module_partial_fc.forward_backward( label, features, opt_pfc) if cfg.fp16: scaled = grad_scaler.scale(x_grad) (features.multiply(scaled)).backward() grad_scaler._unscale(opt_backbone) grad_scaler.minimize(opt_backbone, scaled) else: (features.multiply(x_grad)).backward() opt_backbone.step() opt_pfc.step() module_partial_fc.update() opt_backbone.clear_gradients() opt_pfc.clear_gradients() loss.update(loss_v, 1) callback_logging(global_step, loss, epoch, cfg.fp16, grad_scaler) callback_verification(global_step, backbone) callback_checkpoint(global_step, backbone, module_partial_fc) scheduler_backbone.step() scheduler_pfc.step() writer.close()
def main(args): os.environ["CUDA_VISIBLE_DEVICES"] = "0,1" cfg = get_config(args.config) try: world_size = int(os.environ['WORLD_SIZE']) rank = int(os.environ['RANK']) dist.init_process_group('nccl') except KeyError: world_size = 1 rank = 0 dist.init_process_group(backend='nccl', init_method="tcp://127.0.0.1:12584", rank=rank, world_size=world_size) local_rank = args.local_rank torch.cuda.set_device(local_rank) os.makedirs(cfg.output, exist_ok=True) init_logging(rank, cfg.output) if cfg.rec == "synthetic": train_set = SyntheticDataset(local_rank=local_rank) else: train_set = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank) train_sampler = torch.utils.data.distributed.DistributedSampler( train_set, shuffle=True) train_loader = DataLoaderX(local_rank=local_rank, dataset=train_set, batch_size=cfg.batch_size, sampler=train_sampler, num_workers=2, pin_memory=True, drop_last=True) backbone = get_model(cfg.network, dropout=0.0, fp16=cfg.fp16, num_features=cfg.embedding_size).to(local_rank) summary(backbone, input_size=(3, 112, 112)) if cfg.resume: try: backbone_pth = os.path.join(cfg.output, "backbone.pth") backbone.load_state_dict( torch.load(backbone_pth, map_location=torch.device(local_rank))) if rank == 0: logging.info("backbone resume successfully!") except (FileNotFoundError, KeyError, IndexError, RuntimeError): if rank == 0: logging.info("resume fail, backbone init successfully!") backbone = torch.nn.parallel.DistributedDataParallel( module=backbone, broadcast_buffers=False, device_ids=[local_rank]) backbone.train() if cfg.loss == 'magface': margin_softmax = losses.get_loss(cfg.loss, lambda_g=cfg.lambda_g) elif cfg.loss == 'mag_cosface': margin_softmax = losses.get_loss(cfg.loss) else: margin_softmax = losses.get_loss(cfg.loss, s=cfg.s, m1=cfg.m1, m2=cfg.m2, m3=cfg.m3) module_partial_fc = PartialFC(rank=rank, local_rank=local_rank, world_size=world_size, resume=cfg.resume, batch_size=cfg.batch_size, margin_softmax=margin_softmax, num_classes=cfg.num_classes, sample_rate=cfg.sample_rate, embedding_size=cfg.embedding_size, prefix=cfg.output) opt_backbone = torch.optim.SGD(params=[{ 'params': backbone.parameters() }], lr=cfg.lr / 512 * cfg.batch_size * world_size, momentum=0.9, weight_decay=cfg.weight_decay) opt_pfc = torch.optim.SGD(params=[{ 'params': module_partial_fc.parameters() }], lr=cfg.lr / 512 * cfg.batch_size * world_size, momentum=0.9, weight_decay=cfg.weight_decay) num_image = len(train_set) total_batch_size = cfg.batch_size * world_size cfg.warmup_step = num_image // total_batch_size * cfg.warmup_epoch cfg.total_step = num_image // total_batch_size * cfg.num_epoch def lr_step_func(current_step): cfg.decay_step = [ x * num_image // total_batch_size for x in cfg.decay_epoch ] if current_step < cfg.warmup_step: return current_step / cfg.warmup_step else: return 0.1**len([m for m in cfg.decay_step if m <= current_step]) scheduler_backbone = torch.optim.lr_scheduler.LambdaLR( optimizer=opt_backbone, lr_lambda=lr_step_func) scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_pfc, lr_lambda=lr_step_func) for key, value in cfg.items(): num_space = 25 - len(key) logging.info(": " + key + " " * num_space + str(value)) val_target = cfg.val_targets callback_verification = CallBackVerification(2000, rank, val_target, cfg.rec) callback_logging = CallBackLogging(50, rank, cfg.total_step, cfg.batch_size, world_size, None) callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output) loss = AverageMeter() start_epoch = 0 global_step = 0 grad_amp = MaxClipGradScaler(
cfg.batch_size, 128 * cfg.batch_size, growth_interval=100) if cfg.fp16 else None for epoch in range(start_epoch, cfg.num_epoch): train_sampler.set_epoch(epoch) for step, (img, label) in enumerate(train_loader): global_step += 1 x = backbone(img) features = F.normalize(x) x_grad, loss_v = module_partial_fc.forward_backward( label, features, opt_pfc, x) if cfg.fp16: features.backward(grad_amp.scale(x_grad)) grad_amp.unscale_(opt_backbone) clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2) grad_amp.step(opt_backbone) grad_amp.update() else: features.backward(x_grad) clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2) opt_backbone.step() opt_pfc.step() module_partial_fc.update() opt_backbone.zero_grad() opt_pfc.zero_grad() loss.update(loss_v, 1) callback_logging(global_step, loss, epoch, cfg.fp16, scheduler_backbone.get_last_lr()[0], grad_amp) callback_verification(global_step, backbone) scheduler_backbone.step() scheduler_pfc.step() callback_checkpoint(global_step, backbone, module_partial_fc) callback_verification('last', backbone) dist.destroy_process_group()
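# Usage illustration for the lr_step_func defined above: a linear warmup to 1.0 over
# warmup_step steps, then a 10x drop at each decay step. The returned values are the
# multipliers LambdaLR applies to the base lr; the numbers below are made-up examples.
def lr_multiplier(current_step, warmup_step, decay_steps):
    if current_step < warmup_step:
        return current_step / warmup_step
    return 0.1 ** len([m for m in decay_steps if m <= current_step])

# e.g. with warmup_step=1000 and decay_steps=[20000, 28000]:
# lr_multiplier(500, 1000, [20000, 28000])   -> 0.5
# lr_multiplier(25000, 1000, [20000, 28000]) -> 0.1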
def main(args): torch.cuda.set_device(args.local_rank) cfg = get_config(args.config) os.makedirs(cfg.output, exist_ok=True) init_logging(rank, cfg.output) summary_writer = (SummaryWriter( log_dir=os.path.join(cfg.output, "tensorboard")) if rank == 0 else None) train_loader = get_dataloader(cfg.rec, local_rank=args.local_rank, batch_size=cfg.batch_size, dali=cfg.dali) backbone = get_model(cfg.network, dropout=0.0, fp16=cfg.fp16, num_features=cfg.embedding_size).cuda() backbone = torch.nn.parallel.DistributedDataParallel( module=backbone, broadcast_buffers=False, device_ids=[args.local_rank]) backbone.train() if cfg.loss == "arcface": margin_loss = ArcFace() elif cfg.loss == "cosface": margin_loss = CosFace() else: raise module_partial_fc = PartialFC(margin_loss, cfg.embedding_size, cfg.num_classes, cfg.sample_rate, cfg.fp16) module_partial_fc.train().cuda() # TODO the params of partial fc must be last in the params list opt = torch.optim.SGD(params=[ { "params": backbone.parameters(), }, { "params": module_partial_fc.parameters(), }, ], lr=cfg.lr, momentum=0.9, weight_decay=cfg.weight_decay) total_batch_size = cfg.batch_size * world_size cfg.warmup_step = cfg.num_image // total_batch_size * cfg.warmup_epoch cfg.total_step = cfg.num_image // total_batch_size * cfg.num_epoch lr_scheduler = PolyScheduler(optimizer=opt, base_lr=cfg.lr, max_steps=cfg.total_step, warmup_steps=cfg.warmup_step) for key, value in cfg.items(): num_space = 25 - len(key) logging.info(": " + key + " " * num_space + str(value)) callback_verification = CallBackVerification(val_targets=cfg.val_targets, rec_prefix=cfg.rec, summary_writer=summary_writer) callback_logging = CallBackLogging(frequent=cfg.frequent, total_step=cfg.total_step, batch_size=cfg.batch_size, writer=summary_writer) loss_am = AverageMeter() start_epoch = 0 global_step = 0 amp = torch.cuda.amp.grad_scaler.GradScaler(growth_interval=100) for epoch in range(start_epoch, cfg.num_epoch): if isinstance(train_loader, DataLoader): train_loader.sampler.set_epoch(epoch) for _, (img, local_labels) in enumerate(train_loader): global_step += 1 local_embeddings = backbone(img) loss: torch.Tensor = module_partial_fc(local_embeddings, local_labels, opt) if cfg.fp16: amp.scale(loss).backward() amp.unscale_(opt) torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5) amp.step(opt) amp.update() else: loss.backward() torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5) opt.step() opt.zero_grad() lr_scheduler.step() with torch.no_grad(): loss_am.update(loss.item(), 1) callback_logging(global_step, loss_am, epoch, cfg.fp16, lr_scheduler.get_last_lr()[0], amp) if global_step % cfg.verbose == 0 and global_step > 200: callback_verification(global_step, backbone) path_pfc = os.path.join(cfg.output, "softmax_fc_gpu_{}.pt".format(rank)) torch.save(module_partial_fc.state_dict(), path_pfc) if rank == 0: path_module = os.path.join(cfg.output, "model.pt") torch.save(backbone.module.state_dict(), path_module) if cfg.dali: train_loader.reset() if rank == 0: path_module = os.path.join(cfg.output, "model.pt") torch.save(backbone.module.state_dict(), path_module) distributed.destroy_process_group()
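# A hedged sketch of what a polynomial decay schedule like the PolyScheduler used
# above typically computes (the class itself is not shown here, and the power and
# warmup details are assumptions): warm up linearly to base_lr, then decay toward
# zero as (1 - t)^power over the remaining steps.
def poly_lr(step, base_lr, max_steps, warmup_steps, power=2.0):
    if step < warmup_steps:
        return base_lr * step / warmup_steps
    t = (step - warmup_steps) / max(1, max_steps - warmup_steps)
    return base_lr * (1.0 - t) ** power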
def main(args): seed = 2333 seed = seed + rank torch.manual_seed(seed) np.random.seed(seed) torch.cuda.set_device(args.local_rank) cfg = get_config(args.config) os.makedirs(cfg.output, exist_ok=True) init_logging(rank, cfg.output) summary_writer = (SummaryWriter( log_dir=os.path.join(cfg.output, "tensorboard")) if rank == 0 else None) train_loader = get_dataloader(cfg.rec, local_rank=args.local_rank, batch_size=cfg.batch_size, dali=cfg.dali) backbone = get_model(cfg.network, dropout=0.0, fp16=cfg.fp16, num_features=cfg.embedding_size).cuda() backbone = torch.nn.parallel.DistributedDataParallel( module=backbone, broadcast_buffers=False, device_ids=[args.local_rank], bucket_cap_mb=16, find_unused_parameters=True) backbone.train() # FIXME using gradient checkpoint if there are some unused parameters will cause error backbone._set_static_graph() margin_loss = CombinedMarginLoss(64, cfg.margin_list[0], cfg.margin_list[1], cfg.margin_list[2], cfg.interclass_filtering_threshold) if cfg.optimizer == "sgd": module_partial_fc = PartialFC(margin_loss, cfg.embedding_size, cfg.num_classes, cfg.sample_rate, cfg.fp16) module_partial_fc.train().cuda() opt = torch.optim.SGD(params=[{ "params": backbone.parameters() }, { "params": module_partial_fc.parameters() }], lr=cfg.lr, momentum=0.9, weight_decay=cfg.weight_decay) elif cfg.optimizer == "adamw": module_partial_fc = PartialFCAdamW(margin_loss, cfg.embedding_size, cfg.num_classes, cfg.sample_rate, cfg.fp16) module_partial_fc.train().cuda() opt = torch.optim.AdamW(params=[{ "params": backbone.parameters() }, { "params": module_partial_fc.parameters() }], lr=cfg.lr, weight_decay=cfg.weight_decay) else: raise cfg.total_batch_size = cfg.batch_size * world_size cfg.warmup_step = cfg.num_image // cfg.total_batch_size * cfg.warmup_epoch cfg.total_step = cfg.num_image // cfg.total_batch_size * cfg.num_epoch lr_scheduler = PolyScheduler(optimizer=opt, base_lr=cfg.lr, max_steps=cfg.total_step, warmup_steps=cfg.warmup_step) for key, value in cfg.items(): num_space = 25 - len(key) logging.info(": " + key + " " * num_space + str(value)) callback_verification = CallBackVerification(val_targets=cfg.val_targets, rec_prefix=cfg.rec, summary_writer=summary_writer) callback_logging = CallBackLogging(frequent=cfg.frequent, total_step=cfg.total_step, batch_size=cfg.batch_size, writer=summary_writer) loss_am = AverageMeter() start_epoch = 0 global_step = 0 amp = torch.cuda.amp.grad_scaler.GradScaler(growth_interval=100) for epoch in range(start_epoch, cfg.num_epoch): if isinstance(train_loader, DataLoader): train_loader.sampler.set_epoch(epoch) for _, (img, local_labels) in enumerate(train_loader): global_step += 1 local_embeddings = backbone(img) loss: torch.Tensor = module_partial_fc(local_embeddings, local_labels, opt) if cfg.fp16: amp.scale(loss).backward() amp.unscale_(opt) torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5) amp.step(opt) amp.update() else: loss.backward() torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5) opt.step() opt.zero_grad() lr_scheduler.step() with torch.no_grad(): loss_am.update(loss.item(), 1) callback_logging(global_step, loss_am, epoch, cfg.fp16, lr_scheduler.get_last_lr()[0], amp) if global_step % cfg.verbose == 0 and global_step > 200: callback_verification(global_step, backbone) path_pfc = os.path.join(cfg.output, "softmax_fc_gpu_{}.pt".format(rank)) torch.save(module_partial_fc.state_dict(), path_pfc) if rank == 0: path_module = os.path.join(cfg.output, "model.pt") torch.save(backbone.module.state_dict(), path_module) if 
cfg.dali: train_loader.reset() if rank == 0: path_module = os.path.join(cfg.output, "model.pt") torch.save(backbone.module.state_dict(), path_module) from torch2onnx import convert_onnx convert_onnx(backbone.module.cpu().eval(), path_module, os.path.join(cfg.output, "model.onnx")) distributed.destroy_process_group()
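# convert_onnx is not shown above; a generic torch.onnx.export call for a 112x112
# face backbone would look roughly like this (the output path and tensor names are
# illustrative, not the repo's actual choices).
import torch

def export_backbone_onnx(backbone, onnx_path="model.onnx"):
    backbone = backbone.cpu().eval()
    dummy = torch.randn(1, 3, 112, 112)
    torch.onnx.export(backbone, dummy, onnx_path,
                      input_names=["input"], output_names=["output"])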