def main(args):
    cfg = get_config(args.config)
    logging.basicConfig(level=logging.NOTSET)
    logging.info(args.model_path)
    backbone = get_model(cfg.network, dropout=0.0, num_features=cfg.embedding_size).to("cuda")
    val_callback = CallBackVerification(1, 0, cfg.val_targets, cfg.ofrecord_path)
    state_dict = flow.load(args.model_path)
    new_parameters = dict()
    for key, value in state_dict.items():
        if "num_batches_tracked" not in key:
            if key == "fc.weight":
                continue
            new_key = key.replace("backbone.", "")
            new_parameters[new_key] = value
    backbone.load_state_dict(new_parameters)
    infer_graph = EvalGraph(backbone, cfg)
    val_callback(1000, backbone, infer_graph)
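# A minimal, standalone sketch of the checkpoint-key remapping used above: the
# training checkpoint stores backbone parameters under a "backbone." prefix and
# also contains the classification head ("fc.weight") plus BatchNorm bookkeeping
# buffers ("num_batches_tracked"), none of which the bare backbone expects.
# The dict below is fake data for illustration only.
def remap_backbone_keys(state_dict):
    return {
        key.replace("backbone.", ""): value
        for key, value in state_dict.items()
        if "num_batches_tracked" not in key and key != "fc.weight"
    }

# Example: only "conv1.weight" survives, with its prefix stripped.
print(remap_backbone_keys({
    "backbone.conv1.weight": 1,
    "backbone.bn1.num_batches_tracked": 2,
    "fc.weight": 3,
}))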
def convert_func(cfg, model_path, out_path, image_size):
    model_module = get_model(cfg.network, dropout=0.0, num_features=cfg.embedding_size).to("cuda")
    model_module.eval()
    print(model_module)
    model_graph = ModelGraph(model_module)
    model_graph._compile(flow.randn(1, 3, image_size, image_size).to("cuda"))

    with tempfile.TemporaryDirectory() as tmpdirname:
        new_parameters = dict()
        parameters = flow.load(model_path)
        for key, value in parameters.items():
            if "num_batches_tracked" not in key:
                if key == "fc.weight":
                    continue
                new_key = key.replace("backbone.", "")
                new_parameters[new_key] = value
        model_module.load_state_dict(new_parameters)
        flow.save(model_module.state_dict(), tmpdirname)
        convert_to_onnx_and_check(model_graph,
                                  flow_weight_dir=tmpdirname,
                                  onnx_model_path="./",
                                  print_outlier=True)
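# convert_to_onnx_and_check already validates the export, but if you want to
# spot-check the saved .onnx file by hand, a rough sketch using onnxruntime
# (an assumption -- onnxruntime is not used elsewhere in these scripts) could
# look like this:
import numpy as np
import onnxruntime as ort

def check_onnx(onnx_path, reference_output, test_input):
    # test_input: float32 numpy array matching the exported input shape;
    # reference_output: embedding produced by the source framework.
    sess = ort.InferenceSession(onnx_path)
    input_name = sess.get_inputs()[0].name
    (onnx_output,) = sess.run(None, {input_name: test_input})  # single-output model
    assert np.allclose(onnx_output, reference_output, rtol=1e-3, atol=1e-5)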
def __init__(self, cfg, placement, load_path, world_size, rank):
    self.placement = placement
    self.load_path = load_path
    self.cfg = cfg
    self.world_size = world_size
    self.rank = rank

    # model
    self.backbone = get_model(cfg.network, dropout=0.0, num_features=cfg.embedding_size).to("cuda")
    self.train_module = Train_Module(cfg, self.backbone, self.placement, world_size).to("cuda")
    if cfg.resume:
        if load_path is not None:
            self.load_state_dict()
        else:
            logging.info("Model resume failed! load path is None")

    # optimizer
    self.optimizer = make_optimizer(cfg, self.train_module)

    # data
    self.train_data_loader = make_data_loader(cfg, 'train', self.cfg.graph, self.cfg.synthetic)

    # loss
    if cfg.loss == "cosface":
        self.margin_softmax = flow.nn.CombinedMarginLoss(1, 0., 0.4).to("cuda")
    else:
        self.margin_softmax = flow.nn.CombinedMarginLoss(1, 0.5, 0.).to("cuda")
    self.of_cross_entropy = CrossEntropyLoss_sbp()

    # lr_scheduler
    self.decay_step = self.cal_decay_step()
    self.scheduler = flow.optim.lr_scheduler.MultiStepLR(
        optimizer=self.optimizer, milestones=self.decay_step, gamma=0.1)

    # log
    self.callback_logging = CallBackLogging(50, rank, cfg.total_step, cfg.batch_size, world_size, None)
    # val
    self.callback_verification = CallBackVerification(
        600, rank, cfg.val_targets, cfg.ofrecord_path, is_consistent=cfg.graph)
    # save checkpoint
    self.callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

    self.losses = AverageMeter()
    self.start_epoch = 0
    self.global_step = 0
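# The CombinedMarginLoss arguments (m1, m2, m3) parameterize one family of
# margin penalties applied to the target logit: cos(m1 * theta + m2) - m3.
# CosFace uses (1, 0, 0.4) -> cos(theta) - 0.4; ArcFace uses (1, 0.5, 0) ->
# cos(theta + 0.5). A tiny numpy sketch of that formula:
import numpy as np

def combined_margin(cos_theta, m1, m2, m3):
    theta = np.arccos(np.clip(cos_theta, -1.0, 1.0))
    return np.cos(m1 * theta + m2) - m3

cos_theta = 0.8
print(combined_margin(cos_theta, 1, 0.0, 0.4))  # cosface: 0.8 - 0.4 = 0.4
print(combined_margin(cos_theta, 1, 0.5, 0.0))  # arcface: cos(theta + 0.5)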
@torch.no_grad()  # inference must not track gradients; .numpy() below requires a grad-free tensor
def inference(weight, name, img):
    if img is None:
        img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.uint8)
    else:
        img = cv2.imread(img)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = np.transpose(img, (2, 0, 1))
    img = torch.from_numpy(img).unsqueeze(0).float()
    img.div_(255).sub_(0.5).div_(0.5)
    net = get_model(name, fp16=False)
    net.load_state_dict(torch.load(weight))
    net.eval()
    feat = net(img).numpy()
    print(feat)
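# The preprocessing above maps uint8 RGB pixels in [0, 255] to floats in
# [-1, 1]: x / 255 -> [0, 1], minus 0.5 -> [-0.5, 0.5], divided by 0.5 ->
# [-1, 1]. A standalone sketch of the same transform:
import numpy as np
import torch

def preprocess(rgb_uint8):
    # HWC uint8 -> NCHW float batch in [-1, 1]
    x = torch.from_numpy(np.transpose(rgb_uint8, (2, 0, 1))).unsqueeze(0).float()
    return x.div_(255).sub_(0.5).div_(0.5)

dummy = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.uint8)
batch = preprocess(dummy)
print(batch.shape, batch.min().item(), batch.max().item())  # [1, 3, 112, 112], ~-1, ~1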
def __init__(self, prefix, data_shape, batch_size=1):
    image_size = (112, 112)
    self.image_size = image_size
    weight = torch.load(prefix)
    # `args` is the module-level argparse namespace of the enclosing script
    resnet = get_model(args.network, dropout=0, fp16=False).cuda()
    resnet.load_state_dict(weight)
    model = torch.nn.DataParallel(resnet)
    self.model = model
    self.model.eval()
    # canonical 5-point landmark template (96x112 crop); the +8.0 x-offset
    # recenters it for 112x112 input
    src = np.array(
        [[30.2946, 51.6963],
         [65.5318, 51.5014],
         [48.0252, 71.7366],
         [33.5493, 92.3655],
         [62.7299, 92.2041]], dtype=np.float32)
    src[:, 0] += 8.0
    self.src = src
    self.batch_size = batch_size
    self.data_shape = data_shape
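# A common way to use the `src` template above (a sketch, assuming skimage and
# cv2 are available and `lmk` is a detector's 5x2 landmark array): estimate a
# similarity transform from the detected landmarks to the template, then warp
# the face into the aligned 112x112 crop.
import cv2
import numpy as np
from skimage import transform as trans

def align_face(img, lmk, src, image_size=(112, 112)):
    tform = trans.SimilarityTransform()
    tform.estimate(lmk, src)      # least-squares fit: lmk -> src
    M = tform.params[0:2, :]      # 2x3 affine matrix for cv2.warpAffine
    return cv2.warpAffine(img, M, image_size, borderValue=0.0)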
def load_insightface_pytorch_model(self,
                                   model_name=None,
                                   pytorch_model_path=None,
                                   pytorch_weight_path=None,
                                   input_shape=(3, 112, 112),
                                   train=False):
    start_time = time.time()
    if pytorch_model_path is not None:
        print('\nStarting load insightface pytorch model \'' + str(pytorch_model_path) + '\'...')
        try:
            self.pytorch_model = torch.load(pytorch_model_path)
        except Exception as ex:
            exception_printer('Load pytorch model failed.')
            return None
    elif model_name is not None and pytorch_weight_path is not None:
        print('\nStarting load insightface pytorch model name: ' + str(model_name) +
              ', weight: \'' + str(pytorch_weight_path) + '\'...')
        try:
            self.pytorch_model = get_model(name=model_name)
            self.pytorch_model.load_state_dict(torch.load(pytorch_weight_path))
        except Exception as ex:
            exception_printer('Load pytorch weight failed.')
            return None
    else:
        # guard added: without it, self.pytorch_model may be undefined below
        exception_printer('Either pytorch_model_path or (model_name, pytorch_weight_path) must be given.')
        return None

    self.pytorch_model.to(device=self.device)
    self.pytorch_model.train(train)
    summary(self.pytorch_model, input_size=input_shape)
    print('Load pytorch model success. Cost time: ' + str(time.time() - start_time) + 's.')
    return self.pytorch_model
def get_symbol_train_job():
    if cfg.use_synthetic_data:
        (labels, images) = load_synthetic(cfg)
    else:
        labels, images = load_train_dataset(cfg)
    image_size = images.shape[2:]
    assert len(image_size) == 2, "The length of image size must be equal to 2."
    assert image_size[0] == image_size[1], "image_size[0] should be equal to image_size[1]."
    embedding = get_model(cfg.network, images, cfg)

    def _get_initializer():
        return flow.random_normal_initializer(mean=0.0, stddev=0.01)

    trainable = True
    if cfg.model_parallel and cfg.device_num_per_node > 1:
        logging.info("Training is using model parallelism now.")
        labels = labels.with_distribute(flow.distribute.broadcast())
        fc1_distribute = flow.distribute.broadcast()
        fc7_data_distribute = flow.distribute.split(1)
        fc7_model_distribute = flow.distribute.split(0)
    else:
        fc1_distribute = flow.distribute.split(0)
        fc7_data_distribute = flow.distribute.split(0)
        fc7_model_distribute = flow.distribute.broadcast()

    weight_regularizer = flow.regularizers.l2(0.0005)
    fc7_weight = flow.get_variable(
        name="fc7-weight",
        shape=(cfg.num_classes, embedding.shape[1]),
        dtype=embedding.dtype,
        initializer=_get_initializer(),
        regularizer=weight_regularizer,
        trainable=trainable,
        model_name="weight",
        distribute=fc7_model_distribute,
    )

    if cfg.partial_fc and cfg.model_parallel:
        logging.info("Training is using model parallelism and optimized by partial_fc now.")
        size = cfg.device_num_per_node * cfg.num_nodes
        num_local = (cfg.num_classes + size - 1) // size
        num_sample = int(num_local * cfg.sample_rate)
        total_num_sample = num_sample * size
        (
            mapped_label,
            sampled_label,
            sampled_weight,
        ) = flow.distributed_partial_fc_sample(
            weight=fc7_weight,
            label=labels,
            num_sample=total_num_sample,
        )
        labels = mapped_label
        fc7_weight = sampled_weight

    fc7_weight = flow.math.l2_normalize(input=fc7_weight, axis=1, epsilon=1e-10)
    fc1 = flow.math.l2_normalize(input=embedding, axis=1, epsilon=1e-10)
    fc7 = flow.matmul(a=fc1.with_distribute(fc1_distribute), b=fc7_weight, transpose_b=True)
    fc7 = fc7.with_distribute(fc7_data_distribute)

    if cfg.loss == "cosface":
        fc7 = flow.combined_margin_loss(fc7, labels, m1=1, m2=0.0, m3=0.4) * 64
    elif cfg.loss == "arcface":
        fc7 = flow.combined_margin_loss(fc7, labels, m1=1, m2=0.5, m3=0.0) * 64
    else:
        raise ValueError()
    fc7 = fc7.with_distribute(fc7_data_distribute)

    loss = flow.nn.sparse_softmax_cross_entropy_with_logits(labels, fc7, name="softmax_loss")

    lr_scheduler = flow.optimizer.PiecewiseScalingScheduler(
        base_lr=cfg.lr, boundaries=cfg.lr_steps, scale=cfg.lr_scales, warmup=None)
    flow.optimizer.SGD(
        lr_scheduler,
        momentum=cfg.momentum if cfg.momentum > 0 else None,
    ).minimize(loss)
    return loss
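# Worked example of the partial-fc sampling arithmetic above: with 8 GPUs,
# 1M classes, and sample_rate 0.1, each device owns ceil(num_classes / size)
# columns of fc7 and samples 10% of them per step.
num_classes, size, sample_rate = 1_000_000, 8, 0.1
num_local = (num_classes + size - 1) // size   # 125000 classes per device
num_sample = int(num_local * sample_rate)      # 12500 sampled per device
total_num_sample = num_sample * size           # 100000 sampled overall
print(num_local, num_sample, total_num_sample)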
def get_symbol_val_job(images: flow.typing.Numpy.Placeholder(
        (self.cfg.val_batch_size, 3, 112, 112))):
    print("val batch data: ", images.shape)
    embedding = get_model(cfg.network, images, cfg)
    return embedding
    parser.add_argument('--network', type=str, default=None,
                        help='backbone network')
    parser.add_argument('--simplify', type=bool, default=False, help='onnx simplify')
    args = parser.parse_args()
    input_file = args.input
    if os.path.isdir(input_file):
        input_file = os.path.join(input_file, "backbone.pth")
    assert os.path.exists(input_file)
    model_name = os.path.basename(os.path.dirname(input_file)).lower()
    params = model_name.split("_")
    if len(params) >= 3 and params[1] in ('arcface', 'cosface'):
        if args.network is None:
            args.network = params[2]
    assert args.network is not None
    print(args)
    backbone_onnx = get_model(args.network, dropout=0)
    output_path = args.output
    if output_path is None:
        output_path = os.path.join(os.path.dirname(__file__), 'onnx')
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    assert os.path.isdir(output_path)
    output_file = os.path.join(output_path, "%s.onnx" % model_name)
    convert_onnx(backbone_onnx, input_file, output_file, simplify=args.simplify)
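# The directory-name convention assumed above is "<dataset>_<loss>_<network>",
# e.g. a checkpoint stored under ms1mv3_arcface_r50/backbone.pth lets the
# script infer --network automatically:
model_name = "ms1mv3_arcface_r50"
params = model_name.split("_")
if len(params) >= 3 and params[1] in ('arcface', 'cosface'):
    print(params[2])  # -> "r50"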
def main(args):
    cfg = get_config(args.config)
    if not cfg.tf32:
        torch.backends.cuda.matmul.allow_tf32 = False
        torch.backends.cudnn.allow_tf32 = False
    try:
        world_size = int(os.environ['WORLD_SIZE'])
        rank = int(os.environ['RANK'])
        dist_url = "tcp://{}:{}".format(os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"])
    except KeyError:
        world_size = 1
        rank = 0
        dist_url = "tcp://127.0.0.1:12584"

    dist.init_process_group(backend='nccl', init_method=dist_url, rank=rank, world_size=world_size)
    local_rank = args.local_rank
    torch.cuda.set_device(local_rank)

    if not os.path.exists(cfg.output) and rank == 0:
        os.makedirs(cfg.output)
    else:
        time.sleep(2)

    log_root = logging.getLogger()
    init_logging(log_root, rank, cfg.output)
    if rank == 0:
        logging.info(args)
        logging.info(cfg)

    train_set = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_set, shuffle=True)
    train_loader = DataLoaderX(
        local_rank=local_rank, dataset=train_set, batch_size=cfg.batch_size,
        sampler=train_sampler, num_workers=2, pin_memory=True, drop_last=True)

    dropout = 0.4 if cfg.dataset == "webface" else 0
    backbone = get_model(cfg.network, dropout=dropout, fp16=cfg.fp16).to(local_rank)
    backbone_onnx = get_model(cfg.network, dropout=dropout, fp16=False)

    if args.resume:
        try:
            backbone_pth = os.path.join(cfg.output, "backbone.pth")
            backbone.load_state_dict(torch.load(backbone_pth, map_location=torch.device(local_rank)))
            if rank == 0:
                logging.info("backbone resume successfully!")
        except (FileNotFoundError, KeyError, IndexError, RuntimeError):
            logging.info("resume fail, backbone init successfully!")

    for ps in backbone.parameters():
        dist.broadcast(ps, 0)
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[local_rank])
    backbone.train()

    cfg_vpl = cfg.vpl
    vpl_momentum = cfg_vpl['momentum']
    if vpl_momentum:
        # momentum (EMA) copy of the backbone, frozen w.r.t. the optimizer
        backbone_w = get_model(cfg.network, dropout=dropout, fp16=cfg.fp16).to(local_rank)
        backbone_w.train()
        for param_b, param_w in zip(backbone.module.parameters(), backbone_w.parameters()):
            param_w.data.copy_(param_b.data)
            param_w.requires_grad = False

    margin_softmax = losses.get_loss(cfg.loss)
    module_fc = VPL(
        rank=rank, local_rank=local_rank, world_size=world_size, resume=args.resume,
        batch_size=cfg.batch_size, margin_softmax=margin_softmax, num_classes=cfg.num_classes,
        sample_rate=cfg.sample_rate, embedding_size=cfg.embedding_size, prefix=cfg.output,
        cfg=cfg_vpl)

    opt_backbone = torch.optim.SGD(
        params=[{'params': backbone.parameters()}],
        lr=cfg.lr / 512 * cfg.batch_size * world_size,
        momentum=0.9, weight_decay=cfg.weight_decay)
    opt_pfc = torch.optim.SGD(
        params=[{'params': module_fc.parameters()}],
        lr=cfg.lr / 512 * cfg.batch_size * world_size,
        momentum=0.9, weight_decay=cfg.weight_decay)

    scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_backbone, lr_lambda=cfg.lr_func)
    scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_pfc, lr_lambda=cfg.lr_func)

    start_epoch = 0
    total_step = int(len(train_set) / cfg.batch_size / world_size * cfg.num_epoch)
    if rank == 0:
        logging.info("Total Step is: %d" % total_step)

    callback_verification = CallBackVerification(10000, rank, cfg.val_targets, cfg.rec)
    callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, None)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

    loss = AverageMeter()
    global_step = 0
    grad_amp = MaxClipGradScaler(cfg.batch_size, 128 * cfg.batch_size,
                                 growth_interval=100) if cfg.fp16 else None
    use_batch_shuffle = True
    alpha = 0.999  # EMA momentum for the weight-averaged backbone

    for epoch in range(start_epoch, cfg.num_epoch):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            global_step += 1
            features = F.normalize(backbone(img))

            feature_w = None
            if vpl_momentum:
                with torch.no_grad():
                    # EMA update: w <- alpha * w + (1 - alpha) * b
                    for param_b, param_w in zip(backbone.module.parameters(),
                                                backbone_w.parameters()):
                        param_w.data = param_w.data * alpha + param_b.data * (1. - alpha)
                    if use_batch_shuffle:
                        img_w, idx_unshuffle = batch_shuffle_ddp(img, rank, world_size)
                    feature_w = F.normalize(backbone_w(img_w))
                    if use_batch_shuffle:
                        feature_w = batch_unshuffle_ddp(feature_w, idx_unshuffle, rank, world_size)
                    feature_w = feature_w.detach()

            x_grad, loss_v = module_fc.forward_backward(label, features, opt_pfc, feature_w)
            if cfg.fp16:
                features.backward(grad_amp.scale(x_grad))
                grad_amp.unscale_(opt_backbone)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                grad_amp.step(opt_backbone)
                grad_amp.update()
            else:
                features.backward(x_grad)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                opt_backbone.step()
            opt_pfc.step()
            module_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, cfg.fp16, grad_amp)
            callback_verification(global_step, backbone)
        callback_checkpoint(global_step, backbone, module_fc, backbone_onnx)
        scheduler_backbone.step()
        scheduler_pfc.step()
    dist.destroy_process_group()
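# Standalone sketch of the momentum (EMA) backbone update used above: the
# weight-averaged copy trails the trained weights with decay alpha and is
# never touched by the optimizer.
import torch

net = torch.nn.Linear(4, 4)
net_w = torch.nn.Linear(4, 4)
net_w.load_state_dict(net.state_dict())
for p in net_w.parameters():
    p.requires_grad = False

alpha = 0.999
with torch.no_grad():
    for param_b, param_w in zip(net.parameters(), net_w.parameters()):
        # w <- alpha * w + (1 - alpha) * b
        param_w.data = param_w.data * alpha + param_b.data * (1.0 - alpha)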
    assert check, "Simplified ONNX model could not be validated"
    onnx.save(model, output)


if __name__ == '__main__':
    import os
    import argparse
    from backbones import get_model

    parser = argparse.ArgumentParser(description='ArcFace PyTorch to onnx')
    parser.add_argument('input', type=str, help='input backbone.pth file or path')
    parser.add_argument('--output', type=str, default=None, help='output onnx path')
    parser.add_argument('--network', type=str, default=None, help='backbone network')
    parser.add_argument('--simplify', type=bool, default=False, help='onnx simplify')
    args = parser.parse_args()
    input_file = args.input
    if os.path.isdir(input_file):
        input_file = os.path.join(input_file, "model.pt")
    assert os.path.exists(input_file)
    assert args.network is not None
    print(args)
    backbone_onnx = get_model(args.network, dropout=0.0, fp16=False, num_features=512)
    if args.output is None:
        args.output = os.path.join(os.path.dirname(args.input), "model.onnx")
    convert_onnx(backbone_onnx, input_file, args.output, simplify=args.simplify)
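# The convert_onnx helper called above is defined earlier in this file; a
# rough sketch of its core (the helper name, opset choice, and argument names
# here are assumptions, not the exact repo code) is a plain torch.onnx.export
# followed by an optional onnx-simplifier pass:
import onnx
import torch
from onnxsim import simplify

def convert_onnx_sketch(net, path_module, output, opset=11, do_simplify=False):
    weight = torch.load(path_module, map_location="cpu")
    net.load_state_dict(weight)
    net.eval()
    dummy = torch.randn(1, 3, 112, 112)  # fixed 112x112 face-crop input
    torch.onnx.export(net, dummy, output, opset_version=opset)
    if do_simplify:
        model = onnx.load(output)
        model, check = simplify(model)
        assert check, "Simplified ONNX model could not be validated"
        onnx.save(model, output)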
def InferenceNet(images: tp.Numpy.Placeholder((1, 3, 112, 112))):
    logits = get_model(cfg.network, images, cfg)
    return logits
def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    cfg = get_config(args.config)
    try:
        world_size = int(os.environ['WORLD_SIZE'])
        rank = int(os.environ['RANK'])
        dist.init_process_group('nccl')
    except KeyError:
        world_size = 1
        rank = 0
        dist.init_process_group(backend='nccl',
                                init_method="tcp://127.0.0.1:12584",
                                rank=rank,
                                world_size=world_size)

    local_rank = args.local_rank
    torch.cuda.set_device(local_rank)
    os.makedirs(cfg.output, exist_ok=True)
    init_logging(rank, cfg.output)

    if cfg.rec == "synthetic":
        train_set = SyntheticDataset(local_rank=local_rank)
    else:
        train_set = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)

    train_sampler = torch.utils.data.distributed.DistributedSampler(train_set, shuffle=True)
    train_loader = DataLoaderX(local_rank=local_rank,
                               dataset=train_set,
                               batch_size=cfg.batch_size,
                               sampler=train_sampler,
                               num_workers=2,
                               pin_memory=True,
                               drop_last=True)

    backbone = get_model(cfg.network,
                         dropout=0.0,
                         fp16=cfg.fp16,
                         num_features=cfg.embedding_size).to(local_rank)
    # print a model summary before training
    summary(backbone, input_size=(3, 112, 112))

    if cfg.resume:
        try:
            backbone_pth = os.path.join(cfg.output, "backbone.pth")
            backbone.load_state_dict(
                torch.load(backbone_pth, map_location=torch.device(local_rank)))
            if rank == 0:
                logging.info("backbone resume successfully!")
        except (FileNotFoundError, KeyError, IndexError, RuntimeError):
            if rank == 0:
                logging.info("resume fail, backbone init successfully!")

    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[local_rank])
    backbone.train()

    if cfg.loss == 'magface':
        margin_softmax = losses.get_loss(cfg.loss, lambda_g=cfg.lambda_g)
    elif cfg.loss == 'mag_cosface':
        margin_softmax = losses.get_loss(cfg.loss)
    else:
        margin_softmax = losses.get_loss(cfg.loss, s=cfg.s, m1=cfg.m1, m2=cfg.m2, m3=cfg.m3)

    module_partial_fc = PartialFC(rank=rank,
                                  local_rank=local_rank,
                                  world_size=world_size,
                                  resume=cfg.resume,
                                  batch_size=cfg.batch_size,
                                  margin_softmax=margin_softmax,
                                  num_classes=cfg.num_classes,
                                  sample_rate=cfg.sample_rate,
                                  embedding_size=cfg.embedding_size,
                                  prefix=cfg.output)

    opt_backbone = torch.optim.SGD(
        params=[{'params': backbone.parameters()}],
        lr=cfg.lr / 512 * cfg.batch_size * world_size,
        momentum=0.9,
        weight_decay=cfg.weight_decay)
    opt_pfc = torch.optim.SGD(
        params=[{'params': module_partial_fc.parameters()}],
        lr=cfg.lr / 512 * cfg.batch_size * world_size,
        momentum=0.9,
        weight_decay=cfg.weight_decay)

    num_image = len(train_set)
    total_batch_size = cfg.batch_size * world_size
    cfg.warmup_step = num_image // total_batch_size * cfg.warmup_epoch
    cfg.total_step = num_image // total_batch_size * cfg.num_epoch

    def lr_step_func(current_step):
        cfg.decay_step = [x * num_image // total_batch_size for x in cfg.decay_epoch]
        if current_step < cfg.warmup_step:
            return current_step / cfg.warmup_step
        else:
            return 0.1 ** len([m for m in cfg.decay_step if m <= current_step])

    scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_backbone, lr_lambda=lr_step_func)
    scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_pfc, lr_lambda=lr_step_func)

    for key, value in cfg.items():
        num_space = 25 - len(key)
        logging.info(": " + key + " " * num_space + str(value))

    val_target = cfg.val_targets
    callback_verification = CallBackVerification(2000, rank, val_target, cfg.rec)
    callback_logging = CallBackLogging(50, rank, cfg.total_step, cfg.batch_size, world_size, None)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

    loss = AverageMeter()
    start_epoch = 0
    global_step = 0
    grad_amp = MaxClipGradScaler(cfg.batch_size, 128 * cfg.batch_size,
                                 growth_interval=100) if cfg.fp16 else None

    for epoch in range(start_epoch, cfg.num_epoch):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            global_step += 1
            x = backbone(img)
            features = F.normalize(x)
            x_grad, loss_v = module_partial_fc.forward_backward(label, features, opt_pfc, x)
            if cfg.fp16:
                features.backward(grad_amp.scale(x_grad))
                grad_amp.unscale_(opt_backbone)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                grad_amp.step(opt_backbone)
                grad_amp.update()
            else:
                features.backward(x_grad)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                opt_backbone.step()
            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, cfg.fp16,
                             scheduler_backbone.get_last_lr()[0], grad_amp)
            callback_verification(global_step, backbone)
            scheduler_backbone.step()
            scheduler_pfc.step()
        callback_checkpoint(global_step, backbone, module_partial_fc)
    callback_verification('last', backbone)
    dist.destroy_process_group()
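# Worked example of lr_step_func above: the multiplier ramps linearly during
# warmup, then drops 10x at each decay step. The numbers below are illustrative.
warmup_step, decay_step = 1000, [8000, 16000]

def lr_multiplier(current_step):
    if current_step < warmup_step:
        return current_step / warmup_step
    return 0.1 ** len([m for m in decay_step if m <= current_step])

for s in (500, 1000, 8000, 16000):
    print(s, lr_multiplier(s))  # 0.5, 1.0, 0.1, 0.01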
def main(args):
    # NOTE: `rank` and `world_size` are module-level globals set during the
    # distributed init at the top of the full script.
    torch.cuda.set_device(args.local_rank)
    cfg = get_config(args.config)
    os.makedirs(cfg.output, exist_ok=True)
    init_logging(rank, cfg.output)
    summary_writer = (SummaryWriter(log_dir=os.path.join(cfg.output, "tensorboard"))
                      if rank == 0 else None)

    train_loader = get_dataloader(cfg.rec,
                                  local_rank=args.local_rank,
                                  batch_size=cfg.batch_size,
                                  dali=cfg.dali)
    backbone = get_model(cfg.network,
                         dropout=0.0,
                         fp16=cfg.fp16,
                         num_features=cfg.embedding_size).cuda()
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[args.local_rank])
    backbone.train()

    if cfg.loss == "arcface":
        margin_loss = ArcFace()
    elif cfg.loss == "cosface":
        margin_loss = CosFace()
    else:
        raise

    module_partial_fc = PartialFC(margin_loss, cfg.embedding_size, cfg.num_classes,
                                  cfg.sample_rate, cfg.fp16)
    module_partial_fc.train().cuda()
    # TODO the params of partial fc must be last in the params list
    opt = torch.optim.SGD(
        params=[{"params": backbone.parameters()},
                {"params": module_partial_fc.parameters()}],
        lr=cfg.lr, momentum=0.9, weight_decay=cfg.weight_decay)

    total_batch_size = cfg.batch_size * world_size
    cfg.warmup_step = cfg.num_image // total_batch_size * cfg.warmup_epoch
    cfg.total_step = cfg.num_image // total_batch_size * cfg.num_epoch

    lr_scheduler = PolyScheduler(optimizer=opt,
                                 base_lr=cfg.lr,
                                 max_steps=cfg.total_step,
                                 warmup_steps=cfg.warmup_step)

    for key, value in cfg.items():
        num_space = 25 - len(key)
        logging.info(": " + key + " " * num_space + str(value))

    callback_verification = CallBackVerification(val_targets=cfg.val_targets,
                                                 rec_prefix=cfg.rec,
                                                 summary_writer=summary_writer)
    callback_logging = CallBackLogging(frequent=cfg.frequent,
                                       total_step=cfg.total_step,
                                       batch_size=cfg.batch_size,
                                       writer=summary_writer)

    loss_am = AverageMeter()
    start_epoch = 0
    global_step = 0
    amp = torch.cuda.amp.grad_scaler.GradScaler(growth_interval=100)

    for epoch in range(start_epoch, cfg.num_epoch):
        if isinstance(train_loader, DataLoader):
            train_loader.sampler.set_epoch(epoch)
        for _, (img, local_labels) in enumerate(train_loader):
            global_step += 1
            local_embeddings = backbone(img)
            loss: torch.Tensor = module_partial_fc(local_embeddings, local_labels, opt)
            if cfg.fp16:
                amp.scale(loss).backward()
                amp.unscale_(opt)
                torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
                amp.step(opt)
                amp.update()
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
                opt.step()
            opt.zero_grad()
            lr_scheduler.step()

            with torch.no_grad():
                loss_am.update(loss.item(), 1)
                callback_logging(global_step, loss_am, epoch, cfg.fp16,
                                 lr_scheduler.get_last_lr()[0], amp)
                if global_step % cfg.verbose == 0 and global_step > 200:
                    callback_verification(global_step, backbone)

        path_pfc = os.path.join(cfg.output, "softmax_fc_gpu_{}.pt".format(rank))
        torch.save(module_partial_fc.state_dict(), path_pfc)
        if rank == 0:
            path_module = os.path.join(cfg.output, "model.pt")
            torch.save(backbone.module.state_dict(), path_module)
        if cfg.dali:
            train_loader.reset()

    if rank == 0:
        path_module = os.path.join(cfg.output, "model.pt")
        torch.save(backbone.module.state_dict(), path_module)
    distributed.destroy_process_group()
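# Sketch of the polynomial decay behind PolyScheduler (assuming the common
# power-2 form; the exact class lives in this repo's lr scheduler module):
# after a linear warmup, lr = base_lr * (1 - t / T) ** 2, where t counts
# post-warmup steps and T = max_steps - warmup_steps.
def poly_lr(step, base_lr, max_steps, warmup_steps, power=2):
    if step < warmup_steps:
        return base_lr * step / warmup_steps
    t = step - warmup_steps
    T = max_steps - warmup_steps
    return base_lr * (1 - t / T) ** power

for s in (0, 500, 1000, 10000, 20000):
    print(s, poly_lr(s, base_lr=0.1, max_steps=20000, warmup_steps=1000))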
from ptflops import get_model_complexity_info
from backbones import get_model
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('n', type=str, default="r100")
    args = parser.parse_args()
    net = get_model(args.n)
    macs, params = get_model_complexity_info(net, (3, 112, 112),
                                             as_strings=False,
                                             print_per_layer_stat=True,
                                             verbose=True)
    gmacs = macs / (1000 ** 3)
    print("%.3f GFLOPs" % gmacs)
    print("%.3f Mparams" % (params / (1000 ** 2)))
    if hasattr(net, "extra_gflops"):
        print("%.3f Extra-GFLOPs" % net.extra_gflops)
        print("%.3f Total-GFLOPs" % (gmacs + net.extra_gflops))
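# Note on units: ptflops returns multiply-accumulates (MACs), and the script
# reports macs / 1000**3 as "GFLOPs", i.e. the common 1 MAC ~= 1 FLOP
# convention (papers that count 2 FLOPs per MAC would double these numbers).
macs = 12_345_678_901
print("%.3f GFLOPs" % (macs / (1000 ** 3)))  # 12.346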
def main(args):
    # NOTE: `rank` and `world_size` are module-level globals set during the
    # distributed init at the top of the full script. The seed is offset by
    # rank so each worker draws a distinct random stream.
    seed = 2333
    seed = seed + rank
    torch.manual_seed(seed)
    np.random.seed(seed)

    torch.cuda.set_device(args.local_rank)
    cfg = get_config(args.config)
    os.makedirs(cfg.output, exist_ok=True)
    init_logging(rank, cfg.output)
    summary_writer = (SummaryWriter(log_dir=os.path.join(cfg.output, "tensorboard"))
                      if rank == 0 else None)

    train_loader = get_dataloader(cfg.rec,
                                  local_rank=args.local_rank,
                                  batch_size=cfg.batch_size,
                                  dali=cfg.dali)
    backbone = get_model(cfg.network,
                         dropout=0.0,
                         fp16=cfg.fp16,
                         num_features=cfg.embedding_size).cuda()
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone,
        broadcast_buffers=False,
        device_ids=[args.local_rank],
        bucket_cap_mb=16,
        find_unused_parameters=True)
    backbone.train()
    # FIXME using gradient checkpoint if there are some unused parameters will cause error
    backbone._set_static_graph()

    margin_loss = CombinedMarginLoss(64,
                                     cfg.margin_list[0],
                                     cfg.margin_list[1],
                                     cfg.margin_list[2],
                                     cfg.interclass_filtering_threshold)

    if cfg.optimizer == "sgd":
        module_partial_fc = PartialFC(margin_loss, cfg.embedding_size, cfg.num_classes,
                                      cfg.sample_rate, cfg.fp16)
        module_partial_fc.train().cuda()
        opt = torch.optim.SGD(
            params=[{"params": backbone.parameters()},
                    {"params": module_partial_fc.parameters()}],
            lr=cfg.lr, momentum=0.9, weight_decay=cfg.weight_decay)
    elif cfg.optimizer == "adamw":
        module_partial_fc = PartialFCAdamW(margin_loss, cfg.embedding_size, cfg.num_classes,
                                           cfg.sample_rate, cfg.fp16)
        module_partial_fc.train().cuda()
        opt = torch.optim.AdamW(
            params=[{"params": backbone.parameters()},
                    {"params": module_partial_fc.parameters()}],
            lr=cfg.lr, weight_decay=cfg.weight_decay)
    else:
        raise

    cfg.total_batch_size = cfg.batch_size * world_size
    cfg.warmup_step = cfg.num_image // cfg.total_batch_size * cfg.warmup_epoch
    cfg.total_step = cfg.num_image // cfg.total_batch_size * cfg.num_epoch

    lr_scheduler = PolyScheduler(optimizer=opt,
                                 base_lr=cfg.lr,
                                 max_steps=cfg.total_step,
                                 warmup_steps=cfg.warmup_step)

    for key, value in cfg.items():
        num_space = 25 - len(key)
        logging.info(": " + key + " " * num_space + str(value))

    callback_verification = CallBackVerification(val_targets=cfg.val_targets,
                                                 rec_prefix=cfg.rec,
                                                 summary_writer=summary_writer)
    callback_logging = CallBackLogging(frequent=cfg.frequent,
                                       total_step=cfg.total_step,
                                       batch_size=cfg.batch_size,
                                       writer=summary_writer)

    loss_am = AverageMeter()
    start_epoch = 0
    global_step = 0
    amp = torch.cuda.amp.grad_scaler.GradScaler(growth_interval=100)

    for epoch in range(start_epoch, cfg.num_epoch):
        if isinstance(train_loader, DataLoader):
            train_loader.sampler.set_epoch(epoch)
        for _, (img, local_labels) in enumerate(train_loader):
            global_step += 1
            local_embeddings = backbone(img)
            loss: torch.Tensor = module_partial_fc(local_embeddings, local_labels, opt)

            if cfg.fp16:
                amp.scale(loss).backward()
                amp.unscale_(opt)
                torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
                amp.step(opt)
                amp.update()
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
                opt.step()
            opt.zero_grad()
            lr_scheduler.step()

            with torch.no_grad():
                loss_am.update(loss.item(), 1)
                callback_logging(global_step, loss_am, epoch, cfg.fp16,
                                 lr_scheduler.get_last_lr()[0], amp)
                if global_step % cfg.verbose == 0 and global_step > 200:
                    callback_verification(global_step, backbone)

        path_pfc = os.path.join(cfg.output, "softmax_fc_gpu_{}.pt".format(rank))
        torch.save(module_partial_fc.state_dict(), path_pfc)
        if rank == 0:
            path_module = os.path.join(cfg.output, "model.pt")
            torch.save(backbone.module.state_dict(), path_module)
        if cfg.dali:
            train_loader.reset()

    if rank == 0:
        path_module = os.path.join(cfg.output, "model.pt")
        torch.save(backbone.module.state_dict(), path_module)
        from torch2onnx import convert_onnx
        convert_onnx(backbone.module.cpu().eval(), path_module,
                     os.path.join(cfg.output, "model.onnx"))
    distributed.destroy_process_group()