def main(local_rank):
    dist.init_process_group(backend='nccl', init_method='env://')
    cfg.local_rank = local_rank
    torch.cuda.set_device(local_rank)
    cfg.rank = dist.get_rank()
    cfg.world_size = dist.get_world_size()

    trainset = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        trainset, shuffle=True)
    train_loader = DataLoaderX(local_rank=local_rank,
                               dataset=trainset,
                               batch_size=cfg.batch_size,
                               sampler=train_sampler,
                               num_workers=0,
                               pin_memory=True,
                               drop_last=False)

    backbone = backbones.iresnet100(False).to(local_rank)
    backbone.train()

    # Broadcast init parameters so all ranks start from the same weights
    for ps in backbone.parameters():
        dist.broadcast(ps, 0)

    # DDP
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[cfg.local_rank])
    backbone.train()

    # Memory classifier (each rank holds only its shard of the class centers)
    dist_sample_classifer = DistSampleClassifier(rank=dist.get_rank(),
                                                 local_rank=local_rank,
                                                 world_size=cfg.world_size)

    # Margin softmax
    margin_softmax = MarginSoftmax(s=64.0, m=0.4)

    # Optimizer for backbone and classifier
    optimizer = SGD([{
        'params': backbone.parameters()
    }, {
        'params': dist_sample_classifer.parameters()
    }],
                    lr=cfg.lr,
                    momentum=0.9,
                    weight_decay=cfg.weight_decay,
                    rescale=cfg.world_size)

    # Lr scheduler
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer=optimizer,
                                                  lr_lambda=cfg.lr_func)
    n_epochs = cfg.num_epoch
    start_epoch = 0

    if local_rank == 0:
        writer = SummaryWriter(log_dir='logs/shows')

    total_step = int(
        len(trainset) / cfg.batch_size / dist.get_world_size() * cfg.num_epoch)
    if dist.get_rank() == 0:
        print("Total Step is: %d" % total_step)

    losses = AverageMeter()
    global_step = 0
    train_start = time.time()
    for epoch in range(start_epoch, n_epochs):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            # Sample the classifier sub-centers and get the normalized weights
            total_label, norm_weight = dist_sample_classifer.prepare(
                label, optimizer)
            features = F.normalize(backbone(img))

            # Features all-gather
            total_features = torch.zeros(features.size()[0] * cfg.world_size,
                                         cfg.embedding_size,
                                         device=local_rank)
            dist.all_gather(list(total_features.chunk(cfg.world_size, dim=0)),
                            features.data)
            total_features.requires_grad = True

            # Calculate logits
            logits = dist_sample_classifer(total_features, norm_weight)
            logits = margin_softmax(logits, total_label)

            with torch.no_grad():
                # Global max for a numerically stable softmax
                max_fc = torch.max(logits, dim=1, keepdim=True)[0]
                dist.all_reduce(max_fc, dist.ReduceOp.MAX)

                # Calculate exp(logits) and all-reduce
                logits_exp = torch.exp(logits - max_fc)
                logits_sum_exp = logits_exp.sum(dim=1, keepdim=True)
                dist.all_reduce(logits_sum_exp, dist.ReduceOp.SUM)

                # Calculate prob
                logits_exp.div_(logits_sum_exp)

                # Get one-hot
                grad = logits_exp
                index = torch.where(total_label != -1)[0]
                one_hot = torch.zeros(index.size()[0],
                                      grad.size()[1],
                                      device=grad.device)
                one_hot.scatter_(1, total_label[index, None], 1)

                # Calculate loss
                loss = torch.zeros(grad.size()[0], 1, device=grad.device)
                loss[index] = grad[index].gather(1, total_label[index, None])
                dist.all_reduce(loss, dist.ReduceOp.SUM)
                loss_v = loss.clamp_min_(1e-30).log_().mean() * (-1)

                # Calculate grad: softmax cross-entropy gradient is prob - one_hot
                grad[index] -= one_hot
                grad.div_(features.size()[0])

            logits.backward(grad)
            if total_features.grad is not None:
                total_features.grad.detach_()
            x_grad = torch.zeros_like(features)

            # Feature gradient reduce-scatter back to the owning rank
            dist.reduce_scatter(
                x_grad, list(total_features.grad.chunk(cfg.world_size, dim=0)))
            x_grad.mul_(cfg.world_size)

            # Backward backbone
            features.backward(x_grad)
            optimizer.step()

            # Update classifier
            dist_sample_classifer.update()
            optimizer.zero_grad()

            losses.update(loss_v, 1)
            if cfg.local_rank == 0 and step % 50 == 0:
                time_now = (time.time() - train_start) / 3600
                time_total = time_now / ((global_step + 1) / total_step)
                time_for_end = time_total - time_now
                writer.add_scalar('time_for_end', time_for_end, global_step)
                writer.add_scalar('loss', loss_v, global_step)
                print(
                    "Speed %d samples/sec Loss %.4f Epoch: %d Global Step: %d Required: %.1f hours"
                    % ((cfg.batch_size * global_step /
                        (time.time() - train_start) * cfg.world_size),
                       losses.avg, epoch, global_step, time_for_end))
                losses.reset()

            global_step += 1
        scheduler.step()
        if dist.get_rank() == 0:
            import os
            if not os.path.exists(cfg.output):
                os.makedirs(cfg.output)
            torch.save(backbone.module.state_dict(),
                       os.path.join(cfg.output, str(epoch) + 'backbone.pth'))
    dist.destroy_process_group()
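Because this main initializes the process group with init_method='env://', each worker must see MASTER_ADDR, MASTER_PORT, RANK and WORLD_SIZE in its environment (torch.distributed.launch / torchrun set these automatically). Below is a minimal single-node launch sketch using torch.multiprocessing.spawn; the _worker wrapper and the port number are illustrative assumptions, not part of the original script.

import os
import torch
import torch.multiprocessing as mp


def _worker(local_rank, world_size):
    # Hypothetical wrapper: provide the env:// rendezvous variables, then
    # call the main(local_rank) defined above. Single node, so rank == local_rank.
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '12355'
    os.environ['RANK'] = str(local_rank)
    os.environ['WORLD_SIZE'] = str(world_size)
    main(local_rank)


if __name__ == '__main__':
    n_gpus = torch.cuda.device_count()
    # Spawn one training process per visible GPU
    mp.spawn(_worker, args=(n_gpus,), nprocs=n_gpus)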
def main(local_rank, world_size, init_method='tcp://127.0.0.1:23499'):
    dist.init_process_group(backend='nccl',
                            init_method=init_method,
                            rank=local_rank,
                            world_size=world_size)
    cfg.local_rank = local_rank
    torch.cuda.set_device(local_rank)
    cfg.rank = dist.get_rank()
    cfg.world_size = world_size
    print(cfg.rank, dist.get_world_size())

    trainset = MXFaceDataset(root_dir='/root/face_datasets/webface/',
                             local_rank=local_rank)
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        trainset, shuffle=True)
    trainloader = DataLoaderX(local_rank=local_rank,
                              dataset=trainset,
                              batch_size=cfg.batch_size,
                              sampler=train_sampler,
                              num_workers=0,
                              pin_memory=True,
                              drop_last=False)

    backbone = iresnet50(False).to(cfg.local_rank)
    backbone.train()
    # backbone = nn.SyncBatchNorm.convert_sync_batchnorm(backbone)

    # Broadcast initial parameters from rank 0 so every process starts identically
    for ps in backbone.parameters():
        dist.broadcast(ps, 0)

    # Note: device_ids uses the global rank, which only matches the GPU index
    # on a single node where rank == local_rank
    backbone = torch.nn.parallel.DistributedDataParallel(
        backbone, broadcast_buffers=False, device_ids=[dist.get_rank()])
    backbone.train()

    # Each rank owns a contiguous slice of the classes
    sub_start, sub_classnum = get_sub_class(cfg.rank, dist.get_world_size())
    print(sub_start, sub_classnum)
    classifier_head = classifier(cfg.embedding_size,
                                 sub_classnum,
                                 sample_rate=0.4)
    cosface = CosFace(s=64.0, m=0.4)

    optimizer = SGD([{
        'params': backbone.parameters()
    }, {
        'params': classifier_head.parameters()
    }],
                    0.1,
                    momentum=0.9,
                    weight_decay=cfg.weight_decay,
                    rescale=cfg.world_size)

    # The warm-up branch never triggers (epoch is never < -1); the schedule
    # multiplies the base lr by 0.1 for each milestone in [20, 29] that has
    # been reached (checked as m - 1 <= epoch)
    warm_up_with_multistep_lr = lambda epoch: (
        (epoch + 1) / (4 + 1))**2 if epoch < -1 else 0.1**len(
            [m for m in [20, 29] if m - 1 <= epoch])
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer, lr_lambda=warm_up_with_multistep_lr)

    n_epochs = 33
    start_epoch = 0
    if cfg.local_rank == 0:
        writer = SummaryWriter(log_dir='logs/shows')
    global_step = 0
    loss_fun = nn.CrossEntropyLoss()

    for epoch in range(start_epoch, n_epochs):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(trainloader):
            start = time.time()
            lable_gather, norm_weight = classifier_head.prepare(
                label, optimizer)
            x = F.normalize(backbone(img))

            # Gather normalized features from all ranks
            x_gather = torch.zeros(x.size()[0] * cfg.world_size,
                                   cfg.embedding_size,
                                   device=cfg.local_rank)
            dist.all_gather(list(x_gather.chunk(cfg.world_size, dim=0)),
                            x.data)
            x_gather.requires_grad = True

            logits = classifier_head(x_gather, norm_weight)
            logits = cosface(logits, lable_gather)

            with torch.no_grad():
                # Distributed, numerically stable softmax
                max_v = torch.max(logits, dim=1, keepdim=True)[0]
                dist.all_reduce(max_v, dist.ReduceOp.MAX)
                exp = torch.exp(logits - max_v)
                sum_exp = exp.sum(dim=1, keepdim=True)
                dist.all_reduce(sum_exp, dist.ReduceOp.SUM)
                exp.div_(sum_exp.clamp_min(1e-20))

                grad = exp
                index = torch.where(lable_gather != -1)[0]
                one_hot = torch.zeros(index.size()[0],
                                      grad.size()[1],
                                      device=grad.device)
                one_hot.scatter_(1, lable_gather[index, None], 1)

                # Cross-entropy loss from the sampled probabilities
                loss = torch.zeros(grad.size()[0], 1, device=grad.device)
                loss[index] = grad[index].gather(1, lable_gather[index, None])
                dist.all_reduce(loss, dist.ReduceOp.SUM)
                loss_v = loss.clamp_min_(1e-20).log_().mean() * (-1)

                # Gradient of softmax cross-entropy: prob - one_hot
                grad[index] -= one_hot
                grad.div_(grad.size()[0])

            logits.backward(grad)
            if x_gather.grad is not None:
                x_gather.grad.detach_()
            x_grad = torch.zeros_like(x)

            # Scatter the feature gradients back to their owning rank
            dist.reduce_scatter(
                x_grad, list(x_gather.grad.chunk(cfg.world_size, dim=0)))
            x.backward(x_grad)
            optimizer.step()
            classifier_head.update()
            optimizer.zero_grad()

            if cfg.rank == 0:
                print(x_gather.grad.max(), x_gather.grad.min())
                print('loss_v', loss_v.item(), global_step)
                writer.add_scalar('loss', loss_v, global_step)
                print('lr', optimizer.state_dict()['param_groups'][0]['lr'],
                      global_step)
                print(cfg.batch_size / (time.time() - start))

            global_step += 1
        scheduler.step()
        if cfg.rank == 0:
            torch.save(backbone.module.state_dict(),
                       "models/" + str(epoch) + 'backbone.pth')
    dist.destroy_process_group()
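The call to get_sub_class(cfg.rank, dist.get_world_size()) above returns the starting class index and the number of classes owned by the current rank, but the helper itself is not shown; in the snippet it presumably reads the total class count from the config. A hypothetical sketch of such a partition, with the total passed in explicitly:

def get_sub_class(rank, world_size, num_classes):
    # Split num_classes into world_size nearly equal, contiguous slices;
    # the first (num_classes % world_size) ranks each take one extra class.
    base = num_classes // world_size
    remainder = num_classes % world_size
    sub_classnum = base + (1 if rank < remainder else 0)
    sub_start = rank * base + min(rank, remainder)
    return sub_start, sub_classnum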
for epoch in range(num_epochs):
    train_loss_log = AverageMeter()
    train_acc_log = AverageMeter()
    val_loss_log = AverageMeter()
    val_acc_log = AverageMeter()
    for i, (images, labels) in enumerate(train_loader):
        # Convert torch tensor to Variable
        # if i > 0:
        #     break
        # images = Variable(images.view(-1, 28*28).cuda())
        images = Variable(images.cuda())
        labels = Variable(labels.cuda())
        # print(labels)

        # Forward + Backward + Optimize
        optimizer.zero_grad()  # zero the gradient buffer
        outputs = net(images)
        train_loss = criterion(outputs, labels)
        if i == 0:
            # print(labels)  # check dataLoader randomness
            if epoch == 0:
                # Loss of the 1st mini-batch in the 1st epoch, before backprop,
                # to verify randomness of the weight initialization
                train_init_loss = train_loss
                logger.append([0, train_init_loss, 0, 0, 0])
        train_loss.backward()
        optimizer.step()

        prec1, prec5 = accuracy(outputs.data, labels.data, topk=(1, 5))
        train_loss_log.update(train_loss.item(), images.size(0))
        train_acc_log.update(prec1.item(), images.size(0))

        if (i + 1) % 100 == 0:
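This loop relies on AverageMeter and accuracy helpers that are defined elsewhere. A common minimal version of both, matching the behavior assumed above (essentially the PyTorch ImageNet example implementations), is sketched here:

class AverageMeter(object):
    """Tracks the current value and the running average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Top-k precision (in percent) for each k in topk."""
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res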
def main(local_rank):
    cfg.local_rank = local_rank
    # cfg.rank = dist.get_rank()
    # cfg.world_size = dist.get_world_size()

    # Load a pretrained iresnet50 backbone (512-d embeddings) for CPU inference
    backbone = backbones.iresnet50(False)
    weights = torch.load("pytorch/partial_fc_glint360k_r50/16backbone.pth",
                         map_location=torch.device('cpu'))
    backbone.load_state_dict(weights)
    backbone = backbone.float()
    backbone = backbone.eval()

    # embedding 512
    img1 = cv2.imread('boy_1.jpg')
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    img1 = image_preprocessing(img1)
    img1 = img1.transpose([2, 0, 1])
    img1 = np.expand_dims(img1, axis=0)
    img1 = torch.from_numpy(img1).float()
    img1 = torch.autograd.Variable(img1, requires_grad=False).to('cpu')

    img2 = cv2.imread('man_2.jpg')
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
    img2 = image_preprocessing(img2)
    img2 = img2.transpose([2, 0, 1])
    img2 = np.expand_dims(img2, axis=0)
    img2 = torch.from_numpy(img2).float()
    img2 = torch.autograd.Variable(img2, requires_grad=False).to('cpu')

    with torch.no_grad():
        v1 = backbone.forward(img1)
        v2 = backbone.forward(img2)

    v1 = np.asarray(v1)
    import pickle
    pickle.dump(v1, open("sample.pkl", "wb"))
    print(v1)

    result = cosine_dist(v1, v2)
    print(result)
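cosine_dist is referenced above but not defined in this snippet. A minimal sketch of the comparison between the two embeddings, assuming 1 x 512 outputs convertible to NumPy; it returns cosine similarity (closer to 1 means more similar), whereas the original helper may instead return a distance such as 1 minus this value:

import numpy as np


def cosine_dist(v1, v2):
    # Flatten both embeddings and compute their cosine similarity
    v1 = np.asarray(v1).ravel()
    v2 = np.asarray(v2).ravel()
    denom = np.linalg.norm(v1) * np.linalg.norm(v2) + 1e-12
    return float(np.dot(v1, v2) / denom)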