Example #1
def main(local_rank):
    dist.init_process_group(backend='nccl', init_method='env://')
    cfg.local_rank = local_rank
    torch.cuda.set_device(local_rank)
    cfg.rank = dist.get_rank()
    cfg.world_size = dist.get_world_size()
    trainset = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        trainset, shuffle=True)
    train_loader = DataLoaderX(local_rank=local_rank,
                               dataset=trainset,
                               batch_size=cfg.batch_size,
                               sampler=train_sampler,
                               num_workers=0,
                               pin_memory=True,
                               drop_last=False)

    backbone = backbones.iresnet100(False).to(local_rank)
    backbone.train()

    # Broadcast initial parameters from rank 0 to all ranks
    for ps in backbone.parameters():
        dist.broadcast(ps, 0)

    # DDP
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[cfg.local_rank])
    backbone.train()

    # Memory classifier
    dist_sample_classifer = DistSampleClassifier(rank=dist.get_rank(),
                                                 local_rank=local_rank,
                                                 world_size=cfg.world_size)

    # Margin softmax
    margin_softmax = MarginSoftmax(s=64.0, m=0.4)

    # Optimizer for the backbone and classifier
    optimizer = SGD([{
        'params': backbone.parameters()
    }, {
        'params': dist_sample_classifer.parameters()
    }],
                    lr=cfg.lr,
                    momentum=0.9,
                    weight_decay=cfg.weight_decay,
                    rescale=cfg.world_size)

    # Lr scheduler
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer=optimizer,
                                                  lr_lambda=cfg.lr_func)
    n_epochs = cfg.num_epoch
    start_epoch = 0

    if local_rank == 0:
        writer = SummaryWriter(log_dir='logs/shows')

    # Total number of optimizer steps over all epochs
    total_step = int(
        len(trainset) / cfg.batch_size / dist.get_world_size() * cfg.num_epoch)
    if dist.get_rank() == 0:
        print("Total Step is: %d" % total_step)

    losses = AverageMeter()
    global_step = 0
    train_start = time.time()
    for epoch in range(start_epoch, n_epochs):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            total_label, norm_weight = dist_sample_classifer.prepare(
                label, optimizer)
            features = F.normalize(backbone(img))

            # Features all-gather
            total_features = torch.zeros(features.size()[0] * cfg.world_size,
                                         cfg.embedding_size,
                                         device=local_rank)
            dist.all_gather(list(total_features.chunk(cfg.world_size, dim=0)),
                            features.data)
            total_features.requires_grad = True

            # Calculate logits
            logits = dist_sample_classifer(total_features, norm_weight)
            logits = margin_softmax(logits, total_label)

            with torch.no_grad():
                max_fc = torch.max(logits, dim=1, keepdim=True)[0]
                dist.all_reduce(max_fc, dist.ReduceOp.MAX)

                # Calculate exp(logits) and all-reduce
                logits_exp = torch.exp(logits - max_fc)
                logits_sum_exp = logits_exp.sum(dim=1, keepdim=True)
                dist.all_reduce(logits_sum_exp, dist.ReduceOp.SUM)

                # Calculate prob
                logits_exp.div_(logits_sum_exp)

                # Get one-hot
                grad = logits_exp
                index = torch.where(total_label != -1)[0]
                one_hot = torch.zeros(index.size()[0],
                                      grad.size()[1],
                                      device=grad.device)
                one_hot.scatter_(1, total_label[index, None], 1)

                # Calculate loss
                loss = torch.zeros(grad.size()[0], 1, device=grad.device)
                loss[index] = grad[index].gather(1, total_label[index, None])
                dist.all_reduce(loss, dist.ReduceOp.SUM)
                loss_v = loss.clamp_min_(1e-30).log_().mean() * (-1)

                # Calculate grad
                grad[index] -= one_hot
                grad.div_(features.size()[0])

            logits.backward(grad)
            if total_features.grad is not None:
                total_features.grad.detach_()
            x_grad = torch.zeros_like(features)

            # Feature gradient all-reduce
            dist.reduce_scatter(
                x_grad, list(total_features.grad.chunk(cfg.world_size, dim=0)))
            x_grad.mul_(cfg.world_size)
            # Backward backbone
            features.backward(x_grad)
            optimizer.step()

            # Update the classifier
            dist_sample_classifer.update()
            optimizer.zero_grad()
            losses.update(loss_v, 1)
            if cfg.local_rank == 0 and step % 50 == 0:
                time_now = (time.time() - train_start) / 3600
                time_total = time_now / ((global_step + 1) / total_step)
                time_for_end = time_total - time_now
                writer.add_scalar('time_for_end', time_for_end, global_step)
                writer.add_scalar('loss', loss_v, global_step)
                print(
                    "Speed %d samples/sec   Loss %.4f   Epoch: %d   Global Step: %d   Required: %1.f hours"
                    % ((cfg.batch_size * global_step /
                        (time.time() - train_start) * cfg.world_size),
                       losses.avg, epoch, global_step, time_for_end))
                losses.reset()

            global_step += 1
        scheduler.step()
        if dist.get_rank() == 0:
            import os
            if not os.path.exists(cfg.output):
                os.makedirs(cfg.output)
            torch.save(backbone.module.state_dict(),
                       os.path.join(cfg.output,
                                    str(epoch) + 'backbone.pth'))
    dist.destroy_process_group()
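
The torch.no_grad() block above computes the softmax cross-entropy loss and its gradient by hand, so that the per-rank max, sum, and loss statistics can be combined with all-reduce. For reference, a minimal single-process sketch of the same max-stabilized computation without the collective calls (the names logits and labels here are placeholders, not taken from the example):

import torch

def softmax_xent_and_grad(logits, labels):
    # Row-wise max subtraction keeps exp() numerically stable.
    max_v = logits.max(dim=1, keepdim=True)[0]
    prob = torch.exp(logits - max_v)
    prob = prob / prob.sum(dim=1, keepdim=True)

    one_hot = torch.zeros_like(prob)
    one_hot.scatter_(1, labels[:, None], 1.0)

    # Cross-entropy: mean of -log p(target class).
    loss = -prob.gather(1, labels[:, None]).clamp_min(1e-30).log().mean()
    # Gradient w.r.t. the logits: (softmax - one_hot) / batch_size.
    grad = (prob - one_hot) / logits.size(0)
    return loss, grad
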
Example #2
def main():
    print('=========================================')
    print('               Numpy DNN                 ')
    print('              26/Nov/2017                ')
    print('    By Thang Vu ([email protected])    ')
    print('=========================================')

    # load datasets
    path = 'data/mnist.pkl.gz'
    train_set, val_set, test_set = load_mnist_datasets(path)
    batch_size = 128
    X_train, y_train = train_set
    X_val, y_val = val_set
    X_test, y_test = test_set

    # bookkeeping for the best model based on the validation set
    best_val_acc = -1
    best_model = None

    # create model and optimization method
    dnn = DNN()
    sgd = SGD(lr=0.1, lr_decay=0.1, weight_decay=1e-3, momentum=0.9)
    
    # Train (batch_size was already set above)
    for epoch in range(20):
        dnn.train_mode() # set model to train mode (because of dropout)
        
        num_train = X_train.shape[0]
        num_batch = num_train//batch_size
        for batch in range(num_batch):
            # get batch data
            batch_mask = np.random.choice(num_train, batch_size)
            X_batch = X_train[batch_mask]
            y_batch = y_train[batch_mask]
           
            # forward
            output = dnn.forward(X_batch)
            loss, dout = softmax_cross_entropy_loss(output, y_batch)
            if batch%100 == 0:
                print("Epoch %2d Iter %3d Loss %.5f" %(epoch, batch, loss))

            # backward and update
            grads = dnn.backward(dout)
            sgd.step(dnn.params, grads)
                                
        sgd.decay_learning_rate() # decay learning rate after one epoch
        dnn.eval_mode() # set model to eval mode 
        train_acc = check_acc(dnn, X_train, y_train)
        val_acc = check_acc(dnn, X_val, y_val)

        # keep the best model based on validation accuracy
        if best_val_acc < val_acc:
            best_val_acc = val_acc
            best_model = dnn

        print('Epoch finished.')
        print('Train acc %.3f' %train_acc)
        print('Val acc %.3f' %val_acc)
        print('-'*30)
        print('')

    print('Training finished. Best val acc %.3f' % best_val_acc)
    test_acc = check_acc(best_model, X_test, y_test)
    print('Test acc %.3f' %test_acc)
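
The helper softmax_cross_entropy_loss used above is not shown in the example. A minimal NumPy sketch of a function with that assumed signature, returning both the mean loss and the gradient that is fed to dnn.backward, might look like this:

import numpy as np

def softmax_cross_entropy_loss(scores, y):
    # scores: (N, C) raw class scores, y: (N,) integer labels (assumed shapes).
    shifted = scores - scores.max(axis=1, keepdims=True)  # numerical stability
    exp = np.exp(shifted)
    probs = exp / exp.sum(axis=1, keepdims=True)

    n = scores.shape[0]
    loss = -np.log(probs[np.arange(n), y] + 1e-12).mean()

    # Gradient of the mean loss w.r.t. the scores.
    dout = probs.copy()
    dout[np.arange(n), y] -= 1.0
    dout /= n
    return loss, dout
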
Example #3
def main(local_rank, world_size, init_method='tcp://127.0.0.1:23499'):
    dist.init_process_group(backend='nccl',
                            init_method=init_method,
                            rank=local_rank,
                            world_size=world_size)
    cfg.local_rank = local_rank
    torch.cuda.set_device(local_rank)
    cfg.rank = dist.get_rank()
    cfg.world_size = world_size
    print(cfg.rank, dist.get_world_size())
    trainset = MXFaceDataset(root_dir='/root/face_datasets/webface/',
                             local_rank=local_rank)
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        trainset, shuffle=True)
    trainloader = DataLoaderX(local_rank=local_rank,
                              dataset=trainset,
                              batch_size=cfg.batch_size,
                              sampler=train_sampler,
                              num_workers=0,
                              pin_memory=True,
                              drop_last=False)
    backbone = iresnet50(False).to(cfg.local_rank)
    backbone.train()
    # backbone = nn.SyncBatchNorm.convert_sync_batchnorm(backbone)
    for ps in backbone.parameters():
        dist.broadcast(ps, 0)

    backbone = torch.nn.parallel.DistributedDataParallel(
        backbone, broadcast_buffers=False, device_ids=[dist.get_rank()])
    backbone.train()
    sub_start, sub_classnum = get_sub_class(cfg.rank, dist.get_world_size())
    print(sub_start, sub_classnum)
    classifier_head = classifier(cfg.embedding_size,
                                 sub_classnum,
                                 sample_rate=0.4)
    cosface = CosFace(s=64.0, m=0.4)
    optimizer = SGD([{
        'params': backbone.parameters()
    }, {
        'params': classifier_head.parameters()
    }],
                    0.1,
                    momentum=0.9,
                    weight_decay=cfg.weight_decay,
                    rescale=cfg.world_size)
    # The warm-up branch never triggers (epoch < -1 is always false); this reduces to a
    # multi-step schedule that multiplies the lr by 0.1 at the [20, 29] milestones (offset by one epoch).
    warm_up_with_multistep_lr = lambda epoch: (
        (epoch + 1) / (4 + 1))**2 if epoch < -1 else 0.1**len(
            [m for m in [20, 29] if m - 1 <= epoch])
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer, lr_lambda=warm_up_with_multistep_lr)
    n_epochs = 33
    start_epoch = 0

    if cfg.local_rank == 0:
        writer = SummaryWriter(log_dir='logs/shows')
    global_step = 0
    loss_fun = nn.CrossEntropyLoss()
    for epoch in range(start_epoch, n_epochs):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(trainloader):
            start = time.time()
            lable_gather, norm_weight = classifier_head.prepare(
                label, optimizer)
            x = F.normalize(backbone(img))
            x_gather = torch.zeros(x.size()[0] * cfg.world_size,
                                   cfg.embedding_size,
                                   device=cfg.local_rank)
            dist.all_gather(list(x_gather.chunk(cfg.world_size, dim=0)),
                            x.data)
            x_gather.requires_grad = True

            logits = classifier_head(x_gather, norm_weight)

            logits = cosface(logits, lable_gather)

            with torch.no_grad():
                max_v = torch.max(logits, dim=1, keepdim=True)[0]
                dist.all_reduce(max_v, dist.ReduceOp.MAX)
                exp = torch.exp(logits - max_v)
                sum_exp = exp.sum(dim=1, keepdim=True)
                dist.all_reduce(sum_exp, dist.ReduceOp.SUM)
                exp.div_(sum_exp.clamp_min(1e-20))
                grad = exp
                index = torch.where(lable_gather != -1)[0]
                one_hot = torch.zeros(index.size()[0],
                                      grad.size()[1],
                                      device=grad.device)
                one_hot.scatter_(1, lable_gather[index, None], 1)

                loss = torch.zeros(grad.size()[0], 1, device=grad.device)
                loss[index] = grad[index].gather(1, lable_gather[index, None])
                dist.all_reduce(loss, dist.ReduceOp.SUM)
                loss_v = loss.clamp_min_(1e-20).log_().mean() * (-1)

                grad[index] -= one_hot
                grad.div_(grad.size()[0])

            logits.backward(grad)
            if x_gather.grad is not None:
                x_gather.grad.detach_()
            x_grad = torch.zeros_like(x)
            dist.reduce_scatter(
                x_grad, list(x_gather.grad.chunk(cfg.world_size, dim=0)))
            x.backward(x_grad)
            optimizer.step()
            classifier_head.update()
            optimizer.zero_grad()
            if cfg.rank == 0:
                print(x_gather.grad.max(), x_gather.grad.min())
                print('loss_v', loss_v.item(), global_step)
                writer.add_scalar('loss', loss_v, global_step)
                print('lr',
                      optimizer.state_dict()['param_groups'][0]['lr'],
                      global_step)
                print(cfg.batch_size / (time.time() - start))  # samples/sec on this rank

            global_step += 1
        scheduler.step()
        if cfg.rank == 0:
            torch.save(backbone.module.state_dict(),
                       "models/" + str(epoch) + 'backbone.pth')
    dist.destroy_process_group()
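
main above expects one process per GPU, with the process index passed in as local_rank. A minimal launch sketch using torch.multiprocessing.spawn (the world_size choice below is an assumption for illustration, not part of the example):

import torch
import torch.multiprocessing as mp

if __name__ == '__main__':
    world_size = torch.cuda.device_count()  # one process per local GPU
    # spawn passes the process index as the first positional argument (local_rank);
    # init_method keeps its default 'tcp://127.0.0.1:23499' from the signature above.
    mp.spawn(main, args=(world_size,), nprocs=world_size, join=True)
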
Example #4
        #        images = Variable(images.view(-1, 28*28).cuda())
        images = images.cuda()
        labels = labels.cuda()
        #        print(labels)
        # Forward + Backward + Optimize
        optimizer.zero_grad()  # zero the gradient buffer
        outputs = net(images)
        train_loss = criterion(outputs, labels)
        if i == 0:
            #        print(labels)  # check dataLoader randomness
            if epoch == 0:
                # loss of the 1st mini-batch in the 1st epoch before backprop, to verify randomness of weight initialization
                train_init_loss = train_loss
                logger.append([0, train_init_loss, 0, 0, 0])
        train_loss.backward()
        optimizer.step()
        prec1, prec5 = accuracy(outputs.data, labels.data, topk=(1, 5))
        train_loss_log.update(train_loss.item(), images.size(0))
        train_acc_log.update(prec1[0], images.size(0))

        if (i + 1) % 100 == 0:
            print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Acc: %.8f' %
                  (epoch + 1, num_epochs, i + 1, len(train_dataset) //
                   batch_size, train_loss_log.avg, train_acc_log.avg))

    # Test the Model
    net.eval()
    correct = 0
    loss = 0
    total = 0
    for images, labels in test_loader:
Example #5
def main(local_rank):
    cfg.local_rank = local_rank
    # cfg.rank = dist.get_rank()
    # cfg.world_size = dist.get_world_size()

    backbone = backbones.iresnet50(False)

    weights = torch.load("pytorch/partial_fc_glint360k_r50/16backbone.pth",
                         map_location=torch.device('cpu'))
    backbone.load_state_dict(weights)
    backbone = backbone.float()
    backbone = backbone.eval()

    # embedding 512

    img1 = cv2.imread('boy_1.jpg')
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    img1 = image_preprocessing(img1)
    # img1 = np.ones([112, 112, 3], dtype=np.float32)  # debug stub; would overwrite the loaded image
    img1 = img1.transpose([2, 0, 1])
    img1 = np.expand_dims(img1, axis=0)
    img1 = torch.from_numpy(img1).float()
    img1 = img1.to('cpu')

    img2 = cv2.imread('man_2.jpg')
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
    img2 = image_preprocessing(img2)
    img2 = img2.transpose([2, 0, 1])
    img2 = np.expand_dims(img2, axis=0)
    img2 = torch.from_numpy(img2).float()
    img2 = img2.to('cpu')
    with torch.no_grad():
        v1 = backbone.forward(img1)
        v2 = backbone.forward(img2)

    v1 = np.asarray(v1)
    import pickle
    pickle.dump(v1, open("sample.pkl", "wb"))
    print(v1)

    result = cosine_dist(v1, v2)
    print(result)

    exit(0)
    # NOTE: everything below this exit(0) is unreachable training code.
    # Broadcast initial parameters from rank 0 to all ranks
    for ps in backbone.parameters():
        dist.broadcast(ps, 0)

    # DDP
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[cfg.local_rank])
    backbone.train()

    # Memory classifier
    dist_sample_classifer = DistSampleClassifier(rank=dist.get_rank(),
                                                 local_rank=local_rank,
                                                 world_size=cfg.world_size)

    # Margin softmax
    margin_softmax = MarginSoftmax(s=64.0, m=0.4)

    # Optimizer for the backbone and classifier
    optimizer = SGD([{
        'params': backbone.parameters()
    }, {
        'params': dist_sample_classifer.parameters()
    }],
                    lr=cfg.lr,
                    momentum=0.9,
                    weight_decay=cfg.weight_decay,
                    rescale=cfg.world_size)

    # Lr scheduler
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer=optimizer,
                                                  lr_lambda=cfg.lr_func)
    n_epochs = cfg.num_epoch
    start_epoch = 0

    if local_rank == 0:
        writer = SummaryWriter(log_dir='logs/shows')

    # Total number of optimizer steps over all epochs
    total_step = int(
        len(trainset) / cfg.batch_size / dist.get_world_size() * cfg.num_epoch)
    if dist.get_rank() == 0:
        print("Total Step is: %d" % total_step)

    losses = AverageMeter()
    global_step = 0
    train_start = time.time()
    for epoch in range(start_epoch, n_epochs):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            total_label, norm_weight = dist_sample_classifer.prepare(
                label, optimizer)
            features = F.normalize(backbone(img))

            # Features all-gather
            total_features = torch.zeros(features.size()[0] * cfg.world_size,
                                         cfg.embedding_size,
                                         device=local_rank)
            dist.all_gather(list(total_features.chunk(cfg.world_size, dim=0)),
                            features.data)
            total_features.requires_grad = True

            # Calculate logits
            logits = dist_sample_classifer(total_features, norm_weight)
            logits = margin_softmax(logits, total_label)

            with torch.no_grad():
                max_fc = torch.max(logits, dim=1, keepdim=True)[0]
                dist.all_reduce(max_fc, dist.ReduceOp.MAX)

                # Calculate exp(logits) and all-reduce
                logits_exp = torch.exp(logits - max_fc)
                logits_sum_exp = logits_exp.sum(dim=1, keepdim=True)
                dist.all_reduce(logits_sum_exp, dist.ReduceOp.SUM)

                # Calculate prob
                logits_exp.div_(logits_sum_exp)

                # Get one-hot
                grad = logits_exp
                index = torch.where(total_label != -1)[0]
                one_hot = torch.zeros(index.size()[0],
                                      grad.size()[1],
                                      device=grad.device)
                one_hot.scatter_(1, total_label[index, None], 1)

                # Calculate loss
                loss = torch.zeros(grad.size()[0], 1, device=grad.device)
                loss[index] = grad[index].gather(1, total_label[index, None])
                dist.all_reduce(loss, dist.ReduceOp.SUM)
                loss_v = loss.clamp_min_(1e-30).log_().mean() * (-1)

                # Calculate grad
                grad[index] -= one_hot
                grad.div_(features.size()[0])

            logits.backward(grad)
            if total_features.grad is not None:
                total_features.grad.detach_()
            x_grad = torch.zeros_like(features)

            # Feature gradient all-reduce
            dist.reduce_scatter(
                x_grad, list(total_features.grad.chunk(cfg.world_size, dim=0)))
            x_grad.mul_(cfg.world_size)
            # Backward backbone
            features.backward(x_grad)
            optimizer.step()

            # Update the classifier
            dist_sample_classifer.update()
            optimizer.zero_grad()
            losses.update(loss_v, 1)
            if cfg.local_rank == 0 and step % 50 == 0:
                time_now = (time.time() - train_start) / 3600
                time_total = time_now / ((global_step + 1) / total_step)
                time_for_end = time_total - time_now
                writer.add_scalar('time_for_end', time_for_end, global_step)
                writer.add_scalar('loss', loss_v, global_step)
                print(
                    "Speed %d samples/sec   Loss %.4f   Epoch: %d   Global Step: %d   Required: %1.f hours"
                    % ((cfg.batch_size * global_step /
                        (time.time() - train_start) * cfg.world_size),
                       losses.avg, epoch, global_step, time_for_end))
                losses.reset()

            global_step += 1
        scheduler.step()
        if dist.get_rank() == 0:
            import os
            if not os.path.exists(cfg.output):
                os.makedirs(cfg.output)
            torch.save(backbone.module.state_dict(),
                       os.path.join(cfg.output,
                                    str(epoch) + 'backbone.pth'))
    dist.destroy_process_group()
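
cosine_dist, called in the inference part of this example, is not defined here either. A minimal NumPy sketch of a cosine-similarity helper with the assumed behaviour (two embeddings in, one scalar out):

import numpy as np

def cosine_dist(v1, v2):
    # Flatten both embeddings and compute their cosine similarity.
    v1 = np.asarray(v1, dtype=np.float32).reshape(-1)
    v2 = np.asarray(v2, dtype=np.float32).reshape(-1)
    return float(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2) + 1e-12))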