Example #1
def main(local_rank):
    dist.init_process_group(backend='nccl', init_method='env://')
    cfg.local_rank = local_rank
    torch.cuda.set_device(local_rank)
    cfg.rank = dist.get_rank()
    cfg.world_size = dist.get_world_size()
    trainset = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        trainset, shuffle=True)
    train_loader = DataLoaderX(local_rank=local_rank,
                               dataset=trainset,
                               batch_size=cfg.batch_size,
                               sampler=train_sampler,
                               num_workers=0,
                               pin_memory=True,
                               drop_last=False)

    backbone = backbones.iresnet100(False).to(local_rank)
    backbone.train()

    # Broadcast init parameters
    for ps in backbone.parameters():
        dist.broadcast(ps, 0)

    # DDP
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[cfg.local_rank])
    backbone.train()

    # Memory classifier
    dist_sample_classifer = DistSampleClassifier(rank=dist.get_rank(),
                                                 local_rank=local_rank,
                                                 world_size=cfg.world_size)

    # Margin softmax
    margin_softmax = MarginSoftmax(s=64.0, m=0.4)

    # Optimizer for backbone and classifier
    optimizer = SGD([{
        'params': backbone.parameters()
    }, {
        'params': dist_sample_classifer.parameters()
    }],
                    lr=cfg.lr,
                    momentum=0.9,
                    weight_decay=cfg.weight_decay,
                    rescale=cfg.world_size)

    # Lr scheduler
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer=optimizer,
                                                  lr_lambda=cfg.lr_func)
    n_epochs = cfg.num_epoch
    start_epoch = 0

    if local_rank == 0:
        writer = SummaryWriter(log_dir='logs/shows')

    # Total number of optimizer steps over the whole run
    total_step = int(
        len(trainset) / cfg.batch_size / dist.get_world_size() * cfg.num_epoch)
    if dist.get_rank() == 0:
        print("Total Step is: %d" % total_step)

    losses = AverageMeter()
    global_step = 0
    train_start = time.time()
    for epoch in range(start_epoch, n_epochs):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            total_label, norm_weight = dist_sample_classifer.prepare(
                label, optimizer)
            features = F.normalize(backbone(img))

            # Features all-gather
            total_features = torch.zeros(features.size()[0] * cfg.world_size,
                                         cfg.embedding_size,
                                         device=local_rank)
            dist.all_gather(list(total_features.chunk(cfg.world_size, dim=0)),
                            features.data)
            total_features.requires_grad = True

            # Calculate logits
            logits = dist_sample_classifer(total_features, norm_weight)
            logits = margin_softmax(logits, total_label)

            with torch.no_grad():
                max_fc = torch.max(logits, dim=1, keepdim=True)[0]
                dist.all_reduce(max_fc, dist.ReduceOp.MAX)

                # Calculate exp(logits) and all-reduce
                logits_exp = torch.exp(logits - max_fc)
                logits_sum_exp = logits_exp.sum(dim=1, keepdim=True)
                dist.all_reduce(logits_sum_exp, dist.ReduceOp.SUM)

                # Calculate prob
                logits_exp.div_(logits_sum_exp)

                # Get one-hot
                grad = logits_exp
                index = torch.where(total_label != -1)[0]
                one_hot = torch.zeros(index.size()[0],
                                      grad.size()[1],
                                      device=grad.device)
                one_hot.scatter_(1, total_label[index, None], 1)

                # Calculate loss
                loss = torch.zeros(grad.size()[0], 1, device=grad.device)
                loss[index] = grad[index].gather(1, total_label[index, None])
                dist.all_reduce(loss, dist.ReduceOp.SUM)
                loss_v = loss.clamp_min_(1e-30).log_().mean() * (-1)

                # Calculate grad
                grad[index] -= one_hot
                grad.div_(features.size()[0])

            logits.backward(grad)
            if total_features.grad is not None:
                total_features.grad.detach_()
            x_grad = torch.zeros_like(features)

            # Feature gradient all-reduce
            dist.reduce_scatter(
                x_grad, list(total_features.grad.chunk(cfg.world_size, dim=0)))
            x_grad.mul_(cfg.world_size)
            # Backward backbone
            features.backward(x_grad)
            optimizer.step()

            # Update classifier
            dist_sample_classifer.update()
            optimizer.zero_grad()
            losses.update(loss_v, 1)
            if cfg.local_rank == 0 and step % 50 == 0:
                time_now = (time.time() - train_start) / 3600
                time_total = time_now / ((global_step + 1) / total_step)
                time_for_end = time_total - time_now
                writer.add_scalar('time_for_end', time_for_end, global_step)
                writer.add_scalar('loss', loss_v, global_step)
                print(
                    "Speed %d samples/sec   Loss %.4f   Epoch: %d   Global Step: %d   Required: %1.f hours"
                    % ((cfg.batch_size * global_step /
                        (time.time() - train_start) * cfg.world_size),
                       losses.avg, epoch, global_step, time_for_end))
                losses.reset()

            global_step += 1
        scheduler.step()
        if dist.get_rank() == 0:
            import os
            os.makedirs(cfg.output, exist_ok=True)
            torch.save(backbone.module.state_dict(),
                       os.path.join(cfg.output,
                                    str(epoch) + 'backbone.pth'))
    dist.destroy_process_group()
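
The no_grad block above is a hand-rolled, numerically stable softmax cross-entropy whose per-row max and sum of exponentials are all-reduced across ranks. A single-process sketch of the same numerics (shapes and names here are illustrative, not from the source):

# Single-process sketch of the stable softmax cross-entropy that
# Example #1 distributes with all_reduce; values are illustrative.
import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)                      # (batch, num_classes)
labels = torch.randint(0, 10, (4,))

max_fc = logits.max(dim=1, keepdim=True)[0]      # dist: all_reduce(MAX)
exp = torch.exp(logits - max_fc)                 # subtract max for stability
sum_exp = exp.sum(dim=1, keepdim=True)           # dist: all_reduce(SUM)
prob = exp / sum_exp
loss = -prob.gather(1, labels[:, None]).clamp_min(1e-30).log().mean()
# d(loss)/d(logits) = (softmax - one_hot) / batch, matching the grad above
grad = (prob - F.one_hot(labels, 10).float()) / logits.size(0)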
Example #2
def main(args):
    dist.init_process_group(backend='nccl', init_method='env://')
    local_rank = args.local_rank
    torch.cuda.set_device(local_rank)
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    if not os.path.exists(cfg.output) and rank == 0:
        os.makedirs(cfg.output)
    else:
        time.sleep(2)

    log_root = logging.getLogger()
    init_logging(log_root, rank, cfg.output)
    trainset = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        trainset, shuffle=True)
    train_loader = DataLoaderX(local_rank=local_rank,
                               dataset=trainset,
                               batch_size=cfg.batch_size,
                               sampler=train_sampler,
                               num_workers=0,
                               pin_memory=True,
                               drop_last=True)

    dropout = 0.4 if cfg.dataset == "webface" else 0
    backbone = eval("backbones.{}".format(args.network))(
        False, dropout=dropout, fp16=cfg.fp16).to(local_rank)

    if args.resume:
        try:
            backbone_pth = os.path.join(cfg.output, "backbone.pth")
            backbone.load_state_dict(
                torch.load(backbone_pth,
                           map_location=torch.device(local_rank)))
            if rank == 0:
                logging.info("backbone resumed successfully!")
        except (FileNotFoundError, KeyError, IndexError, RuntimeError):
            logging.info("resume failed, backbone initialized from scratch!")

    for ps in backbone.parameters():
        dist.broadcast(ps, 0)
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[local_rank])
    backbone.train()

    margin_softmax = eval("losses.{}".format(args.loss))()
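    # PartialFC shards the classifier weights across ranks and, with
    # sample_rate < 1, samples only a subset of negative class centers per step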
    module_partial_fc = PartialFC(rank=rank,
                                  local_rank=local_rank,
                                  world_size=world_size,
                                  resume=args.resume,
                                  batch_size=cfg.batch_size,
                                  margin_softmax=margin_softmax,
                                  num_classes=cfg.num_classes,
                                  sample_rate=cfg.sample_rate,
                                  embedding_size=cfg.embedding_size,
                                  prefix=cfg.output)

    opt_backbone = torch.optim.SGD(params=[{
        'params': backbone.parameters()
    }],
                                   lr=cfg.lr / 512 * cfg.batch_size *
                                   world_size,
                                   momentum=0.9,
                                   weight_decay=cfg.weight_decay)
    opt_pfc = torch.optim.SGD(params=[{
        'params': module_partial_fc.parameters()
    }],
                              lr=cfg.lr / 512 * cfg.batch_size * world_size,
                              momentum=0.9,
                              weight_decay=cfg.weight_decay)

    scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_backbone, lr_lambda=cfg.lr_func)
    scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_pfc,
                                                      lr_lambda=cfg.lr_func)

    start_epoch = 0
    total_step = int(
        len(trainset) / cfg.batch_size / world_size * cfg.num_epoch)
    if rank == 0: logging.info("Total Step is: %d" % total_step)

    callback_verification = CallBackVerification(2000, rank, cfg.val_targets,
                                                 cfg.rec)
    callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size,
                                       world_size, None)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

    loss = AverageMeter()
    global_step = 0
    grad_scaler = MaxClipGradScaler(
        cfg.batch_size, 128 *
        cfg.batch_size, growth_interval=100) if cfg.fp16 else None
    for epoch in range(start_epoch, cfg.num_epoch):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            global_step += 1
            features = F.normalize(backbone(img))
            x_grad, loss_v = module_partial_fc.forward_backward(
                label, features, opt_pfc)
            if cfg.fp16:
                features.backward(grad_scaler.scale(x_grad))
                grad_scaler.unscale_(opt_backbone)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                grad_scaler.step(opt_backbone)
                grad_scaler.update()
            else:
                features.backward(x_grad)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                opt_backbone.step()

            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, cfg.fp16, grad_scaler)
            callback_verification(global_step, backbone)
        callback_checkpoint(global_step, backbone, module_partial_fc)
        scheduler_backbone.step()
        scheduler_pfc.step()
    dist.destroy_process_group()
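
Both optimizers above apply the linear scaling rule: the base lr is defined for a global batch of 512 and rescaled by cfg.batch_size * world_size. A quick sanity check (the numbers are made up):

# Linear lr scaling used by opt_backbone / opt_pfc above; numbers illustrative.
def scaled_lr(base_lr, batch_size, world_size, base_batch=512):
    return base_lr / base_batch * batch_size * world_size

assert scaled_lr(0.1, 128, 4) == 0.1    # 128 * 4 == 512 -> lr unchanged
assert scaled_lr(0.1, 128, 8) == 0.2    # doubling the global batch doubles lr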
Example #3
def main(args):
    cfg = get_config(args.config)
    if not cfg.tf32:
        torch.backends.cuda.matmul.allow_tf32 = False
        torch.backends.cudnn.allow_tf32 = False
    try:
        world_size = int(os.environ['WORLD_SIZE'])
        rank = int(os.environ['RANK'])
        dist_url = "tcp://{}:{}".format(os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"])
    except KeyError:
        world_size = 1
        rank = 0
        dist_url = "tcp://127.0.0.1:12584"

    dist.init_process_group(backend='nccl', init_method=dist_url, rank=rank, world_size=world_size)
    local_rank = args.local_rank
    torch.cuda.set_device(local_rank)

    if not os.path.exists(cfg.output) and rank == 0:
        os.makedirs(cfg.output)
    else:
        time.sleep(2)

    log_root = logging.getLogger()
    init_logging(log_root, rank, cfg.output)
    if rank == 0:
        logging.info(args)
        logging.info(cfg)

    train_set = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_set, shuffle=True)
    train_loader = DataLoaderX(
        local_rank=local_rank, dataset=train_set, batch_size=cfg.batch_size,
        sampler=train_sampler, num_workers=2, pin_memory=True, drop_last=True)

    dropout = 0.4 if cfg.dataset == "webface" else 0
    backbone = get_model(cfg.network, dropout=dropout, fp16=cfg.fp16).to(local_rank)
    backbone_onnx = get_model(cfg.network, dropout=dropout, fp16=False)

    if args.resume:
        try:
            backbone_pth = os.path.join(cfg.output, "backbone.pth")
            backbone.load_state_dict(torch.load(backbone_pth, map_location=torch.device(local_rank)))
            if rank == 0:
                logging.info("backbone resumed successfully!")
        except (FileNotFoundError, KeyError, IndexError, RuntimeError):
            logging.info("resume failed, backbone initialized from scratch!")

    for ps in backbone.parameters():
        dist.broadcast(ps, 0)
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[local_rank])
    backbone.train()

    cfg_vpl = cfg.vpl
    vpl_momentum = cfg_vpl['momentum']
    if vpl_momentum:
        backbone_w = get_model(cfg.network, dropout=dropout, fp16=cfg.fp16).to(local_rank)
        backbone_w.train()
        for param_b, param_w in zip(backbone.module.parameters(), backbone_w.parameters()):
            param_w.data.copy_(param_b.data)
            param_w.requires_grad = False

    margin_softmax = losses.get_loss(cfg.loss)
    module_fc = VPL(
        rank=rank, local_rank=local_rank, world_size=world_size, resume=args.resume,
        batch_size=cfg.batch_size, margin_softmax=margin_softmax, num_classes=cfg.num_classes,
        sample_rate=cfg.sample_rate, embedding_size=cfg.embedding_size, prefix=cfg.output,
        cfg=cfg_vpl)

    opt_backbone = torch.optim.SGD(
        params=[{'params': backbone.parameters()}],
        lr=cfg.lr / 512 * cfg.batch_size * world_size,
        momentum=0.9, weight_decay=cfg.weight_decay)
    opt_pfc = torch.optim.SGD(
        params=[{'params': module_fc.parameters()}],
        lr=cfg.lr / 512 * cfg.batch_size * world_size,
        momentum=0.9, weight_decay=cfg.weight_decay)

    scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_backbone, lr_lambda=cfg.lr_func)
    scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_pfc, lr_lambda=cfg.lr_func)

    start_epoch = 0
    total_step = int(len(train_set) / cfg.batch_size / world_size * cfg.num_epoch)
    if rank == 0: logging.info("Total Step is: %d" % total_step)

    callback_verification = CallBackVerification(10000, rank, cfg.val_targets, cfg.rec)
    callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, None)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

    loss = AverageMeter()
    global_step = 0
    grad_amp = MaxClipGradScaler(cfg.batch_size, 128 * cfg.batch_size, growth_interval=100) if cfg.fp16 else None
    use_batch_shuffle = True
    alpha = 0.999
    for epoch in range(start_epoch, cfg.num_epoch):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            global_step += 1
            #img = img.to(memory_format=torch.channels_last)
            features = F.normalize(backbone(img))
            feature_w = None
            if vpl_momentum:
                with torch.no_grad():
                    for param_b, param_w in zip(backbone.module.parameters(), backbone_w.parameters()):
                        param_w.data = param_w.data * alpha + param_b.data * (1. - alpha)
                    if use_batch_shuffle:
                        img_w, idx_unshuffle = batch_shuffle_ddp(img, rank, world_size)
                    else:
                        img_w = img

                    feature_w = F.normalize(backbone_w(img_w))
                    if use_batch_shuffle:
                        feature_w = batch_unshuffle_ddp(feature_w, idx_unshuffle, rank, world_size)
                    feature_w = feature_w.detach()

            x_grad, loss_v = module_fc.forward_backward(label, features, opt_pfc, feature_w)
            if cfg.fp16:
                features.backward(grad_amp.scale(x_grad))
                grad_amp.unscale_(opt_backbone)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                grad_amp.step(opt_backbone)
                grad_amp.update()
            else:
                features.backward(x_grad)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                opt_backbone.step()

            opt_pfc.step()
            module_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, cfg.fp16, grad_amp)
            callback_verification(global_step, backbone)
        callback_checkpoint(global_step, backbone, module_fc, backbone_onnx)
        scheduler_backbone.step()
        scheduler_pfc.step()
    dist.destroy_process_group()
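
The momentum branch keeps backbone_w as an exponential moving average of the trained backbone (alpha = 0.999), in the spirit of momentum encoders such as MoCo (batch_shuffle_ddp comes from the same recipe). The update in isolation (a sketch, not the repo's API):

# Parameter-wise EMA update equivalent to the inner loop above:
# w <- alpha * w + (1 - alpha) * b, with gradients disabled.
import torch

@torch.no_grad()
def ema_update(model_ema, model, alpha=0.999):
    for p_ema, p in zip(model_ema.parameters(), model.parameters()):
        p_ema.data.mul_(alpha).add_(p.data, alpha=1.0 - alpha)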
Example #4
import torch
import torch.nn as nn
import torch.optim as optim
from dataset import Data
from dataset import DataLoaderX
from config import DefaultConfig
from student_model import student_model
from run import run_epoch

opt = DefaultConfig()

if __name__ == '__main__':
    train_dataset = Data(train=True)
    test_dataset = Data(train=False)
    train_loader = DataLoaderX(train_dataset, batch_size=opt.batch_size, num_workers=4, pin_memory=True, shuffle=True)
    test_loader = DataLoaderX(test_dataset, batch_size=opt.batch_size, num_workers=4, pin_memory=True)
    num_skills = train_dataset.max_skill_num + 1

    m = student_model(num_skills=num_skills, state_size=opt.state_size,
                      num_heads=opt.num_heads, dropout=opt.dropout, infer=False)

    torch.backends.cudnn.benchmark = True
    best_auc = 0
    optimizer = optim.Adam(m.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1000, gamma=opt.lr_decay)
    criterion = nn.BCELoss()
    for epoch in range(opt.max_epoch):
        rmse, auc, r2, acc = run_epoch(m, train_loader, optimizer, scheduler, criterion,
                                           num_skills=num_skills, epoch_id=epoch, is_training=True)
        print('Epoch %d:\nTrain metrics: auc: %.3f, acc: %.3f, rmse: %.3f, r2: %.3f' \
              % (epoch + 1, auc, acc, rmse, r2))
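
Note that nn.BCELoss expects probabilities in (0, 1), so student_model must end in a sigmoid. If it instead returned raw logits, the numerically safer drop-in (a hypothetical alternative, not what the source uses) would be:

# Hypothetical swap if the model emitted raw logits rather than probabilities:
import torch.nn as nn
criterion = nn.BCEWithLogitsLoss()  # fuses sigmoid + BCE in one stable op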
Example #5
def main(path_to_train, path_to_test, sep=','):
    # the dataset now accepts either a DataFrame or a CSV file

    train_dataset = Data(path_to_csv=path_to_train,
                         train=True,
                         standard_load=False,
                         sep=sep)
    test_dataset = Data(path_to_csv=path_to_test,
                        train=False,
                        standard_load=False,
                        sep=sep)
    train_loader = DataLoaderX(train_dataset,
                               batch_size=opt.batch_size,
                               num_workers=4,
                               pin_memory=True,
                               shuffle=True)
    test_loader = DataLoaderX(test_dataset,
                              batch_size=opt.batch_size,
                              num_workers=4,
                              pin_memory=True)
    num_skills = train_dataset.skill_num

    m = student_model(num_skills=num_skills,
                      state_size=opt.state_size,
                      num_heads=opt.num_heads,
                      dropout=opt.dropout,
                      infer=False)

    torch.backends.cudnn.benchmark = True
    best_auc = 0
    optimizer = optim.Adam(m.parameters(),
                           lr=opt.lr,
                           weight_decay=opt.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=1000,
                                          gamma=opt.lr_decay)
    criterion = nn.BCELoss()
    for epoch in range(opt.max_epoch):
        rmse, auc, r2, acc = run_epoch(m,
                                       train_loader,
                                       optimizer,
                                       scheduler,
                                       criterion,
                                       num_skills=num_skills,
                                       epoch_id=epoch,
                                       is_training=True)
        print('Epoch %d:\nTrain metrics: auc: %.3f, acc: %.3f, rmse: %.3f, r2: %.3f' \
              % (epoch + 1, auc, acc, rmse, r2))
        rmse, auc, r2, acc = run_epoch(m,
                                       test_loader,
                                       optimizer,
                                       scheduler,
                                       criterion,
                                       num_skills=num_skills,
                                       epoch_id=epoch,
                                       is_training=False)
        print('\nTest metrics: auc: %.3f, acc: %.3f, rmse: %.3f, r2: %.3f' \
              % (auc, acc, rmse, r2))

        if auc > best_auc:
            best_auc = auc
            torch.save(
                m.state_dict(),
                'models/sakt_model_auc_{}.pkl'.format(int(best_auc * 1000)))
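
DataLoaderX is not defined in these examples; a common definition (assumed here, not taken from the source) is a DataLoader whose iterator prefetches batches on a background thread:

# Assumed definition of the DataLoaderX used in Examples #4 and #5,
# built on the prefetch_generator package.
from torch.utils.data import DataLoader
from prefetch_generator import BackgroundGenerator

class DataLoaderX(DataLoader):
    def __iter__(self):
        # wrap the normal iterator so the next batch is prepared in the background
        return BackgroundGenerator(super().__iter__())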
Example #6
def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    cfg = get_config(args.config)

    try:
        world_size = int(os.environ['WORLD_SIZE'])
        rank = int(os.environ['RANK'])
        dist.init_process_group('nccl')
    except KeyError:
        world_size = 1
        rank = 0
        dist.init_process_group(backend='nccl',
                                init_method="tcp://127.0.0.1:12584",
                                rank=rank,
                                world_size=world_size)

    local_rank = args.local_rank
    torch.cuda.set_device(local_rank)
    os.makedirs(cfg.output, exist_ok=True)
    init_logging(rank, cfg.output)

    if cfg.rec == "synthetic":
        train_set = SyntheticDataset(local_rank=local_rank)
    else:
        train_set = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)

    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_set, shuffle=True)
    train_loader = DataLoaderX(local_rank=local_rank,
                               dataset=train_set,
                               batch_size=cfg.batch_size,
                               sampler=train_sampler,
                               num_workers=2,
                               pin_memory=True,
                               drop_last=True)
    backbone = get_model(cfg.network,
                         dropout=0.0,
                         fp16=cfg.fp16,
                         num_features=cfg.embedding_size).to(local_rank)
    if rank == 0:
        # print a model summary once on the main process, then continue training
        summary(backbone, input_size=(3, 112, 112))

    if cfg.resume:
        try:
            backbone_pth = os.path.join(cfg.output, "backbone.pth")
            backbone.load_state_dict(
                torch.load(backbone_pth,
                           map_location=torch.device(local_rank)))
            if rank == 0:
                logging.info("backbone resumed successfully!")
        except (FileNotFoundError, KeyError, IndexError, RuntimeError):
            if rank == 0:
                logging.info("resume failed, backbone initialized from scratch!")

    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[local_rank])
    backbone.train()
    if cfg.loss == 'magface':
        margin_softmax = losses.get_loss(cfg.loss, lambda_g=cfg.lambda_g)
    elif cfg.loss == 'mag_cosface':
        margin_softmax = losses.get_loss(cfg.loss)
    else:
        margin_softmax = losses.get_loss(cfg.loss,
                                         s=cfg.s,
                                         m1=cfg.m1,
                                         m2=cfg.m2,
                                         m3=cfg.m3)
    module_partial_fc = PartialFC(rank=rank,
                                  local_rank=local_rank,
                                  world_size=world_size,
                                  resume=cfg.resume,
                                  batch_size=cfg.batch_size,
                                  margin_softmax=margin_softmax,
                                  num_classes=cfg.num_classes,
                                  sample_rate=cfg.sample_rate,
                                  embedding_size=cfg.embedding_size,
                                  prefix=cfg.output)

    opt_backbone = torch.optim.SGD(params=[{
        'params': backbone.parameters()
    }],
                                   lr=cfg.lr / 512 * cfg.batch_size *
                                   world_size,
                                   momentum=0.9,
                                   weight_decay=cfg.weight_decay)
    opt_pfc = torch.optim.SGD(params=[{
        'params': module_partial_fc.parameters()
    }],
                              lr=cfg.lr / 512 * cfg.batch_size * world_size,
                              momentum=0.9,
                              weight_decay=cfg.weight_decay)

    num_image = len(train_set)
    total_batch_size = cfg.batch_size * world_size
    cfg.warmup_step = num_image // total_batch_size * cfg.warmup_epoch
    cfg.total_step = num_image // total_batch_size * cfg.num_epoch

    def lr_step_func(current_step):
        cfg.decay_step = [
            x * num_image // total_batch_size for x in cfg.decay_epoch
        ]
        if current_step < cfg.warmup_step:
            return current_step / cfg.warmup_step
        else:
            return 0.1**len([m for m in cfg.decay_step if m <= current_step])

    scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_backbone, lr_lambda=lr_step_func)
    scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_pfc,
                                                      lr_lambda=lr_step_func)

    for key, value in cfg.items():
        num_space = 25 - len(key)
        logging.info(": " + key + " " * num_space + str(value))

    val_target = cfg.val_targets
    callback_verification = CallBackVerification(2000, rank, val_target,
                                                 cfg.rec)
    callback_logging = CallBackLogging(50, rank, cfg.total_step,
                                       cfg.batch_size, world_size, None)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

    loss = AverageMeter()
    start_epoch = 0
    global_step = 0
    grad_amp = MaxClipGradScaler(
        cfg.batch_size, 128 *
        cfg.batch_size, growth_interval=100) if cfg.fp16 else None
    for epoch in range(start_epoch, cfg.num_epoch):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            global_step += 1
            x = backbone(img)
            features = F.normalize(x)
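            # the un-normalized embedding x is passed along as well; its norm
            # is used by magnitude-aware losses such as magface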
            x_grad, loss_v = module_partial_fc.forward_backward(
                label, features, opt_pfc, x)
            if cfg.fp16:
                features.backward(grad_amp.scale(x_grad))
                grad_amp.unscale_(opt_backbone)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                grad_amp.step(opt_backbone)
                grad_amp.update()
            else:
                features.backward(x_grad)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                opt_backbone.step()

            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, cfg.fp16,
                             scheduler_backbone.get_last_lr()[0], grad_amp)
            callback_verification(global_step, backbone)
            scheduler_backbone.step()
            scheduler_pfc.step()
        callback_checkpoint(global_step, backbone, module_partial_fc)

    callback_verification('last', backbone)
    dist.destroy_process_group()
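
lr_step_func above scales the lr factor linearly from 0 to 1 over warmup_step steps, then decays it by 10x at every boundary in decay_step. A standalone version with made-up step counts:

# Standalone copy of lr_step_func; warmup_step / decay_step values here are
# illustrative, not from the config.
def lr_step_func(current_step, warmup_step=1000, decay_step=(8000, 14000)):
    if current_step < warmup_step:
        return current_step / warmup_step               # linear warmup to 1.0
    return 0.1 ** len([m for m in decay_step if m <= current_step])

assert lr_step_func(500) == 0.5     # halfway through warmup
assert lr_step_func(9000) == 0.1    # past the first decay boundary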
Example #7
def main(local_rank, world_size, init_method='tcp://127.0.0.1:23499'):
    dist.init_process_group(backend='nccl',
                            init_method=init_method,
                            rank=local_rank,
                            world_size=world_size)
    cfg.local_rank = local_rank
    torch.cuda.set_device(local_rank)
    cfg.rank = dist.get_rank()
    cfg.world_size = world_size
    print(cfg.rank, dist.get_world_size())
    trainset = MXFaceDataset(root_dir='/root/face_datasets/webface/',
                             local_rank=local_rank)
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        trainset, shuffle=True)
    trainloader = DataLoaderX(local_rank=local_rank,
                              dataset=trainset,
                              batch_size=cfg.batch_size,
                              sampler=train_sampler,
                              num_workers=0,
                              pin_memory=True,
                              drop_last=False)
    backbone = iresnet50(False).to(cfg.local_rank)
    backbone.train()
    # backbone = nn.SyncBatchNorm.convert_sync_batchnorm(backbone)
    for ps in backbone.parameters():
        dist.broadcast(ps, 0)

    backbone = torch.nn.parallel.DistributedDataParallel(
        backbone, broadcast_buffers=False, device_ids=[cfg.local_rank])
    backbone.train()
    sub_start, sub_classnum = get_sub_class(cfg.rank, dist.get_world_size())
    print(sub_start, sub_classnum)
    classifier_head = classifier(cfg.embedding_size,
                                 sub_classnum,
                                 sample_rate=0.4)
    cosface = CosFace(s=64.0, m=0.4)
    optimizer = SGD([{
        'params': backbone.parameters()
    }, {
        'params': classifier_head.parameters()
    }],
                    0.1,
                    momentum=0.9,
                    weight_decay=cfg.weight_decay,
                    rescale=cfg.world_size)
    # Warmup is effectively disabled here (epoch < -1 is never true), so this
    # reduces to a plain multistep decay: lr x0.1 at epochs 20 and 29
    warm_up_with_multistep_lr = lambda epoch: (
        (epoch + 1) / (4 + 1))**2 if epoch < -1 else 0.1**len(
            [m for m in [20, 29] if m - 1 <= epoch])
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer, lr_lambda=warm_up_with_multistep_lr)
    n_epochs = 33
    start_epoch = 0

    if cfg.local_rank == 0:
        writer = SummaryWriter(log_dir='logs/shows')
    global_step = 0
    loss_fun = nn.CrossEntropyLoss()
    for epoch in range(start_epoch, n_epochs):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(trainloader):
            start = time.time()
            label_gather, norm_weight = classifier_head.prepare(
                label, optimizer)
            x = F.normalize(backbone(img))
            x_gather = torch.zeros(x.size()[0] * cfg.world_size,
                                   cfg.embedding_size,
                                   device=cfg.local_rank)
            dist.all_gather(list(x_gather.chunk(cfg.world_size, dim=0)),
                            x.data)
            x_gather.requires_grad = True

            logits = classifier_head(x_gather, norm_weight)

            logits = cosface(logits, label_gather)

            with torch.no_grad():
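                # Numerically stable softmax across ranks: subtract the
                # globally reduced max, then divide by the all-reduced sum
                # of exponentials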
                max_v = torch.max(logits, dim=1, keepdim=True)[0]
                dist.all_reduce(max_v, dist.ReduceOp.MAX)
                exp = torch.exp(logits - max_v)
                sum_exp = exp.sum(dim=1, keepdim=True)
                dist.all_reduce(sum_exp, dist.ReduceOp.SUM)
                exp.div_(sum_exp.clamp_min(1e-20))
                grad = exp
                index = torch.where(label_gather != -1)[0]
                one_hot = torch.zeros(index.size()[0],
                                      grad.size()[1],
                                      device=grad.device)
                one_hot.scatter_(1, label_gather[index, None], 1)

                loss = torch.zeros(grad.size()[0], 1, device=grad.device)
                loss[index] = grad[index].gather(1, label_gather[index, None])
                dist.all_reduce(loss, dist.ReduceOp.SUM)
                loss_v = loss.clamp_min_(1e-20).log_().mean() * (-1)

                grad[index] -= one_hot
                grad.div_(grad.size()[0])

            logits.backward(grad)
            if x_gather.grad is not None:
                x_gather.grad.detach_()
            x_grad = torch.zeros_like(x)
            dist.reduce_scatter(
                x_grad, list(x_gather.grad.chunk(cfg.world_size, dim=0)))
            x.backward(x_grad)
            optimizer.step()
            classifier_head.update()
            optimizer.zero_grad()
            if cfg.rank == 0:
                print(x_gather.grad.max(), x_gather.grad.min())
                print('loss_v', loss_v.item(), global_step)
                writer.add_scalar('loss', loss_v, global_step)
                print('lr',
                      optimizer.state_dict()['param_groups'][0]['lr'],
                      global_step)
                print(cfg.batch_size / (time.time() - start))

            global_step += 1
        scheduler.step()
        if cfg.rank == 0:
            os.makedirs("models", exist_ok=True)
            torch.save(backbone.module.state_dict(),
                       "models/" + str(epoch) + 'backbone.pth')
    dist.destroy_process_group()