Example No. 1
    def __call__(self, global_step, loss: AverageMeter, epoch: int, fp16: bool,
                 grad_scaler: torch.cuda.amp.GradScaler):
        if self.rank == 0 and global_step > 0 and global_step % self.frequent == 0:
            if self.init:
                try:
                    speed: float = self.frequent * self.batch_size / (
                        time.time() - self.tic)
                    speed_total = speed * self.world_size
                except ZeroDivisionError:
                    speed_total = float('inf')

                time_now = (time.time() - self.time_start) / 3600
                time_total = time_now / ((global_step + 1) / self.total_step)
                time_for_end = time_total - time_now
                if self.writer is not None:
                    self.writer.add_scalar('time_for_end', time_for_end,
                                           global_step)
                    self.writer.add_scalar('loss', loss.avg, global_step)
                if fp16:
                    msg = "Speed %.2f samples/sec   Loss %.4f   Epoch: %d   Global Step: %d   "\
                          "Fp16 Grad Scale: %2.f   Required: %1.f hours" % (
                        speed_total, loss.avg, epoch, global_step, grad_scaler.get_scale(), time_for_end
                    )
                else:
                    msg = "Speed %.2f samples/sec   Loss %.4f   Epoch: %d   Global Step: %d   Required: %1.f hours" % (
                        speed_total, loss.avg, epoch, global_step,
                        time_for_end)
                logging.info(msg)
                loss.reset()
                self.tic = time.time()
            else:
                self.init = True
                self.tic = time.time()
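
The callback's ETA arithmetic extrapolates the total run time from the fraction of steps completed so far. The same calculation as a standalone sketch (the helper name is illustrative, not part of the source):

import time

def estimate_hours_remaining(time_start, global_step, total_step):
    """Extrapolate the remaining hours from elapsed time and training progress."""
    elapsed_hours = (time.time() - time_start) / 3600
    progress = (global_step + 1) / total_step   # fraction of training completed
    total_hours = elapsed_hours / progress      # projected total duration
    return total_hours - elapsed_hours          # hours left, logged as "Required: ... hours"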
Example No. 2
    def __call__(self, global_step, loss: AverageMeter, epoch: int,
                 lr_backbone_value, lr_pfc_value):
        if self.rank == 0 and global_step > 0 and global_step % self.frequent == 0:
            if self.init:
                try:
                    speed: float = self.frequent * self.batch_size / (
                        time.time() - self.tic)
                    speed_total = speed * self.world_size
                except ZeroDivisionError:
                    speed_total = float('inf')

                time_now = (time.time() - self.time_start) / 3600
                time_total = time_now / ((global_step + 1) / self.total_step)
                time_for_end = time_total - time_now
                if self.writer is not None:
                    self.writer.add_scalar('time_for_end', time_for_end,
                                           global_step)
                    self.writer.add_scalar('loss', loss.avg, global_step)
                msg = "Speed %.2f samples/sec   Loss %.4f   Epoch: %d   Global Step: %d   Required: %1.f hours, lr_backbone_value: %f, lr_pfc_value: %f" % (
                    speed_total, loss.avg, epoch, global_step, time_for_end,
                    lr_backbone_value, lr_pfc_value)
                logging.info(msg)
                loss.reset()
                self.tic = time.time()
            else:
                self.init = True
                self.tic = time.time()
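
Throughput is measured over a window of frequent * batch_size samples on one rank and multiplied by world_size to report global samples/sec. The same measurement as a small standalone sketch (the class name is illustrative):

import time

class ThroughputMeter:
    """Samples/sec over a logging window, mirroring the speed computation in the callbacks."""

    def __init__(self, frequent, batch_size, world_size):
        self.frequent = frequent
        self.batch_size = batch_size
        self.world_size = world_size
        self.tic = time.time()

    def rate(self):
        try:
            per_rank = self.frequent * self.batch_size / (time.time() - self.tic)
        except ZeroDivisionError:
            per_rank = float('inf')
        self.tic = time.time()
        return per_rank * self.world_size  # aggregate over all ranks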
Example No. 3
    def __init__(self, cfg, placement, load_path, world_size, rank):
        self.placement = placement
        self.load_path = load_path
        self.cfg = cfg
        self.world_size = world_size
        self.rank = rank

        # model
        self.backbone = get_model(cfg.network,
                                  dropout=0.0,
                                  num_features=cfg.embedding_size).to("cuda")
        self.train_module = Train_Module(cfg, self.backbone, self.placement,
                                         world_size).to("cuda")
        if cfg.resume:
            if load_path is not None:
                self.load_state_dict()
            else:
                logging.info("Model resume failed! load path is None ")

        # optimizer
        self.optimizer = make_optimizer(cfg, self.train_module)

        # data
        self.train_data_loader = make_data_loader(cfg, 'train', self.cfg.graph,
                                                  self.cfg.synthetic)

        # loss
        if cfg.loss == "cosface":
            self.margin_softmax = flow.nn.CombinedMarginLoss(1, 0.,
                                                             0.4).to("cuda")
        else:
            self.margin_softmax = flow.nn.CombinedMarginLoss(1, 0.5,
                                                             0.).to("cuda")

        self.of_cross_entropy = CrossEntropyLoss_sbp()

        # lr_scheduler
        self.decay_step = self.cal_decay_step()
        self.scheduler = flow.optim.lr_scheduler.MultiStepLR(
            optimizer=self.optimizer, milestones=self.decay_step, gamma=0.1)

        # log
        self.callback_logging = CallBackLogging(50, rank, cfg.total_step,
                                                cfg.batch_size, world_size,
                                                None)
        # val
        self.callback_verification = CallBackVerification(
            600,
            rank,
            cfg.val_targets,
            cfg.ofrecord_path,
            is_consistent=cfg.graph)
        # save checkpoint
        self.callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

        self.losses = AverageMeter()
        self.start_epoch = 0
        self.global_step = 0
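
The MultiStepLR milestones come from cal_decay_step() (shown in full in Example No. 8): each entry of cfg.decay_epoch is converted into a global step index. A small illustrative calculation with hypothetical numbers:

# Hypothetical values (roughly MS1M-scale), for illustration only.
num_image = 5_822_653
batch_size, world_size = 128, 8
total_batch_size = batch_size * world_size
decay_epoch = [10, 16, 22]

decay_step = [e * num_image // total_batch_size for e in decay_epoch]
print(decay_step)  # [56861, 90978, 125096] -- steps at which MultiStepLR multiplies the LR by gamma=0.1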
Example No. 4
class CallBackLogging(object):
    def __init__(self,
                 frequent,
                 total_step,
                 batch_size,
                 world_size,
                 writer=None):
        self.frequent: int = frequent

        self.time_start = time.time()
        self.total_step: int = total_step
        self.batch_size: int = batch_size
        self.world_size: int = world_size
        self.writer = writer

        self.init = False
        self.tic = 0
        self.losses = AverageMeter()

    def metric_cb(self, global_step: int, epoch: int, learning_rate: float):
        def callback(loss):
            loss = loss.mean()
            self.losses.update(loss, 1)
            if global_step % self.frequent == 0:

                if self.init:
                    try:
                        speed: float = self.frequent * self.batch_size / (
                            time.time() - self.tic)
                        speed_total = speed * self.world_size
                    except ZeroDivisionError:
                        speed_total = float('inf')

                    time_now = (time.time() - self.time_start) / 3600
                    time_total = time_now / (
                        (global_step + 1) / self.total_step)
                    time_for_end = time_total - time_now
                    if self.writer is not None:
                        self.writer.add_scalar('time_for_end', time_for_end,
                                               global_step)
                        self.writer.add_scalar('learning_rate', learning_rate,
                                               global_step)
                        self.writer.add_scalar('loss', self.losses.avg,
                                               global_step)
                    msg = "Speed %.2f samples/sec   Loss %.4f   LearningRate %.4f   Epoch: %d   Global Step: %d   " \
                          "Required: %1.f hours" % (
                              speed_total, self.losses.avg, learning_rate, epoch, global_step, time_for_end
                          )
                    logging.info(msg)
                    self.losses.reset()
                    self.tic = time.time()
                else:
                    self.init = True
                    self.tic = time.time()

        return callback
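
Unlike the __call__ variants above, this version builds a per-step closure so a lazy/graph runtime can hand the loss back once it is materialized. A hedged driver sketch (the loop and the fake loss array are illustrative; it assumes the class above plus an AverageMeter, e.g. the sketch after Example No. 6, are in scope):

import numpy as np

cb = CallBackLogging(frequent=50, total_step=10_000, batch_size=128, world_size=1)
for global_step in range(1, 101):
    callback = cb.metric_cb(global_step, epoch=0, learning_rate=0.1)
    callback(np.array([0.52, 0.61]))  # the runtime would pass the step's loss tensor here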
Example No. 5
    def __call__(
        self,
        global_step: int,
        loss: AverageMeter,
        epoch: int,
        fp16: bool,
        learning_rate: float,
        grad_scaler=None,
    ):
        if self.rank == 0 and global_step % self.frequent == 0:
            if self.init:
                try:
                    speed: float = self.frequent * self.batch_size / (
                        time.time() - self.tic)
                    speed_total = speed * self.world_size
                except ZeroDivisionError:
                    speed_total = float("inf")

                time_now = (time.time() - self.time_start) / 3600
                time_total = time_now / ((global_step + 1) / self.total_step)
                time_for_end = time_total - time_now
                if self.writer is not None:
                    self.writer.add_scalar("time_for_end", time_for_end,
                                           global_step)
                    self.writer.add_scalar("learning_rate", learning_rate,
                                           global_step)
                    self.writer.add_scalar("loss", loss.avg, global_step)
                if fp16:
                    msg = (
                        "Speed %.2f samples/sec   Loss %.4f   LearningRate %.4f   Epoch: %d   Global Step: %d   "
                        "Fp16 Grad Scale: %2.f   Required: %1.f hours" % (
                            speed_total,
                            loss.avg,
                            learning_rate,
                            epoch,
                            global_step,
                            grad_scaler.get_scale(),
                            time_for_end,
                        ))
                else:
                    msg = (
                        "Speed %.2f samples/sec   Loss %.4f   LearningRate %.4f   Epoch: %d   Global Step: %d   "
                        "Required: %1.f hours" % (
                            speed_total,
                            loss.avg,
                            learning_rate,
                            epoch,
                            global_step,
                            time_for_end,
                        ))
                logging.info(msg)
                loss.reset()
                self.tic = time.time()
            else:
                self.init = True
                self.tic = time.time()
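
In the fp16 branch the callback reads grad_scaler.get_scale(), the current dynamic loss scale of torch.cuda.amp.GradScaler. A minimal check of that call, outside any training loop:

import torch

# Same construction as in the later training examples (growth_interval=100).
scaler = torch.cuda.amp.GradScaler(growth_interval=100)
print(scaler.get_scale())  # 65536.0 by default (1.0 if CUDA is unavailable and the scaler auto-disables)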
Example No. 6
    def __init__(self,
                 frequent,
                 total_step,
                 batch_size,
                 world_size,
                 writer=None):
        self.frequent: int = frequent

        self.time_start = time.time()
        self.total_step: int = total_step
        self.batch_size: int = batch_size
        self.world_size: int = world_size
        self.writer = writer

        self.init = False
        self.tic = 0
        self.losses = AverageMeter()
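
Every example accumulates the loss in an AverageMeter between log lines; its definition is not included in these snippets. A minimal sketch consistent with the update()/avg/reset() usage above (an assumption about the interface, not the project's actual class):

class AverageMeter:
    """Running average of a scalar; mirrors the update()/avg/reset() calls in the callbacks."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count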
Example No. 7
def main(args):
    dist.init_process_group(backend='nccl', init_method='env://')
    local_rank = args.local_rank
    torch.cuda.set_device(local_rank)
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    if not os.path.exists(cfg.output) and rank == 0:
        os.makedirs(cfg.output)
    else:
        time.sleep(2)

    log_root = logging.getLogger()
    init_logging(log_root, rank, cfg.output)
    trainset = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        trainset, shuffle=True)
    train_loader = DataLoaderX(local_rank=local_rank,
                               dataset=trainset,
                               batch_size=cfg.batch_size,
                               sampler=train_sampler,
                               num_workers=0,
                               pin_memory=True,
                               drop_last=True)

    dropout = 0.4 if cfg.dataset == "webface" else 0
    backbone = eval("backbones.{}".format(args.network))(
        False, dropout=dropout, fp16=cfg.fp16).to(local_rank)

    if args.resume:
        try:
            backbone_pth = os.path.join(cfg.output, "backbone.pth")
            backbone.load_state_dict(
                torch.load(backbone_pth,
                           map_location=torch.device(local_rank)))
            if rank == 0:
                logging.info("backbone resume successfully!")
        except (FileNotFoundError, KeyError, IndexError, RuntimeError):
            logging.info("resume fail, backbone init successfully!")

    for ps in backbone.parameters():
        dist.broadcast(ps, 0)
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[local_rank])
    backbone.train()

    margin_softmax = eval("losses.{}".format(args.loss))()
    module_partial_fc = PartialFC(rank=rank,
                                  local_rank=local_rank,
                                  world_size=world_size,
                                  resume=args.resume,
                                  batch_size=cfg.batch_size,
                                  margin_softmax=margin_softmax,
                                  num_classes=cfg.num_classes,
                                  sample_rate=cfg.sample_rate,
                                  embedding_size=cfg.embedding_size,
                                  prefix=cfg.output)

    opt_backbone = torch.optim.SGD(params=[{
        'params': backbone.parameters()
    }],
                                   lr=cfg.lr / 512 * cfg.batch_size *
                                   world_size,
                                   momentum=0.9,
                                   weight_decay=cfg.weight_decay)
    opt_pfc = torch.optim.SGD(params=[{
        'params': module_partial_fc.parameters()
    }],
                              lr=cfg.lr / 512 * cfg.batch_size * world_size,
                              momentum=0.9,
                              weight_decay=cfg.weight_decay)

    scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_backbone, lr_lambda=cfg.lr_func)
    scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_pfc,
                                                      lr_lambda=cfg.lr_func)

    start_epoch = 0
    total_step = int(
        len(trainset) / cfg.batch_size / world_size * cfg.num_epoch)
    if rank == 0: logging.info("Total Step is: %d" % total_step)

    callback_verification = CallBackVerification(2000, rank, cfg.val_targets,
                                                 cfg.rec)
    callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size,
                                       world_size, None)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

    loss = AverageMeter()
    global_step = 0
    grad_scaler = MaxClipGradScaler(
        cfg.batch_size, 128 *
        cfg.batch_size, growth_interval=100) if cfg.fp16 else None
    for epoch in range(start_epoch, cfg.num_epoch):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            global_step += 1
            features = F.normalize(backbone(img))
            x_grad, loss_v = module_partial_fc.forward_backward(
                label, features, opt_pfc)
            if cfg.fp16:
                features.backward(grad_scaler.scale(x_grad))
                grad_scaler.unscale_(opt_backbone)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                grad_scaler.step(opt_backbone)
                grad_scaler.update()
            else:
                features.backward(x_grad)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                opt_backbone.step()

            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, cfg.fp16, grad_scaler)
            callback_verification(global_step, backbone)
        callback_checkpoint(global_step, backbone, module_partial_fc)
        scheduler_backbone.step()
        scheduler_pfc.step()
    dist.destroy_process_group()
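
Both optimizers apply the linear scaling rule: cfg.lr is defined for a global batch of 512 and rescaled by the actual global batch size. Worked out with hypothetical numbers:

# Hypothetical values for illustration.
base_lr = 0.1        # cfg.lr, defined for a global batch size of 512
batch_size = 128     # per-GPU batch size
world_size = 8       # number of GPUs

scaled_lr = base_lr / 512 * batch_size * world_size
print(scaled_lr)     # 0.2 -- the learning rate actually passed to SGD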
Example No. 8
class Trainer(object):
    def __init__(self, cfg, placement, load_path, world_size, rank):
        self.placement = placement
        self.load_path = load_path
        self.cfg = cfg
        self.world_size = world_size
        self.rank = rank

        # model
        self.backbone = get_model(cfg.network,
                                  dropout=0.0,
                                  num_features=cfg.embedding_size).to("cuda")
        self.train_module = Train_Module(cfg, self.backbone, self.placement,
                                         world_size).to("cuda")
        if cfg.resume:
            if load_path is not None:
                self.load_state_dict()
            else:
                logging.info("Model resume failed! load path is None ")

        # optimizer
        self.optimizer = make_optimizer(cfg, self.train_module)

        # data
        self.train_data_loader = make_data_loader(cfg, 'train', self.cfg.graph,
                                                  self.cfg.synthetic)

        # loss
        if cfg.loss == "cosface":
            self.margin_softmax = flow.nn.CombinedMarginLoss(1, 0.,
                                                             0.4).to("cuda")
        else:
            self.margin_softmax = flow.nn.CombinedMarginLoss(1, 0.5,
                                                             0.).to("cuda")

        self.of_cross_entropy = CrossEntropyLoss_sbp()

        # lr_scheduler
        self.decay_step = self.cal_decay_step()
        self.scheduler = flow.optim.lr_scheduler.MultiStepLR(
            optimizer=self.optimizer, milestones=self.decay_step, gamma=0.1)

        # log
        self.callback_logging = CallBackLogging(50, rank, cfg.total_step,
                                                cfg.batch_size, world_size,
                                                None)
        # val
        self.callback_verification = CallBackVerification(
            600,
            rank,
            cfg.val_targets,
            cfg.ofrecord_path,
            is_consistent=cfg.graph)
        # save checkpoint
        self.callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

        self.losses = AverageMeter()
        self.start_epoch = 0
        self.global_step = 0

    def __call__(self):
        # Train
        if self.cfg.graph:
            self.train_graph()
        else:
            self.train_eager()

    def load_state_dict(self):

        if self.is_consistent:
            state_dict = flow.load(self.load_path, consistent_src_rank=0)
        elif self.rank == 0:
            state_dict = flow.load(self.load_path)
        else:
            return
        logging.info("Model resume successfully!")
        self.model.load_state_dict(state_dict)

    def cal_decay_step(self):
        cfg = self.cfg
        num_image = cfg.num_image
        total_batch_size = cfg.batch_size * self.world_size
        self.warmup_step = num_image // total_batch_size * cfg.warmup_epoch
        self.cfg.total_step = num_image // total_batch_size * cfg.num_epoch
        logging.info("Total Step is:%d" % self.cfg.total_step)
        return [x * num_image // total_batch_size for x in cfg.decay_epoch]

    def train_graph(self):
        train_graph = TrainGraph(self.train_module, self.cfg,
                                 self.margin_softmax, self.of_cross_entropy,
                                 self.train_data_loader, self.optimizer,
                                 self.scheduler)
        # train_graph.debug()
        val_graph = EvalGraph(self.backbone, self.cfg)

        for epoch in range(self.start_epoch, self.cfg.num_epoch):
            self.train_module.train()
            one_epoch_steps = len(self.train_data_loader)
            for steps in range(one_epoch_steps):
                self.global_step += 1
                loss = train_graph()
                loss = loss.to_consistent(
                    sbp=flow.sbp.broadcast).to_local().numpy()
                self.losses.update(loss, 1)
                self.callback_logging(self.global_step, self.losses, epoch,
                                      False,
                                      self.scheduler.get_last_lr()[0])
                self.callback_verification(self.global_step, self.train_module,
                                           val_graph)
            self.callback_checkpoint(self.global_step,
                                     epoch,
                                     self.train_module,
                                     is_consistent=True)

    def train_eager(self):
        self.train_module = ddp(self.train_module)
        for epoch in range(self.start_epoch, self.cfg.num_epoch):
            self.train_module.train()

            one_epoch_steps = len(self.train_data_loader)
            for steps in range(one_epoch_steps):
                self.global_step += 1
                image, label = self.train_data_loader()
                image = image.to("cuda")
                label = label.to("cuda")
                features_fc7 = self.train_module(image, label)
                features_fc7 = self.margin_softmax(features_fc7, label) * 64
                loss = self.of_cross_entropy(features_fc7, label)
                loss.backward()
                self.optimizer.step()
                self.optimizer.zero_grad()

                loss = loss.numpy()
                self.losses.update(loss, 1)
                self.callback_logging(self.global_step, self.losses, epoch,
                                      False,
                                      self.scheduler.get_last_lr()[0])
                self.callback_verification(self.global_step, self.backbone)
                self.scheduler.step()
            self.callback_checkpoint(self.global_step, epoch,
                                     self.train_module)
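
The CombinedMarginLoss arguments in the constructor select between the two margin schemes used throughout these examples: (m1, m2, m3) = (1, 0.5, 0) is the ArcFace-style additive angular margin and (1, 0, 0.4) is the CosFace-style additive cosine margin. A scalar sketch of the target-logit transform these triples conventionally denote, cos(m1*theta + m2) - m3 (illustrative only; the real loss works on tensors and applies the scale factor afterwards):

import math

def combined_margin(cos_theta, m1, m2, m3):
    theta = math.acos(max(-1.0, min(1.0, cos_theta)))
    return math.cos(m1 * theta + m2) - m3

print(combined_margin(0.8, 1.0, 0.5, 0.0))  # ArcFace-style: cos(theta + 0.5)
print(combined_margin(0.8, 1.0, 0.0, 0.4))  # CosFace-style: cos_theta - 0.4, ~0.4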
Example No. 9
def main(args):
    # dist
    world_size = int(os.environ['WORLD_SIZE'])
    local_rank = args.local_rank
    rank = int(os.environ['RANK'])
    dist_url = "tcp://{}:{}".format(os.environ["MASTER_ADDR"],
                                    os.environ["MASTER_PORT"])
    dist.init_process_group(backend='nccl',
                            init_method=dist_url,
                            rank=rank,
                            world_size=world_size)
    torch.cuda.set_device(local_rank)

    # logging
    if not os.path.exists(cfg.output) and rank == 0:
        os.makedirs(cfg.output)
    else:
        time.sleep(2)
    log_root = logging.getLogger()
    init_logging(log_root, rank, cfg.output)

    # data
    trainset = MXFaceDataset(root_dir=cfg.rec)
    train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
    train_loader = DataLoader(trainset,
                              cfg.batch_size,
                              shuffle=False,
                              num_workers=8,
                              pin_memory=True,
                              sampler=train_sampler,
                              drop_last=True)

    # backbone and DDP
    with open(args.pruned_info) as f_:
        cfg_ = [int(x) for x in f_.read().split()]
    backbone = backbones.__dict__[args.network](dropout=cfg.dropout,
                                                fp16=cfg.fp16,
                                                cfg=cfg_)
    if args.resume:
        try:
            backbone_pth = os.path.join(cfg.output, "backbone.pth")
            backbone.load_state_dict(
                torch.load(backbone_pth,
                           map_location=torch.device(local_rank)))
            if rank == 0:
                logging.info("backbone resume successfully!")
        except (FileNotFoundError, KeyError, IndexError, RuntimeError):
            logging.info("resume fail, backbone init successfully!")
    backbone = backbone.cuda()
    backbone = torch.nn.SyncBatchNorm.convert_sync_batchnorm(backbone)
    backbone = DDP(module=backbone, device_ids=[local_rank])

    # fc and loss
    margin_softmax = losses.__dict__[args.loss]()
    module_partial_fc = PartialFC(rank=rank,
                                  local_rank=local_rank,
                                  world_size=world_size,
                                  resume=args.resume,
                                  batch_size=cfg.batch_size,
                                  margin_softmax=margin_softmax,
                                  num_classes=cfg.num_classes,
                                  sample_rate=cfg.sample_rate,
                                  embedding_size=cfg.embedding_size,
                                  prefix=cfg.output)

    # optimizer
    opt_backbone = torch.optim.SGD(params=[{
        'params': backbone.parameters()
    }],
                                   lr=cfg.lr / 512 * cfg.batch_size *
                                   world_size,
                                   momentum=0.9,
                                   weight_decay=cfg.weight_decay)
    opt_pfc = torch.optim.SGD(params=[{
        'params': module_partial_fc.parameters()
    }],
                              lr=cfg.lr / 512 * cfg.batch_size * world_size,
                              momentum=0.9,
                              weight_decay=cfg.weight_decay)
    scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_backbone, lr_lambda=cfg.lr_func)
    scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_pfc,
                                                      lr_lambda=cfg.lr_func)

    # train and valid
    start_epoch = 0
    total_step = int(
        len(trainset) / cfg.batch_size / world_size * cfg.num_epoch)
    if rank == 0: logging.info("Total Step is: %d" % total_step)

    callback_verification = CallBackVerification(2000, rank, cfg.val_targets,
                                                 cfg.rec)
    callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size,
                                       world_size, None)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

    loss = AverageMeter()
    global_step = 0
    grad_scaler = MaxClipGradScaler(
        cfg.batch_size, 128 *
        cfg.batch_size, growth_interval=100) if cfg.fp16 else None
    for epoch in range(start_epoch, cfg.num_epoch):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            img = img.cuda()
            label = label.cuda()
            global_step += 1
            features = F.normalize(backbone(img))

            x_grad, loss_v = module_partial_fc.forward_backward(
                label, features, opt_pfc)
            if cfg.fp16:
                features.backward(grad_scaler.scale(x_grad))
                grad_scaler.unscale_(opt_backbone)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                grad_scaler.step(opt_backbone)
                grad_scaler.update()
            else:
                features.backward(x_grad)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                opt_backbone.step()

            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            loss.update(loss_v, 1)

            lr = opt_backbone.state_dict()['param_groups'][0]['lr']
            callback_logging(global_step, loss, epoch, cfg.fp16, grad_scaler,
                             lr)
            callback_verification(global_step, backbone)
        callback_checkpoint(global_step, backbone, module_partial_fc)

        scheduler_backbone.step()
        scheduler_pfc.step()

    # release dist
    dist.destroy_process_group()
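
The per-step learning rate is read here through opt_backbone.state_dict()['param_groups'][0]['lr']; the same value is available directly from the optimizer's param_groups. A quick standalone check with a throwaway parameter:

import torch

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([{'params': [param]}], lr=0.1, momentum=0.9)

lr_from_state = opt.state_dict()['param_groups'][0]['lr']
lr_direct = opt.param_groups[0]['lr']
assert lr_from_state == lr_direct == 0.1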
Example No. 10
def main(args):
    cfg = get_config(args.config)
    if not cfg.tf32:
        torch.backends.cuda.matmul.allow_tf32 = False
        torch.backends.cudnn.allow_tf32 = False
    try:
        world_size = int(os.environ['WORLD_SIZE'])
        rank = int(os.environ['RANK'])
        dist_url = "tcp://{}:{}".format(os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"])
    except KeyError:
        world_size = 1
        rank = 0
        dist_url = "tcp://127.0.0.1:12584"

    dist.init_process_group(backend='nccl', init_method=dist_url, rank=rank, world_size=world_size)
    local_rank = args.local_rank
    torch.cuda.set_device(local_rank)

    if not os.path.exists(cfg.output) and rank==0:
        os.makedirs(cfg.output)
    else:
        time.sleep(2)

    log_root = logging.getLogger()
    init_logging(log_root, rank, cfg.output)
    if rank==0:
        logging.info(args)
        logging.info(cfg)

    train_set = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_set, shuffle=True)
    train_loader = DataLoaderX(
        local_rank=local_rank, dataset=train_set, batch_size=cfg.batch_size,
        sampler=train_sampler, num_workers=2, pin_memory=True, drop_last=True)

    dropout = 0.4 if cfg.dataset == "webface" else 0
    backbone = get_model(cfg.network, dropout=dropout, fp16=cfg.fp16).to(local_rank)
    backbone_onnx = get_model(cfg.network, dropout=dropout, fp16=False)

    if args.resume:
        try:
            backbone_pth = os.path.join(cfg.output, "backbone.pth")
            backbone.load_state_dict(torch.load(backbone_pth, map_location=torch.device(local_rank)))
            if rank==0:
                logging.info("backbone resume successfully!")
        except (FileNotFoundError, KeyError, IndexError, RuntimeError):
            logging.info("resume fail, backbone init successfully!")

    for ps in backbone.parameters():
        dist.broadcast(ps, 0)
    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[local_rank])
    backbone.train()

    cfg_vpl = cfg.vpl
    vpl_momentum = cfg_vpl['momentum']
    if vpl_momentum:
        backbone_w = get_model(cfg.network, dropout=dropout, fp16=cfg.fp16).to(local_rank)
        backbone_w.train()
        for param_b, param_w in zip(backbone.module.parameters(), backbone_w.parameters()):
            param_w.data.copy_(param_b.data)
            param_w.requires_grad = False

    margin_softmax = losses.get_loss(cfg.loss)
    module_fc = VPL(
        rank=rank, local_rank=local_rank, world_size=world_size, resume=args.resume,
        batch_size=cfg.batch_size, margin_softmax=margin_softmax, num_classes=cfg.num_classes,
        sample_rate=cfg.sample_rate, embedding_size=cfg.embedding_size, prefix=cfg.output,
        cfg = cfg_vpl)

    opt_backbone = torch.optim.SGD(
        params=[{'params': backbone.parameters()}],
        lr=cfg.lr / 512 * cfg.batch_size * world_size,
        momentum=0.9, weight_decay=cfg.weight_decay)
    opt_pfc = torch.optim.SGD(
        params=[{'params': module_fc.parameters()}],
        lr=cfg.lr / 512 * cfg.batch_size * world_size,
        momentum=0.9, weight_decay=cfg.weight_decay)

    scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_backbone, lr_lambda=cfg.lr_func)
    scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_pfc, lr_lambda=cfg.lr_func)

    start_epoch = 0
    total_step = int(len(train_set) / cfg.batch_size / world_size * cfg.num_epoch)
    if rank==0: logging.info("Total Step is: %d" % total_step)

    #for epoch in range(start_epoch, cfg.num_epoch):
    #    _lr = cfg.lr_func(epoch)
    #    logging.info('%d:%f'%(epoch, _lr))

    callback_verification = CallBackVerification(10000, rank, cfg.val_targets, cfg.rec)
    callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, None)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

    loss = AverageMeter()
    global_step = 0
    grad_amp = MaxClipGradScaler(cfg.batch_size, 128 * cfg.batch_size, growth_interval=100) if cfg.fp16 else None
    use_batch_shuffle = True
    alpha = 0.999
    for epoch in range(start_epoch, cfg.num_epoch):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            global_step += 1
            #img = img.to(memory_format=torch.channels_last)
            features = F.normalize(backbone(img))
            feature_w = None
            if vpl_momentum:
                with torch.no_grad():
                    for param_b, param_w in zip(backbone.module.parameters(), backbone_w.parameters()):
                        param_w.data = param_w.data * alpha + param_b.data * (1. - alpha)
                    if use_batch_shuffle:
                        img_w, idx_unshuffle = batch_shuffle_ddp(img, rank, world_size)
                    else:
                        img_w = img

                    feature_w = F.normalize(backbone_w(img_w))
                    if use_batch_shuffle:
                        feature_w = batch_unshuffle_ddp(feature_w, idx_unshuffle, rank, world_size)
                    feature_w = feature_w.detach()

            x_grad, loss_v = module_fc.forward_backward(label, features, opt_pfc, feature_w)
            if cfg.fp16:
                features.backward(grad_amp.scale(x_grad))
                grad_amp.unscale_(opt_backbone)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                grad_amp.step(opt_backbone)
                grad_amp.update()
            else:
                features.backward(x_grad)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                opt_backbone.step()

            opt_pfc.step()
            module_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, cfg.fp16, grad_amp)
            callback_verification(global_step, backbone)
        callback_checkpoint(global_step, backbone, module_fc, backbone_onnx)
        scheduler_backbone.step()
        scheduler_pfc.step()
    dist.destroy_process_group()
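
This example maintains a momentum ("weak") backbone for VPL, updated as an exponential moving average of the trained backbone before each no-grad forward pass. The EMA step in isolation, as an illustrative helper (the function is not part of the source):

import torch

@torch.no_grad()
def ema_update(model, ema_model, alpha=0.999):
    # w <- alpha * w + (1 - alpha) * b, matching the in-loop update above
    for p_b, p_w in zip(model.parameters(), ema_model.parameters()):
        p_w.data.mul_(alpha).add_(p_b.data, alpha=1.0 - alpha)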
Example No. 11
def main(args):
    world_size = 1
    rank = 0

    if not os.path.exists(args.output):
        os.makedirs(args.output)
    else:
        time.sleep(2)

    writer = LogWriter(logdir=args.logdir)
    trainset = CommonDataset(root_dir=cfg.data_dir, label_file=cfg.file_list, is_bin=args.is_bin)
    train_loader = DataLoader(
        dataset=trainset,
        batch_size=args.batch_size,
        shuffle=True,
        drop_last=True,
        num_workers=0)

    backbone = eval("backbones.{}".format(args.network))()
    backbone.train()

    clip_by_norm = ClipGradByNorm(5.0)
    margin_softmax = eval("losses.{}".format(args.loss))()

    module_partial_fc = PartialFC(
        rank=0,
        world_size=1,
        resume=0,
        batch_size=args.batch_size,
        margin_softmax=margin_softmax,
        num_classes=cfg.num_classes,
        sample_rate=cfg.sample_rate,
        embedding_size=args.embedding_size,
        prefix=args.output)

    scheduler_backbone_decay = paddle.optimizer.lr.LambdaDecay(
        learning_rate=args.lr, lr_lambda=cfg.lr_func, verbose=True)
    scheduler_backbone = paddle.optimizer.lr.LinearWarmup(
        learning_rate=scheduler_backbone_decay,
        warmup_steps=cfg.warmup_epoch,
        start_lr=0,
        end_lr=args.lr / 512 * args.batch_size,
        verbose=True)
    opt_backbone = paddle.optimizer.Momentum(
        parameters=backbone.parameters(),
        learning_rate=scheduler_backbone,
        momentum=0.9,
        weight_decay=args.weight_decay,
        grad_clip=clip_by_norm)

    scheduler_pfc_decay = paddle.optimizer.lr.LambdaDecay(
        learning_rate=args.lr, lr_lambda=cfg.lr_func, verbose=True)
    scheduler_pfc = paddle.optimizer.lr.LinearWarmup(
        learning_rate=scheduler_pfc_decay,
        warmup_steps=cfg.warmup_epoch,
        start_lr=0,
        end_lr=args.lr / 512 * args.batch_size,
        verbose=True)
    opt_pfc = paddle.optimizer.Momentum(
        parameters=module_partial_fc.parameters(),
        learning_rate=scheduler_pfc,
        momentum=0.9,
        weight_decay=args.weight_decay,
        grad_clip=clip_by_norm)

    start_epoch = 0
    total_step = int(
        len(trainset) / args.batch_size / world_size * cfg.num_epoch)
    if rank == 0:
        print("Total Step is: %d" % total_step)

    callback_verification = CallBackVerification(2000, rank, cfg.val_targets,
                                                 cfg.data_dir)
    callback_logging = CallBackLogging(10, rank, total_step, args.batch_size,
                                       world_size, writer)
    callback_checkpoint = CallBackModelCheckpoint(rank, args.output,
                                                  args.network)

    loss = AverageMeter()
    global_step = 0
    for epoch in range(start_epoch, cfg.num_epoch):
        for step, (img, label) in enumerate(train_loader):
            label = label.flatten()
            global_step += 1
            sys.stdout.flush()
            features = F.normalize(backbone(img))
            x_grad, loss_v = module_partial_fc.forward_backward(
                label, features, opt_pfc)
            sys.stdout.flush()
            (features.multiply(x_grad)).backward()
            sys.stdout.flush()
            opt_backbone.step()
            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.clear_gradients()
            opt_pfc.clear_gradients()
            sys.stdout.flush()

            lr_backbone_value = opt_backbone._global_learning_rate().numpy()[0]
            lr_pfc_value = opt_pfc._global_learning_rate().numpy()[0]

            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, lr_backbone_value,
                             lr_pfc_value)
            sys.stdout.flush()
            callback_verification(global_step, backbone)
        callback_checkpoint(global_step, backbone, module_partial_fc)
        scheduler_backbone.step()
        scheduler_pfc.step()
    writer.close()
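
The logged learning rates above come from the private _global_learning_rate() accessor; Paddle's public Optimizer.get_lr() exposes the same current value as a float. A minimal standalone check (assumes a Paddle install; the Linear layer is a placeholder):

import paddle

linear = paddle.nn.Linear(4, 4)
opt = paddle.optimizer.Momentum(parameters=linear.parameters(),
                                learning_rate=0.1, momentum=0.9)
print(opt.get_lr())  # 0.1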
Example No. 12
def main(args):
    world_size = 1
    rank = 0
    local_rank = args.local_rank

    if not os.path.exists(cfg.output):
        os.makedirs(cfg.output)
    else:
        time.sleep(2)

    writer = LogWriter(logdir=cfg.logdir)

    trainset = MXFaceDataset(root_dir=cfg.rec)
    train_loader = DataLoader(dataset=trainset,
                              batch_size=cfg.batch_size,
                              shuffle=True,
                              drop_last=True,
                              num_workers=0)

    dropout = 0.4 if cfg.dataset == "webface" else 0
    backbone = eval("backbones.{}".format(args.network))(False,
                                                         dropout=dropout,
                                                         fp16=False)
    backbone.train()

    clip_by_norm = ClipGradByNorm(5.0)
    margin_softmax = eval("losses.{}".format(args.loss))()

    module_partial_fc = PartialFC(rank=0,
                                  local_rank=0,
                                  world_size=1,
                                  resume=0,
                                  batch_size=cfg.batch_size,
                                  margin_softmax=margin_softmax,
                                  num_classes=cfg.num_classes,
                                  sample_rate=cfg.sample_rate,
                                  embedding_size=cfg.embedding_size,
                                  prefix=cfg.output)

    scheduler_backbone = paddle.optimizer.lr.LambdaDecay(learning_rate=cfg.lr /
                                                         512 * cfg.batch_size,
                                                         lr_lambda=cfg.lr_func,
                                                         verbose=True)
    opt_backbone = paddle.optimizer.SGD(parameters=backbone.parameters(),
                                        learning_rate=scheduler_backbone,
                                        weight_decay=cfg.weight_decay,
                                        grad_clip=clip_by_norm)
    scheduler_pfc = paddle.optimizer.lr.LambdaDecay(learning_rate=cfg.lr /
                                                    512 * cfg.batch_size,
                                                    lr_lambda=cfg.lr_func,
                                                    verbose=True)
    opt_pfc = paddle.optimizer.SGD(parameters=module_partial_fc.parameters(),
                                   learning_rate=scheduler_pfc,
                                   weight_decay=cfg.weight_decay,
                                   grad_clip=clip_by_norm)

    start_epoch = 0
    total_step = int(
        len(trainset) / cfg.batch_size / world_size * cfg.num_epoch)
    if rank == 0:
        print("Total Step is: %d" % total_step)

    callback_verification = CallBackVerification(2000, rank, cfg.val_targets,
                                                 cfg.rec)
    callback_logging = CallBackLogging(100, rank, total_step, cfg.batch_size,
                                       world_size, writer)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

    loss = AverageMeter()
    global_step = 0
    grad_scaler = MaxClipGradScaler(
        cfg.batch_size, 128 *
        cfg.batch_size, growth_interval=100) if cfg.fp16 else None
    for epoch in range(start_epoch, cfg.num_epoch):
        for step, (img, label) in enumerate(train_loader):
            label = label.flatten()
            global_step += 1
            features = F.normalize(backbone(img))
            x_grad, loss_v = module_partial_fc.forward_backward(
                label, features, opt_pfc)
            if cfg.fp16:
                scaled = grad_scaler.scale(x_grad)
                (features.multiply(scaled)).backward()
                grad_scaler._unscale(opt_backbone)
                grad_scaler.minimize(opt_backbone, scaled)
            else:
                (features.multiply(x_grad)).backward()
                opt_backbone.step()
            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.clear_gradients()
            opt_pfc.clear_gradients()
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, cfg.fp16, grad_scaler)
            callback_verification(global_step, backbone)
        callback_checkpoint(global_step, backbone, module_partial_fc)
        scheduler_backbone.step()
        scheduler_pfc.step()
    writer.close()
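
Several of these examples (No. 7, 11, and 12) resolve the backbone class with eval("backbones.{}".format(args.network)). A hedged, safer equivalent uses getattr on the package, avoiding evaluation of arbitrary strings (it assumes the same backbones package the examples import):

import backbones  # assumed to expose the network constructors, as in the examples

def get_backbone(name, **kwargs):
    try:
        return getattr(backbones, name)(**kwargs)
    except AttributeError as exc:
        raise ValueError('unknown backbone: {!r}'.format(name)) from exc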
Example No. 13
def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    cfg = get_config(args.config)

    try:
        world_size = int(os.environ['WORLD_SIZE'])
        rank = int(os.environ['RANK'])
        dist.init_process_group('nccl')
    except KeyError:
        world_size = 1
        rank = 0
        dist.init_process_group(backend='nccl',
                                init_method="tcp://127.0.0.1:12584",
                                rank=rank,
                                world_size=world_size)

    local_rank = args.local_rank
    torch.cuda.set_device(local_rank)
    os.makedirs(cfg.output, exist_ok=True)
    init_logging(rank, cfg.output)

    if cfg.rec == "synthetic":
        train_set = SyntheticDataset(local_rank=local_rank)
    else:
        train_set = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)

    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_set, shuffle=True)
    train_loader = DataLoaderX(local_rank=local_rank,
                               dataset=train_set,
                               batch_size=cfg.batch_size,
                               sampler=train_sampler,
                               num_workers=2,
                               pin_memory=True,
                               drop_last=True)
    backbone = get_model(cfg.network,
                         dropout=0.0,
                         fp16=cfg.fp16,
                         num_features=cfg.embedding_size).to(local_rank)
    summary(backbone, input_size=(3, 112, 112))

    if cfg.resume:
        try:
            backbone_pth = os.path.join(cfg.output, "backbone.pth")
            backbone.load_state_dict(
                torch.load(backbone_pth,
                           map_location=torch.device(local_rank)))
            if rank == 0:
                logging.info("backbone resume successfully!")
        except (FileNotFoundError, KeyError, IndexError, RuntimeError):
            if rank == 0:
                logging.info("resume fail, backbone init successfully!")

    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[local_rank])
    backbone.train()
    if cfg.loss == 'magface':
        margin_softmax = losses.get_loss(cfg.loss, lambda_g=cfg.lambda_g)
    elif cfg.loss == 'mag_cosface':
        margin_softmax = losses.get_loss(cfg.loss)
    else:
        margin_softmax = losses.get_loss(cfg.loss,
                                         s=cfg.s,
                                         m1=cfg.m1,
                                         m2=cfg.m2,
                                         m3=cfg.m3)
    module_partial_fc = PartialFC(rank=rank,
                                  local_rank=local_rank,
                                  world_size=world_size,
                                  resume=cfg.resume,
                                  batch_size=cfg.batch_size,
                                  margin_softmax=margin_softmax,
                                  num_classes=cfg.num_classes,
                                  sample_rate=cfg.sample_rate,
                                  embedding_size=cfg.embedding_size,
                                  prefix=cfg.output)

    opt_backbone = torch.optim.SGD(params=[{
        'params': backbone.parameters()
    }],
                                   lr=cfg.lr / 512 * cfg.batch_size *
                                   world_size,
                                   momentum=0.9,
                                   weight_decay=cfg.weight_decay)
    opt_pfc = torch.optim.SGD(params=[{
        'params': module_partial_fc.parameters()
    }],
                              lr=cfg.lr / 512 * cfg.batch_size * world_size,
                              momentum=0.9,
                              weight_decay=cfg.weight_decay)

    num_image = len(train_set)
    total_batch_size = cfg.batch_size * world_size
    cfg.warmup_step = num_image // total_batch_size * cfg.warmup_epoch
    cfg.total_step = num_image // total_batch_size * cfg.num_epoch

    def lr_step_func(current_step):
        cfg.decay_step = [
            x * num_image // total_batch_size for x in cfg.decay_epoch
        ]
        if current_step < cfg.warmup_step:
            return current_step / cfg.warmup_step
        else:
            return 0.1**len([m for m in cfg.decay_step if m <= current_step])

    scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
        optimizer=opt_backbone, lr_lambda=lr_step_func)
    scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(optimizer=opt_pfc,
                                                      lr_lambda=lr_step_func)

    for key, value in cfg.items():
        num_space = 25 - len(key)
        logging.info(": " + key + " " * num_space + str(value))

    val_target = cfg.val_targets
    callback_verification = CallBackVerification(2000, rank, val_target,
                                                 cfg.rec)
    callback_logging = CallBackLogging(50, rank, cfg.total_step,
                                       cfg.batch_size, world_size, None)
    callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)

    loss = AverageMeter()
    start_epoch = 0
    global_step = 0
    grad_amp = MaxClipGradScaler(
        cfg.batch_size, 128 *
        cfg.batch_size, growth_interval=100) if cfg.fp16 else None
    for epoch in range(start_epoch, cfg.num_epoch):
        train_sampler.set_epoch(epoch)
        for step, (img, label) in enumerate(train_loader):
            global_step += 1
            x = backbone(img)
            features = F.normalize(x)
            x_grad, loss_v = module_partial_fc.forward_backward(
                label, features, opt_pfc, x)
            if cfg.fp16:
                features.backward(grad_amp.scale(x_grad))
                grad_amp.unscale_(opt_backbone)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                grad_amp.step(opt_backbone)
                grad_amp.update()
            else:
                features.backward(x_grad)
                clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
                opt_backbone.step()

            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            loss.update(loss_v, 1)
            callback_logging(global_step, loss, epoch, cfg.fp16,
                             scheduler_backbone.get_last_lr()[0], grad_amp)
            callback_verification(global_step, backbone)
            scheduler_backbone.step()
            scheduler_pfc.step()
        callback_checkpoint(global_step, backbone, module_partial_fc)

    callback_verification('last', backbone)
    dist.destroy_process_group()
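
The lr_step_func closure above returns a multiplier on the base learning rate: a linear ramp during warm-up, then a factor of 0.1 for every decay milestone already passed. The same rule as a standalone sketch with hypothetical step counts:

# Hypothetical step counts for illustration.
warmup_step = 1000
decay_step = [20000, 28000]

def lr_multiplier(current_step):
    if current_step < warmup_step:
        return current_step / warmup_step
    return 0.1 ** len([m for m in decay_step if m <= current_step])

print(lr_multiplier(500))    # 0.5   (half-way through warm-up)
print(lr_multiplier(25000))  # 0.1   (past the first milestone)
print(lr_multiplier(30000))  # ~0.01 (past both milestones)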
Example No. 14
def main(args):
    torch.cuda.set_device(args.local_rank)
    cfg = get_config(args.config)

    os.makedirs(cfg.output, exist_ok=True)
    init_logging(rank, cfg.output)
    summary_writer = (SummaryWriter(
        log_dir=os.path.join(cfg.output, "tensorboard"))
                      if rank == 0 else None)
    train_loader = get_dataloader(cfg.rec,
                                  local_rank=args.local_rank,
                                  batch_size=cfg.batch_size,
                                  dali=cfg.dali)
    backbone = get_model(cfg.network,
                         dropout=0.0,
                         fp16=cfg.fp16,
                         num_features=cfg.embedding_size).cuda()

    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone, broadcast_buffers=False, device_ids=[args.local_rank])
    backbone.train()

    if cfg.loss == "arcface":
        margin_loss = ArcFace()
    elif cfg.loss == "cosface":
        margin_loss = CosFace()
    else:
        raise ValueError("unsupported loss: {}".format(cfg.loss))

    module_partial_fc = PartialFC(margin_loss, cfg.embedding_size,
                                  cfg.num_classes, cfg.sample_rate, cfg.fp16)
    module_partial_fc.train().cuda()

    # TODO the params of partial fc must be last in the params list
    opt = torch.optim.SGD(params=[
        {
            "params": backbone.parameters(),
        },
        {
            "params": module_partial_fc.parameters(),
        },
    ],
                          lr=cfg.lr,
                          momentum=0.9,
                          weight_decay=cfg.weight_decay)
    total_batch_size = cfg.batch_size * world_size
    cfg.warmup_step = cfg.num_image // total_batch_size * cfg.warmup_epoch
    cfg.total_step = cfg.num_image // total_batch_size * cfg.num_epoch
    lr_scheduler = PolyScheduler(optimizer=opt,
                                 base_lr=cfg.lr,
                                 max_steps=cfg.total_step,
                                 warmup_steps=cfg.warmup_step)

    for key, value in cfg.items():
        num_space = 25 - len(key)
        logging.info(": " + key + " " * num_space + str(value))

    callback_verification = CallBackVerification(val_targets=cfg.val_targets,
                                                 rec_prefix=cfg.rec,
                                                 summary_writer=summary_writer)
    callback_logging = CallBackLogging(frequent=cfg.frequent,
                                       total_step=cfg.total_step,
                                       batch_size=cfg.batch_size,
                                       writer=summary_writer)

    loss_am = AverageMeter()
    start_epoch = 0
    global_step = 0
    amp = torch.cuda.amp.grad_scaler.GradScaler(growth_interval=100)

    for epoch in range(start_epoch, cfg.num_epoch):

        if isinstance(train_loader, DataLoader):
            train_loader.sampler.set_epoch(epoch)
        for _, (img, local_labels) in enumerate(train_loader):
            global_step += 1
            local_embeddings = backbone(img)
            loss: torch.Tensor = module_partial_fc(local_embeddings,
                                                   local_labels, opt)

            if cfg.fp16:
                amp.scale(loss).backward()
                amp.unscale_(opt)
                torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
                amp.step(opt)
                amp.update()
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
                opt.step()

            opt.zero_grad()
            lr_scheduler.step()

            with torch.no_grad():
                loss_am.update(loss.item(), 1)
                callback_logging(global_step, loss_am, epoch, cfg.fp16,
                                 lr_scheduler.get_last_lr()[0], amp)

                if global_step % cfg.verbose == 0 and global_step > 200:
                    callback_verification(global_step, backbone)

        path_pfc = os.path.join(cfg.output,
                                "softmax_fc_gpu_{}.pt".format(rank))
        torch.save(module_partial_fc.state_dict(), path_pfc)
        if rank == 0:
            path_module = os.path.join(cfg.output, "model.pt")
            torch.save(backbone.module.state_dict(), path_module)

        if cfg.dali:
            train_loader.reset()

    if rank == 0:
        path_module = os.path.join(cfg.output, "model.pt")
        torch.save(backbone.module.state_dict(), path_module)
    distributed.destroy_process_group()
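
The TODO notes that the Partial FC parameters must form the last entry in the optimizer's parameter list. A hedged sketch of that ordering with stand-in modules (the Linear layers are placeholders for the real backbone and PartialFC head):

import torch

backbone = torch.nn.Linear(512, 512)    # stand-in for the real backbone
pfc_head = torch.nn.Linear(512, 1000)   # stand-in for the PartialFC head

opt = torch.optim.SGD(params=[{'params': backbone.parameters()},
                              {'params': pfc_head.parameters()}],  # PFC group last
                      lr=0.1, momentum=0.9, weight_decay=5e-4)

pfc_ids = set(map(id, pfc_head.parameters()))
assert all(id(p) in pfc_ids for p in opt.param_groups[-1]['params'])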
Example No. 15
def main(args):
    seed = 2333
    seed = seed + rank
    torch.manual_seed(seed)
    np.random.seed(seed)

    torch.cuda.set_device(args.local_rank)
    cfg = get_config(args.config)

    os.makedirs(cfg.output, exist_ok=True)
    init_logging(rank, cfg.output)

    summary_writer = (SummaryWriter(
        log_dir=os.path.join(cfg.output, "tensorboard"))
                      if rank == 0 else None)
    train_loader = get_dataloader(cfg.rec,
                                  local_rank=args.local_rank,
                                  batch_size=cfg.batch_size,
                                  dali=cfg.dali)
    backbone = get_model(cfg.network,
                         dropout=0.0,
                         fp16=cfg.fp16,
                         num_features=cfg.embedding_size).cuda()

    backbone = torch.nn.parallel.DistributedDataParallel(
        module=backbone,
        broadcast_buffers=False,
        device_ids=[args.local_rank],
        bucket_cap_mb=16,
        find_unused_parameters=True)

    backbone.train()
    # FIXME using gradient checkpoint if there are some unused parameters will cause error
    backbone._set_static_graph()

    margin_loss = CombinedMarginLoss(64, cfg.margin_list[0],
                                     cfg.margin_list[1], cfg.margin_list[2],
                                     cfg.interclass_filtering_threshold)

    if cfg.optimizer == "sgd":
        module_partial_fc = PartialFC(margin_loss, cfg.embedding_size,
                                      cfg.num_classes, cfg.sample_rate,
                                      cfg.fp16)
        module_partial_fc.train().cuda()
        opt = torch.optim.SGD(params=[{
            "params": backbone.parameters()
        }, {
            "params": module_partial_fc.parameters()
        }],
                              lr=cfg.lr,
                              momentum=0.9,
                              weight_decay=cfg.weight_decay)

    elif cfg.optimizer == "adamw":
        module_partial_fc = PartialFCAdamW(margin_loss, cfg.embedding_size,
                                           cfg.num_classes, cfg.sample_rate,
                                           cfg.fp16)
        module_partial_fc.train().cuda()
        opt = torch.optim.AdamW(params=[{
            "params": backbone.parameters()
        }, {
            "params":
            module_partial_fc.parameters()
        }],
                                lr=cfg.lr,
                                weight_decay=cfg.weight_decay)
    else:
        raise ValueError("unsupported optimizer: {}".format(cfg.optimizer))

    cfg.total_batch_size = cfg.batch_size * world_size
    cfg.warmup_step = cfg.num_image // cfg.total_batch_size * cfg.warmup_epoch
    cfg.total_step = cfg.num_image // cfg.total_batch_size * cfg.num_epoch
    lr_scheduler = PolyScheduler(optimizer=opt,
                                 base_lr=cfg.lr,
                                 max_steps=cfg.total_step,
                                 warmup_steps=cfg.warmup_step)

    for key, value in cfg.items():
        num_space = 25 - len(key)
        logging.info(": " + key + " " * num_space + str(value))

    callback_verification = CallBackVerification(val_targets=cfg.val_targets,
                                                 rec_prefix=cfg.rec,
                                                 summary_writer=summary_writer)
    callback_logging = CallBackLogging(frequent=cfg.frequent,
                                       total_step=cfg.total_step,
                                       batch_size=cfg.batch_size,
                                       writer=summary_writer)

    loss_am = AverageMeter()
    start_epoch = 0
    global_step = 0
    amp = torch.cuda.amp.grad_scaler.GradScaler(growth_interval=100)

    for epoch in range(start_epoch, cfg.num_epoch):

        if isinstance(train_loader, DataLoader):
            train_loader.sampler.set_epoch(epoch)
        for _, (img, local_labels) in enumerate(train_loader):
            global_step += 1
            local_embeddings = backbone(img)
            loss: torch.Tensor = module_partial_fc(local_embeddings,
                                                   local_labels, opt)

            if cfg.fp16:
                amp.scale(loss).backward()
                amp.unscale_(opt)
                torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
                amp.step(opt)
                amp.update()
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
                opt.step()

            opt.zero_grad()
            lr_scheduler.step()

            with torch.no_grad():
                loss_am.update(loss.item(), 1)
                callback_logging(global_step, loss_am, epoch, cfg.fp16,
                                 lr_scheduler.get_last_lr()[0], amp)

                if global_step % cfg.verbose == 0 and global_step > 200:
                    callback_verification(global_step, backbone)

        path_pfc = os.path.join(cfg.output,
                                "softmax_fc_gpu_{}.pt".format(rank))
        torch.save(module_partial_fc.state_dict(), path_pfc)
        if rank == 0:
            path_module = os.path.join(cfg.output, "model.pt")
            torch.save(backbone.module.state_dict(), path_module)

        if cfg.dali:
            train_loader.reset()

    if rank == 0:
        path_module = os.path.join(cfg.output, "model.pt")
        torch.save(backbone.module.state_dict(), path_module)

        from torch2onnx import convert_onnx
        convert_onnx(backbone.module.cpu().eval(), path_module,
                     os.path.join(cfg.output, "model.onnx"))

    distributed.destroy_process_group()
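
This variant seeds each process with 2333 + rank, presumably so that sampling and augmentation differ across workers while each run stays reproducible. The seeding in isolation (RANK is set by torchrun / torch.distributed.launch; the default of 0 covers single-process runs):

import os
import numpy as np
import torch

rank = int(os.environ.get('RANK', '0'))
seed = 2333 + rank   # per-rank offset, as in the example
torch.manual_seed(seed)
np.random.seed(seed)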