Example #1
def handleDataset():
    print('initializing dataset {}'.format(opt.dataset))
    # this part does not need to be changed
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)

    # for query images
    queryloader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype)),
        batch_size=opt.test_batch,  # test_batch = 1
        num_workers=opt.workers,
        pin_memory=pin_memory)

    # for gallery (target) images
    galleryloader = DataLoader(
        ImageData(dataset.target, TestTransform(opt.datatype)),
        batch_size=opt.test_batch,
        num_workers=opt.workers,
        pin_memory=pin_memory)

    queryFliploader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch,
        num_workers=opt.workers,
        pin_memory=pin_memory)

    galleryFliploader = DataLoader(
        ImageData(dataset.target, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch,
        num_workers=opt.workers,
        pin_memory=pin_memory)

    return queryloader, galleryloader, queryFliploader, galleryFliploader
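Every example on this page feeds a list of (img_path, pid, camid) records through an ImageData dataset. That class is not shown in these snippets; the sketch below is a minimal stand-in, assuming the three-tuple record layout implied by how the training loops unpack batches (the real datasets/data_loader.py implementation may differ).

# Minimal sketch of an ImageData-style dataset; the (img_path, pid, camid)
# record layout is an assumption inferred from these examples.
from PIL import Image
from torch.utils.data import Dataset

class ImageDataSketch(Dataset):
    def __init__(self, dataset, transform=None):
        self.dataset = dataset  # list of (img_path, pid, camid) tuples
        self.transform = transform

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        img_path, pid, camid = self.dataset[index]
        img = Image.open(img_path).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, pid, camid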
Example #2
def test(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)

    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    if use_gpu:
        print('currently using GPU {}'.format(opt.gpu))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
        os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset)

    pin_memory = True if use_gpu else False

    queryloader = DataLoader(ImageData(dataset.query,
                                       TestTransform(opt.height, opt.width)),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(ImageData(dataset.gallery,
                                         TestTransform(opt.height, opt.width)),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    print('loading model ...')
    model, optim_policy = get_baseline_model(dataset.num_train_pids)
    # ckpt = torch.load(opt.load_model)
    # model.load_state_dict(ckpt['state_dict'])
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    reid_evaluator = ResNetEvaluator(model)
    reid_evaluator.evaluate(queryloader, galleryloader)
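Example #2 (and most examples below) redirects sys.stdout to a Logger so console output is also written to a file. The utility comes from utils.serialization and is not shown; a common tee-style implementation looks roughly like this (a sketch, not the project's exact code):

# Sketch of a tee-style Logger, as sys.stdout = Logger(...) above
# suggests; the project's actual class may differ in details.
import os
import sys

class LoggerSketch(object):
    def __init__(self, fpath=None):
        self.console = sys.stdout
        self.file = None
        if fpath is not None:
            os.makedirs(os.path.dirname(fpath), exist_ok=True)
            self.file = open(fpath, 'w')

    def write(self, msg):
        self.console.write(msg)
        if self.file is not None:
            self.file.write(msg)

    def flush(self):
        self.console.flush()
        if self.file is not None:
            self.file.flush()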
Example #3
def build_data_loader():
    logger.info("build train dataset")
    # dataset
    dataset = init_dataset(cfg.TRAIN.DATASET)
    sampler = RandomIdentitySampler(dataset.train, cfg.TRAIN.NUM_IDENTITIES)
    train_loader = DataLoader(ImageData(dataset.train, TrainTransformer()),
                              batch_size=cfg.TRAIN.BATCH_SIZE,
                              num_workers=cfg.TRAIN.NUM_WORKERS,
                              pin_memory=True,
                              sampler=sampler)

    query_loader = DataLoader(ImageData(dataset.query, TestTransformer()),
                              batch_size=cfg.TRAIN.BATCH_SIZE,
                              num_workers=cfg.TRAIN.NUM_WORKERS,
                              pin_memory=True,
                              shuffle=False)

    gallery_loader = DataLoader(ImageData(dataset.gallery, TestTransformer()),
                                batch_size=cfg.TRAIN.BATCH_SIZE,
                                num_workers=cfg.TRAIN.NUM_WORKERS,
                                pin_memory=True,
                                shuffle=False)
    return dataset, train_loader, query_loader, gallery_loader
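RandomIdentitySampler is what makes triplet-style training work: each batch contains several identities with a fixed number of instances per identity. Below is a minimal sketch of the usual P x K sampler; note the examples call it with either (data_source, num_instances) or (data_source, batch_size, num_instances), so the exact signature varies by repo.

# Sketch of a P*K identity sampler: draw num_instances images per
# randomly ordered identity. Signature and details are assumptions.
from collections import defaultdict
import numpy as np
from torch.utils.data.sampler import Sampler

class RandomIdentitySamplerSketch(Sampler):
    def __init__(self, data_source, num_instances=4):
        self.num_instances = num_instances
        self.index_dic = defaultdict(list)
        for index, (_, pid, _) in enumerate(data_source):
            self.index_dic[pid].append(index)
        self.pids = list(self.index_dic.keys())

    def __iter__(self):
        ret = []
        for i in np.random.permutation(len(self.pids)):
            t = self.index_dic[self.pids[i]]
            replace = len(t) < self.num_instances
            t = np.random.choice(t, size=self.num_instances, replace=replace)
            ret.extend(t.tolist())
        return iter(ret)

    def __len__(self):
        return len(self.pids) * self.num_instances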
Example #4
def load_data(dataset, pin_memory):
    dataloader = {}
    if opt.loss == 'softmax':
        trainloader = DataLoader(ImageData(dataset.train, TrainTransform()),
                                 shuffle=True,
                                 batch_size=opt.train_batch,
                                 num_workers=8,
                                 pin_memory=pin_memory,
                                 drop_last=True)
    else:
        trainloader = DataLoader(ImageData(dataset.train, TrainTransform()),
                                 sampler=RandomIdentitySampler(
                                     dataset.train, opt.train_batch,
                                     opt.num_instances),
                                 batch_size=opt.train_batch,
                                 num_workers=8,
                                 pin_memory=pin_memory,
                                 drop_last=True)
    dataloader['train'] = trainloader
    queryloader = DataLoader(ImageData(dataset.query, TestTransform()),
                             batch_size=opt.test_batch,
                             num_workers=8,
                             pin_memory=pin_memory)
    dataloader['query'] = queryloader
    galleryloader = DataLoader(ImageData(dataset.gallery, TestTransform()),
                               batch_size=opt.test_batch,
                               num_workers=8,
                               pin_memory=pin_memory)
    dataloader['gallery'] = galleryloader
    queryFliploader = DataLoader(ImageData(dataset.query, TestTransform(True)),
                                 batch_size=opt.test_batch,
                                 num_workers=8,
                                 pin_memory=pin_memory)
    dataloader['queryFlip'] = queryFliploader
    galleryFliploader = DataLoader(ImageData(dataset.gallery,
                                             TestTransform(True)),
                                   batch_size=opt.test_batch,
                                   num_workers=8,
                                   pin_memory=pin_memory)
    dataloader['galleryFlip'] = galleryFliploader
    return dataloader
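The 'Flip' loaders come in pairs with the plain test loaders because the evaluators support flip test-time augmentation: features from the original and horizontally flipped images are averaged before computing distances. A sketch of that averaging, assuming model(imgs) returns an (N, D) feature tensor (device handling omitted for brevity):

# Sketch of flip test-time augmentation: average features from a loader
# and its horizontally flipped twin, as the paired loaders above imply.
import torch

def extract_mean_features(model, loader, fliploader):
    model.eval()
    feats = []
    with torch.no_grad():
        for (imgs, _, _), (fimgs, _, _) in zip(loader, fliploader):
            f = model(imgs) + model(fimgs)
            feats.append(0.5 * f)
    return torch.cat(feats, dim=0)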
Example #5
def train(**kwargs):
    opt._parse(kwargs)
    opt.model_name = 'bfe_test'
    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(ImageData(dataset.train,
                                       TrainTransform(opt.datatype)),
                             sampler=RandomIdentitySampler(
                                 dataset.train, opt.num_instances),
                             batch_size=opt.train_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory,
                             drop_last=True)

    queryloader = DataLoader(ImageData(dataset.query,
                                       TestTransform(opt.datatype)),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(ImageData(dataset.gallery,
                                         TestTransform(opt.datatype)),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)
    queryFliploader = DataLoader(ImageData(dataset.query,
                                           TestTransform(opt.datatype, True)),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    galleryFliploader = DataLoader(ImageData(dataset.gallery,
                                             TestTransform(opt.datatype,
                                                           True)),
                                   batch_size=opt.test_batch,
                                   num_workers=opt.workers,
                                   pin_memory=pin_memory)

    print('initializing model ...')

    model = BFE(dataset.num_train_pids, 1.0, 0.33)

    optim_policy = model.get_optim_policy()

    if opt.pretrained_model:
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        # state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    if opt.evaluate:
        reid_evaluator.evaluate(queryloader,
                                galleryloader,
                                queryFliploader,
                                galleryFliploader,
                                re_ranking=opt.re_ranking,
                                savefig=opt.savefig)
        return

    # xent_criterion = nn.CrossEntropyLoss()
    xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids)

    if opt.loss == 'triplet':
        embedding_criterion = TripletLoss(opt.margin)
    elif opt.loss == 'lifted':
        embedding_criterion = LiftedStructureLoss(hard_mining=True)
    elif opt.loss == 'weight':
        embedding_criterion = Margin()

    def criterion(triplet_y, softmax_y, labels):
        losses = [embedding_criterion(output, labels)[0] for output in triplet_y] + \
                 [xent_criterion(output, labels) for output in softmax_y]
        loss = sum(losses)
        return loss

    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy,
                                    lr=opt.lr,
                                    momentum=0.9,
                                    weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy,
                                     lr=opt.lr,
                                     weight_decay=opt.weight_decay)

    start_epoch = opt.start_epoch
    # get trainer and evaluator
    reid_trainer = cls_tripletTrainer(opt, model, optimizer, criterion,
                                      summary_writer)

    def adjust_lr(optimizer, ep):
        if ep < 10:
            lr = opt.lr * 0.1 * (ep / 10.0)  # warm_up
        elif ep < 50:
            lr = opt.lr * (ep // 5 + 1)
        elif ep < 200:
            lr = opt.lr * 10.0
        elif ep < 300:
            lr = opt.lr
        else:
            lr = opt.lr * 0.1
        for p in optimizer.param_groups:
            p['lr'] = lr

    # start training
    best_rank1 = opt.best_rank
    best_epoch = 0
    for epoch in range(start_epoch, opt.max_epoch):
        if opt.adjust_lr:
            adjust_lr(optimizer, epoch + 1)
        reid_trainer.train(epoch, trainloader)

        # evaluate and save a checkpoint every eval_step epochs and at the last epoch
        if opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0 or (
                epoch + 1) == opt.max_epoch:
            if opt.mode == 'class':
                rank1 = test(model, queryloader)
            else:
                rank1 = reid_evaluator.evaluate(queryloader, galleryloader,
                                                queryFliploader,
                                                galleryFliploader)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch + 1
            },
                            is_best=is_best,
                            save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) +
                            '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))
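CrossEntropyLabelSmooth, used instead of nn.CrossEntropyLoss throughout these examples, mixes the one-hot targets with a uniform distribution. A sketch of the standard re-ID implementation (epsilon=0.1 is the usual default, assumed here):

# Sketch of label-smoothed cross-entropy; the project's class may
# differ slightly, this follows the common re-ID baseline version.
import torch
import torch.nn as nn

class CrossEntropyLabelSmoothSketch(nn.Module):
    def __init__(self, num_classes, epsilon=0.1):
        super().__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        log_probs = self.logsoftmax(inputs)
        targets = torch.zeros_like(log_probs).scatter_(
            1, targets.unsqueeze(1), 1)
        # mix one-hot targets with a uniform distribution
        targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
        return (-targets * log_probs).mean(0).sum()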
Example #6
def train(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)

    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset)

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    if 'triplet' in opt.model_name:
        trainloader = DataLoader(
            ImageData(dataset.train, TrainTransform(opt.height, opt.width)),
            sampler=RandomIdentitySampler(dataset.train, opt.num_instances),
            batch_size=opt.train_batch,
            num_workers=opt.workers,
            pin_memory=pin_memory,
            drop_last=True)
    else:
        trainloader = DataLoader(ImageData(
            dataset.train, TrainTransform(opt.height, opt.width)),
                                 batch_size=opt.train_batch,
                                 shuffle=True,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    queryloader = DataLoader(ImageData(dataset.query,
                                       TestTransform(opt.height, opt.width)),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(ImageData(dataset.gallery,
                                         TestTransform(opt.height, opt.width)),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    print('initializing model ...')
    if opt.model_name == 'softmax' or opt.model_name == 'softmax_triplet':
        model, optim_policy = get_baseline_model(dataset.num_train_pids)
    elif opt.model_name == 'triplet':
        model, optim_policy = get_baseline_model(num_classes=None)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    # xent_criterion = nn.CrossEntropyLoss()
    xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids)
    tri_criterion = TripletLoss(opt.margin)

    def cls_criterion(cls_scores, targets):
        cls_loss = xent_criterion(cls_scores, targets)
        return cls_loss

    def triplet_criterion(feat, targets):
        triplet_loss, _, _ = tri_criterion(feat, targets)
        return triplet_loss

    def cls_tri_criterion(cls_scores, feat, targets):
        cls_loss = xent_criterion(cls_scores, targets)
        triplet_loss, _, _ = tri_criterion(feat, targets)
        loss = cls_loss + triplet_loss
        return loss

    # get optimizer
    optimizer = torch.optim.Adam(optim_policy,
                                 lr=opt.lr,
                                 weight_decay=opt.weight_decay)

    def adjust_lr(optimizer, ep):
        if ep < 20:
            lr = 1e-4 * (ep + 1) / 2
        elif ep < 80:
            lr = 1e-3 * opt.num_gpu
        elif ep < 180:
            lr = 1e-4 * opt.num_gpu
        elif ep < 300:
            lr = 1e-5 * opt.num_gpu
        elif ep < 320:
            lr = 1e-5 * 0.1**((ep - 320) / 80) * opt.num_gpu
        elif ep < 400:
            lr = 1e-6
        elif ep < 480:
            lr = 1e-4 * opt.num_gpu
        else:
            lr = 1e-5 * opt.num_gpu
        for p in optimizer.param_groups:
            p['lr'] = lr

    start_epoch = opt.start_epoch
    if use_gpu:
        model = nn.DataParallel(model).cuda()

    # get trainer and evaluator
    if opt.model_name == 'softmax':
        reid_trainer = clsTrainer(opt, model, optimizer, cls_criterion,
                                  summary_writer)
    elif opt.model_name == 'softmax_triplet':
        reid_trainer = cls_tripletTrainer(opt, model, optimizer,
                                          cls_tri_criterion, summary_writer)
    elif opt.model_name == 'triplet':
        reid_trainer = tripletTrainer(opt, model, optimizer, triplet_criterion,
                                      summary_writer)
    reid_evaluator = ResNetEvaluator(model)

    # start training
    best_rank1 = -np.inf
    best_epoch = 0
    for epoch in range(start_epoch, opt.max_epoch):
        if opt.step_size > 0:
            adjust_lr(optimizer, epoch + 1)
        reid_trainer.train(epoch, trainloader)

        # evaluate and save a checkpoint every eval_step epochs and at the last epoch
        if opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0 or (
                epoch + 1) == opt.max_epoch:
            rank1 = reid_evaluator.evaluate(queryloader, galleryloader)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch + 1,
            },
                            is_best=is_best,
                            save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) +
                            '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))
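The hand-rolled adjust_lr above sets absolute learning rates per epoch. The same warm-up plus step shape can be expressed with torch.optim.lr_scheduler.LambdaLR, which multiplies each param group's base lr by a per-epoch factor. A sketch (assuming the optimizer's base lr is the post-warm-up peak; the breakpoints mirror the code above but are illustrative):

# Sketch: an equivalent warm-up + step schedule via LambdaLR.
import torch

def lr_factor(ep):
    if ep < 20:
        return (ep + 1) / 20.0  # linear warm-up
    elif ep < 80:
        return 1.0
    elif ep < 180:
        return 0.1
    return 0.01

# scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_factor)
# then call scheduler.step() once per epoch instead of adjust_lr(optimizer, ep)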
Example #7
def train(**kwargs):
    opt._parse(kwargs)
    # opt.lr = 0.00002
    opt.model_name = 'AlignedReid'
    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(ImageData(dataset.train,
                                       TrainTransform(opt.datatype)),
                             sampler=RandomIdentitySampler(
                                 dataset.train, opt.num_instances),
                             batch_size=opt.train_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory,
                             drop_last=True)

    queryloader = DataLoader(ImageData(dataset.query,
                                       TestTransform(opt.datatype)),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(ImageData(dataset.gallery,
                                         TestTransform(opt.datatype)),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)

    queryFliploader = queryloader
    galleryFliploader = galleryloader
    # queryFliploader = DataLoader(
    #     ImageData(dataset.query, TestTransform(opt.datatype, True)),
    #     batch_size=opt.test_batch, num_workers=opt.workers,
    #     pin_memory=pin_memory
    # )
    #
    # galleryFliploader = DataLoader(
    #     ImageData(dataset.gallery, TestTransform(opt.datatype, True)),
    #     batch_size=opt.test_batch, num_workers=opt.workers,
    #     pin_memory=pin_memory
    # )

    print('initializing model ...')
    model = AlignedResNet50(num_classes=dataset.num_train_pids,
                            loss={'softmax', 'metric'},
                            aligned=True,
                            use_gpu=use_gpu)

    optim_policy = model.get_optim_policy()

    if opt.pretrained_model:
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        # state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = AlignedEvaluator(model)

    if opt.evaluate:
        #rank1 = test(model, queryloader, galleryloader, use_gpu)
        reid_evaluator.evaluate(queryloader,
                                galleryloader,
                                queryFliploader,
                                galleryFliploader,
                                re_ranking=opt.re_ranking,
                                savefig=opt.savefig,
                                test_distance='global')
        return

    # xent_criterion = nn.CrossEntropyLoss()
    xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids,
                                             use_gpu=use_gpu)

    embedding_criterion = TripletLossAlignedReID(margin=opt.margin)

    # def criterion(triplet_y, softmax_y, labels):
    #     losses = [embedding_criterion(output, labels)[0] for output in triplet_y] + \
    #              [xent_criterion(output, labels) for output in softmax_y]
    #     loss = sum(losses)
    #     return loss

    def criterion(outputs, features, local_features, labels):
        if opt.htri_only:
            if isinstance(features, tuple):
                global_loss, local_loss = DeepSupervision(
                    embedding_criterion, features, labels)
            else:
                global_loss, local_loss = embedding_criterion(
                    features, labels, local_features)
        else:
            if isinstance(outputs, tuple):
                xent_loss = DeepSupervision(xent_criterion, outputs, labels)
            else:
                xent_loss = xent_criterion(outputs, labels)

            if isinstance(features, tuple):
                global_loss, local_loss = DeepSupervision(
                    embedding_criterion, features, labels)
            else:
                global_loss, local_loss = embedding_criterion(
                    features, labels, local_features)
        loss = xent_loss + global_loss + local_loss
        return loss

    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy,
                                    lr=opt.lr,
                                    momentum=0.9,
                                    weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy,
                                     lr=opt.lr,
                                     weight_decay=opt.weight_decay)

    start_epoch = opt.start_epoch
    # get trainer and evaluator
    reid_trainer = AlignedTrainer(opt, model, optimizer, criterion,
                                  summary_writer)

    def adjust_lr(optimizer, ep):
        if ep < 50:
            lr = opt.lr * (ep // 5 + 1)
        elif ep < 200:
            lr = opt.lr * 10
        elif ep < 300:
            lr = opt.lr
        else:
            lr = opt.lr * 0.1
        for p in optimizer.param_groups:
            p['lr'] = lr

    # start training
    best_rank1 = opt.best_rank
    best_epoch = 0
    print('start train......')
    for epoch in range(start_epoch, opt.max_epoch):
        if opt.adjust_lr:
            adjust_lr(optimizer, epoch + 1)

        reid_trainer.train(epoch, trainloader)

        # evaluate and save a checkpoint every eval_step epochs and at the last epoch
        if opt.eval_step > 0 and (epoch + 1) % opt.eval_step == 0 or (
                epoch + 1) == opt.max_epoch:

            # checkpoint before evaluating, so an out-of-memory during eval does not lose the model
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch + 1
            },
                            is_best=0,
                            save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) +
                            '.pth.tar')

            if opt.mode == 'class':
                rank1 = test(model, queryloader)
            else:
                rank1 = reid_evaluator.evaluate(queryloader, galleryloader,
                                                queryFliploader,
                                                galleryFliploader)
                #rank1 = test(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            if is_best:
                save_checkpoint({
                    'state_dict': state_dict,
                    'epoch': epoch + 1
                },
                                is_best=is_best,
                                save_dir=opt.save_dir,
                                filename='checkpoint_ep' + str(epoch + 1) +
                                '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(
        best_rank1, best_epoch))
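TripletLoss(opt.margin) in these examples returns a (loss, dist_ap, dist_an) triple, which is why callers unpack three values or index [0]. A sketch of the usual batch-hard variant behind that interface:

# Sketch of batch-hard triplet loss: for each anchor, take the hardest
# positive and hardest negative in the batch. The project's class may
# differ; the three-value return matches how it is called above.
import torch
import torch.nn as nn

class TripletLossSketch(nn.Module):
    def __init__(self, margin=0.3):
        super().__init__()
        self.ranking_loss = nn.MarginRankingLoss(margin=margin)

    def forward(self, feat, targets):
        dist = torch.cdist(feat, feat, p=2)  # (n, n) pairwise distances
        mask = targets.unsqueeze(0) == targets.unsqueeze(1)
        dist_ap = dist.masked_fill(~mask, float('-inf')).max(dim=1).values
        dist_an = dist.masked_fill(mask, float('inf')).min(dim=1).values
        y = torch.ones_like(dist_an)
        loss = self.ranking_loss(dist_an, dist_ap, y)  # want an > ap + margin
        return loss, dist_ap, dist_an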
Example #8
def train(**kwargs):
    opt._parse(kwargs)

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    # os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    print(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(
        ImageData(dataset.train, TrainTransform(opt.datatype)),
        sampler=RandomIdentitySampler(dataset.train, opt.num_instances),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True
    )

    queryloader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    galleryloader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )
    queryFliploader = DataLoader(
        ImageData(dataset.query, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    galleryFliploader = DataLoader(
        ImageData(dataset.gallery, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    print('initializing model ...')
    if opt.model_name == 'softmax' or opt.model_name == 'softmax_triplet':
        model = ResNetBuilder(dataset.num_train_pids, 1, True)
    elif opt.model_name == 'triplet':
        model = ResNetBuilder(None, 1, True)
    elif opt.model_name == 'CBDB':
        if opt.datatype == "person":
            model = CBDB(dataset.num_train_pids, 1.0, 0.33)
        else:
            model = CBDB(dataset.num_train_pids, 0.5, 0.5)
    elif opt.model_name == 'ide':
        model = IDE(dataset.num_train_pids)
    elif opt.model_name == 'resnet':
        model = Resnet(dataset.num_train_pids)
 
    optim_policy = model.get_optim_policy()

    if opt.pretrained_model:
        state_dict = torch.load(opt.pretrained_model)['state_dict']
        #state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        model.load_state_dict(state_dict, False)
        print('load pretrained model ' + opt.pretrained_model)
    print('model size: {:.5f}M'.format(sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    if opt.evaluate:
        reid_evaluator.evaluate(queryloader, galleryloader,
                                queryFliploader, galleryFliploader,
                                re_ranking=opt.re_ranking, savefig=opt.savefig)
        return
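save_checkpoint appears in every training loop on this page. The helper is small; here is a sketch consistent with how it is called (a dict with state_dict and epoch, an is_best flag, a directory and a filename), though the project's utils.serialization version may differ:

# Sketch of a save_checkpoint helper: serialize the state and, when
# is_best, copy it to a fixed best-model file.
import os
import os.path as osp
import shutil
import torch

def save_checkpoint_sketch(state, is_best, save_dir, filename):
    os.makedirs(save_dir, exist_ok=True)
    fpath = osp.join(save_dir, filename)
    torch.save(state, fpath)
    if is_best:
        shutil.copy(fpath, osp.join(save_dir, 'best_model.pth.tar'))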
Example #9
def train(**kwargs):
    opt._parse(kwargs)  # parse all program options

    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    trainloader = DataLoader(ImageData(dataset.train,
                                       TrainTransform(opt.datatype)),
                             batch_size=opt.train_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory,
                             drop_last=True)

    queryloader = DataLoader(ImageData(dataset.query,
                                       TestTransform(opt.datatype)),
                             batch_size=opt.test_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory)

    galleryloader = DataLoader(ImageData(dataset.gallery,
                                         TestTransform(opt.datatype)),
                               batch_size=opt.test_batch,
                               num_workers=opt.workers,
                               pin_memory=pin_memory)
    queryFliploader = DataLoader(ImageData(dataset.query,
                                           TestTransform(opt.datatype, True)),
                                 batch_size=opt.test_batch,
                                 num_workers=opt.workers,
                                 pin_memory=pin_memory)

    galleryFliploader = DataLoader(ImageData(dataset.gallery,
                                             TestTransform(opt.datatype,
                                                           True)),
                                   batch_size=opt.test_batch,
                                   num_workers=opt.workers,
                                   pin_memory=pin_memory)

    print('initializing model ...')
    if opt.model_name == 'softmax' or opt.model_name == 'softmax_triplet':
        model = ResNetBuilder(dataset.num_train_pids, 1, True)
    elif opt.model_name == 'triplet':
        model = ResNetBuilder(None, 1, True)
    elif opt.model_name == 'bfe':
        if opt.datatype == "person":
            model = BFE(dataset.num_train_pids, 1.0, 0.33)
        else:
            model = BFE(dataset.num_train_pids, 0.5, 0.5)
    elif opt.model_name == 'ide':
        model = IDE(dataset.num_train_pids)
    elif opt.model_name == 'resnet':
        model = Resnet(dataset.num_train_pids)
    elif opt.model_name == 'strongBaseline':
        model = StrongBaseline(dataset.num_train_pids)

    optim_policy = model.get_optim_policy()

    # update model: swap in an ImageNet-pretrained resnet18 (from
    # torchvision.models) with a new 10-class head; note that assigning
    # fc.out_features alone would not resize the layer's weights
    model = resnet18(True)
    model.fc = nn.Linear(model.fc.in_features, 10)
    optim_policy = model.parameters()  # re-collect params so the optimizer below trains this model

    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy,
                                    lr=opt.lr,
                                    momentum=0.9,
                                    weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy,
                                     lr=opt.lr,
                                     weight_decay=opt.weight_decay)

    #xent_criterion = nn.CrossEntropyLoss()
    criterion = CrossEntropyLabelSmooth(10)
    epochs = 100
    best = 0.0
    b_e = 0
    for e in range(epochs):
        model.train()
        for i, inputs in enumerate(trainloader):
            imgs, pid, _ = inputs
            if use_gpu:
                imgs, pid = imgs.cuda(), pid.cuda()
            outputs = model(imgs)
            loss = criterion(outputs, pid)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print('epoch=%s \t batch loss=%s' % (e, loss.item()))
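The loop above prints the batch loss but never measures accuracy. A minimal evaluation pass to pair with it, assuming the loaders yield (imgs, pid, camid) as elsewhere in these examples:

# Sketch of a matching evaluation pass for the loop above.
import torch

def evaluate_sketch(model, loader, use_gpu=True):
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for imgs, pid, _ in loader:
            if use_gpu:
                imgs, pid = imgs.cuda(), pid.cuda()
            preds = model(imgs).argmax(dim=1)
            correct += (preds == pid).sum().item()
            total += pid.numel()
    return correct / max(total, 1)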
Example #10
def train(**kwargs):
    opt._parse(kwargs)
    # opt.lr = 0.00002
    opt.model_name = 'PCB'
    # set random seed and cudnn benchmark
    torch.manual_seed(opt.seed)
    os.makedirs(opt.save_dir, exist_ok=True)
    use_gpu = torch.cuda.is_available()
    sys.stdout = Logger(osp.join(opt.save_dir, 'log_train.txt'))

    print('=========user config==========')
    pprint(opt._state_dict())
    print('============end===============')

    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(opt.seed)
    else:
        print('currently using cpu')

    print('initializing dataset {}'.format(opt.dataset))
    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)
    tgt_dataset = data_manager.init_dataset(name=opt.tgt_dataset, mode=opt.mode)

    pin_memory = True if use_gpu else False

    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(
        ImageData(dataset.train, TrainTransform(opt.datatype)),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True
    )

    tgt_trainloader = DataLoader(
        ImageData(tgt_dataset.train, TrainTransform(opt.datatype)),
        batch_size=opt.train_batch, num_workers=opt.workers,
        pin_memory=pin_memory, drop_last=True
    )

    tgt_queryloader = DataLoader(
        ImageData(tgt_dataset.query, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    tgt_galleryloader = DataLoader(
        ImageData(tgt_dataset.gallery, TestTransform(opt.datatype)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )
    tgt_queryFliploader = DataLoader(
        ImageData(tgt_dataset.query, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    tgt_galleryFliploader = DataLoader(
        ImageData(tgt_dataset.gallery, TestTransform(opt.datatype, True)),
        batch_size=opt.test_batch, num_workers=opt.workers,
        pin_memory=pin_memory
    )

    print('initializing model ...')
    model = PCB(dataset.num_train_pids)


    optim_policy = model.get_optim_policy()

    start_epoch = opt.start_epoch

    if opt.pretrained_model:
        checkpoint = torch.load(opt.pretrained_model)
        state_dict = checkpoint['state_dict']

        # state_dict = {k: v for k, v in state_dict.items() \
        #        if not ('reduction' in k or 'softmax' in k)}
        try:
            model.load_state_dict(state_dict, False)
            print('load pretrained model ' + opt.pretrained_model)
        except:
            RuntimeError('please keep the same size with source dataset..')
    else:
        raise RuntimeError('please load a pre-trained model...')

    print('model size: {:.5f}M'.format(sum(p.numel() for p in model.parameters()) / 1e6))


    if use_gpu:
        model = nn.DataParallel(model).cuda()
    reid_evaluator = ResNetEvaluator(model)

    if opt.evaluate:
        print('transfer directly....... ')
        reid_evaluator.evaluate(tgt_queryloader, tgt_galleryloader,
                                tgt_queryFliploader, tgt_galleryFliploader, re_ranking=opt.re_ranking, savefig=opt.savefig)
        return


    #xent_criterion = CrossEntropyLabelSmooth(dataset.num_train_pids)


    embedding_criterion = SelfTraining_TripletLoss(margin=0.5, num_instances=4)

    # def criterion(triplet_y, softmax_y, labels):
    #     losses = [embedding_criterion(output, labels)[0] for output in triplet_y] + \
    #              [xent_criterion(output, labels) for output in softmax_y]
    #     loss = sum(losses)
    #     return loss


    def criterion(triplet_y, softmax_y, labels):
        # losses = [torch.sum(torch.stack([xent_criterion(logits, labels) for logits in softmax_y]))]
        losses = [torch.sum(torch.stack([embedding_criterion(output, labels) for output in triplet_y]))]
        loss = sum(losses)
        return loss


    # get optimizer
    if opt.optim == "sgd":
        optimizer = torch.optim.SGD(optim_policy, lr=opt.lr, momentum=0.9, weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam(optim_policy, lr=opt.lr, weight_decay=opt.weight_decay)


    # get trainer and evaluator
    reid_trainer = PCBTrainer(opt, model, optimizer, criterion, summary_writer)

    def adjust_lr(optimizer, ep):
        if ep < 50:
            lr = opt.lr * (ep // 5 + 1)
        elif ep < 200:
            lr = opt.lr * 10
        elif ep < 300:
            lr = opt.lr
        else:
            lr = opt.lr * 0.1
        for p in optimizer.param_groups:
            p['lr'] = lr

    # start training
    best_rank1 = opt.best_rank
    best_epoch = 0


    print('transfer directly.....')
    reid_evaluator.evaluate(tgt_queryloader, tgt_galleryloader,
                            tgt_queryFliploader, tgt_galleryFliploader, re_ranking=opt.re_ranking, savefig=opt.savefig)


    for iter_n in range(start_epoch, opt.max_epoch):
        if opt.lambda_value == 0:
            source_features = 0
        else:
            # get source datas' feature
            print('Iteration {}: Extracting Source Dataset Features...'.format(iter_n + 1))
            source_features, _ = extract_pcb_features(model, trainloader)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(iter_n + 1))
        target_features, _ = extract_pcb_features(model, tgt_trainloader)
        # synchronization feature order with dataset.train

        # calculate distance and rerank result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
        rerank_dist = re_ranking(
            source_features, target_features, lambda_value=opt.lambda_value)
        if iter_n == 0:
            # DBSCAN cluster
            tri_mat = np.triu(rerank_dist, 1)  # tri_mat.dim=2, take the upper triangle
            tri_mat = tri_mat[np.nonzero(tri_mat)]  # tri_mat.dim=1
            tri_mat = np.sort(tri_mat, axis=None)
            top_num = np.round(opt.rho * tri_mat.size).astype(int)
            eps = tri_mat[:top_num].mean()  # DBSCAN clustering radius
            print('eps in cluster: {:.3f}'.format(eps))
            cluster = DBSCAN(eps=eps, min_samples=4, metric='precomputed', n_jobs=8)

        # select & cluster images as training set of this epochs
        print('Clustering and labeling...')
        labels = cluster.fit_predict(rerank_dist)
        del rerank_dist
        del source_features
        del target_features
        try:
            gc.collect()
        except:
            print('cannot collect')

        num_ids = len(set(labels)) - 1
        print('Iteration {} have {} training ids'.format(iter_n + 1, num_ids))
        # generate new dataset
        new_dataset = []
        for (fname, _, _), label in zip(tgt_dataset.train, labels):
            if label == -1:
                continue
            # with camid fixed to 0, _parsing_input in trainer.py and the sampler code need no changes
            new_dataset.append((fname, label, 0))
        print('Iteration {} have {} training images'.format(iter_n + 1, len(new_dataset)))

        selftrain_loader = DataLoader(
            ImageData(new_dataset, TrainTransform(opt.datatype)),
            sampler=RandomIdentitySampler(new_dataset, opt.num_instances),
            batch_size=opt.train_batch, num_workers=opt.workers,
            pin_memory=pin_memory, drop_last=True
        )

        # train model with new generated dataset
        trainer = PCBTrainer(opt, model, optimizer, criterion, summary_writer)
        reid_evaluator = ResNetEvaluator(model)
        # Start training
        for epoch in range(opt.selftrain_iterations):
            trainer.train(epoch, selftrain_loader)


        # evaluate and save a checkpoint every eval_step iterations and at the last iteration
        if opt.eval_step > 0 and (iter_n + 1) % opt.eval_step == 0 or (iter_n + 1) == opt.max_epoch:
            # checkpoint before evaluating, so an out-of-memory during eval does not lose the model
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({'state_dict': state_dict, 'epoch': iter_n + 1},
                            is_best=0, save_dir=opt.save_dir,
                            filename='checkpoint_ep' + str(iter_n + 1) + '.pth.tar')


            if (iter_n + 1) % (opt.eval_step * 4) == 0:
                if opt.mode == 'class':
                    rank1 = test(model, tgt_queryloader)
                else:
                    rank1 = reid_evaluator.evaluate(tgt_queryloader, tgt_galleryloader, tgt_queryFliploader,
                                                    tgt_galleryFliploader)
                is_best = rank1 > best_rank1
                if is_best:
                    best_rank1 = rank1
                    best_epoch = iter_n + 1

                if use_gpu:
                    state_dict = model.module.state_dict()
                else:
                    state_dict = model.state_dict()

                if is_best:
                    save_checkpoint({'state_dict': state_dict, 'epoch': iter_n + 1},
                                    is_best=is_best, save_dir=opt.save_dir,
                                    filename='checkpoint_ep' + str(iter_n + 1) + '.pth.tar')

    print('Best rank-1 {:.1%}, achieved at epoch {}'.format(best_rank1, best_epoch))
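The self-training loop above hinges on extract_pcb_features, which is not shown in the snippet. A sketch of what such a feature-extraction pass typically does (the exact return values here are assumptions):

# Sketch of an extract_pcb_features-style pass: run the loader through
# the model under no_grad and stack features on the CPU.
import torch

def extract_features_sketch(model, loader, use_gpu=True):
    model.eval()
    feats, pids = [], []
    with torch.no_grad():
        for imgs, pid, _ in loader:
            if use_gpu:
                imgs = imgs.cuda()
            feats.append(model(imgs).cpu())
            pids.extend(pid.tolist())
    return torch.cat(feats, dim=0), pids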
Example #11
def trainer(data_pth, a, b, _time=0, layers=18):
    seed = 0

    # dataset options
    height = 128
    width = 128

    # optimization options
    optim = 'Adam'
    max_epoch = 20
    train_batch = 64
    test_batch = 64
    lr = 0.1
    step_size = 40
    gamma = 0.1
    weight_decay = 5e-4
    momentum = 0.9
    test_margin = b
    margin = a
    num_instances = 4
    num_gpu = 1

    # model options
    last_stride = 1
    pretrained_model_18 = 'model/resnet18-5c106cde.pth'
    pretrained_model_50 = 'model/resnet50-19c8e357.pth'
    pretrained_model_34 = 'model/resnet34-333f7ec4.pth'
    pretrained_model_101 = 'model/resnet101-5d3b4d8f.pth'
    pretrained_model_152 = 'model/resnet152-b121ed2d.pth'

    # miscs
    print_freq = 20
    eval_step = 1
    save_dir = 'model/pytorch-ckpt/time%d' % _time
    workers = 1
    start_epoch = 0


    torch.manual_seed(seed)
    use_gpu = torch.cuda.is_available()
    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(seed)
    else:
        print('currently using cpu')

    pin_memory = True if use_gpu else False

    print('initializing dataset {}'.format('Tableware'))
    dataset = Tableware(data_pth)

    trainloader = DataLoader(
        ImageData(dataset.train, TrainTransform(height, width)),
        batch_size=train_batch, num_workers=workers,
        pin_memory=pin_memory, drop_last=True
    )

    # testloader = DataLoader(
    #     ImageData(dataset.test, TestTransform(height, width)),
    #     batch_size=test_batch, num_workers=workers,
    #     pin_memory=pin_memory, drop_last=True
    # )

    # model, optim_policy = get_baseline_model(model_path=pretrained_model)
    if layers == 18:
        model, optim_policy = get_baseline_model(model_path=pretrained_model_18, layers=18)
    else:
        model, optim_policy = get_baseline_model(model_path=pretrained_model_50, layers=50)
    # model, optim_policy = get_baseline_model(model_path=pretrained_model_18, layers=18)
    # model, optim_policy = get_baseline_model(model_path=pretrained_model_34, layers=34)
    # model, optim_policy = get_baseline_model(model_path=pretrained_model_101, layers=101)
    # model = load_model(model, model_path='./model/pytorch-ckpt/87_layers18_margin20_epoch87.tar')
    print('model\'s parameters size: {:.5f} M'.format(sum(p.numel() for p in model.parameters()) / 1e6))
    inner_dist = 0
    outer_dist = 0
    max_outer = 0
    min_outer = 0
    max_inner = 0
    min_inner = 0

    tri_criterion = TripletLoss(margin)

    # get optimizer
    optimizer = torch.optim.Adam(
        optim_policy, lr=lr, weight_decay=weight_decay
    )

    def adjust_lr(optimizer, ep):
        if ep < 20:
            lr = 1e-4 * (ep + 1) / 2
        elif ep < 80:
            lr = 1e-3 * num_gpu
        elif ep < 180:
            lr = 1e-4 * num_gpu
        elif ep < 300:
            lr = 1e-5 * num_gpu
        elif ep < 320:
            lr = 1e-5 * 0.1 ** ((ep - 320) / 80) * num_gpu
        elif ep < 400:
            lr = 1e-6
        elif ep < 480:
            lr = 1e-4 * num_gpu
        else:
            lr = 1e-5 * num_gpu
        for p in optimizer.param_groups:
            p['lr'] = lr

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    evaluator = Evaluator(model)

    for epoch in range(start_epoch, max_epoch):
        if step_size > 0:
            adjust_lr(optimizer, epoch + 1)
        next_margin = margin


        # train and checkpoint every eval_step epochs and at the last epoch
        if eval_step > 0 and (epoch + 1) % eval_step == 0 or (epoch + 1) == max_epoch:
            save_record_path = 'margin_' + str(margin) + '_epoch_' + str(epoch + 1) + '.txt'
            _t1 = time.time()
            train(model, optimizer, tri_criterion, epoch, print_freq, trainloader, data_pth=data_pth)
            _t2 = time.time()
            print('time for training:', '%.2f' % (_t2 - _t1), 's')

            """
            acc, inner_dist, outer_dist, max_outer, min_outer, max_inner, min_inner = evaluator.evaluate(testloader, test_margin, save_record_path)
            print('margin:{}, epoch:{}, acc:{}'.format(margin, epoch+1, acc))
            f = open('record.txt', 'a')
            f.write('margin:{}, epoch:{}, acc:{}\n'.format(margin, epoch+1, acc))
            f.close()
            """

            is_best = False
            # save_model_path = 'new_margin({})_epoch({}).pth.tar'.format(margin, epoch+1)
            save_model_path = 'time{}_layers{}_margin{}_epoch{}.tar'.format(_time, layers, margin, epoch+1)
            # save_model_path = 'layers34_margin{}_epoch{}.tar'.format(margin, epoch+1)
            # save_model_path = 'layers101_margin{}_epoch{}.tar'.format(margin, epoch+1)
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()


            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch + 1,
            }, is_best=is_best, save_dir=save_dir, filename=save_model_path)
            
            model.eval()
            acc = do_get_feature_and_t(model, margin=20, epoch=1)

            margin = next_margin
    return save_model_path, inner_dist, outer_dist, max_outer, min_outer, max_inner, min_inner
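The commented-out load_model call above restores weights saved by save_checkpoint. A sketch, using strict=False just like the load_state_dict(state_dict, False) calls in earlier examples so that head-size mismatches are tolerated:

# Sketch of a load_model helper; the project's version may differ.
import torch

def load_model_sketch(model, model_path):
    ckpt = torch.load(model_path, map_location='cpu')
    state_dict = ckpt.get('state_dict', ckpt)
    model.load_state_dict(state_dict, strict=False)
    return model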
Example #12
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter  # assumed; torch.utils.tensorboard also works

from config import opt
from datasets import data_manager
from datasets.data_loader import ImageData
from datasets.samplers import RandomIdentitySampler
from utils.serialization import Logger, save_checkpoint
from utils.transforms import TestTransform, TrainTransform
# PlateNet, Tableware, get_baseline_model, TripletLoss, Evaluator and
# train are project-specific imports not shown in the original snippet

use_gpu = True

if __name__ == '__main__':

    dataset = data_manager.init_dataset(name=opt.dataset, mode=opt.mode)
    pin_memory = True if use_gpu else False
    summary_writer = SummaryWriter(osp.join(opt.save_dir, 'tensorboard_log'))

    trainloader = DataLoader(ImageData(dataset.train,
                                       TrainTransform(opt.datatype)),
                             sampler=RandomIdentitySampler(
                                 dataset.train, opt.num_instances),
                             batch_size=opt.train_batch,
                             num_workers=opt.workers,
                             pin_memory=pin_memory,
                             drop_last=True)

    model = PlateNet(batch_size=opt.train_batch, n_class=66)
    optim_policy = model.get_optim_policy()
    if use_gpu:
        model = nn.DataParallel(model).cuda()
    print('model ready')

    criterion = nn.CTCLoss()
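The __main__ snippet above stops right after constructing nn.CTCLoss. For reference, one CTC training step would look like the sketch below; every concrete shape (18 time steps, 7-character plates) is an illustrative assumption, only n_class=66 comes from the code above.

# Sketch of one CTC step; shapes follow torch.nn.CTCLoss:
# log_probs (T, N, C), targets (N, S), plus per-sample lengths.
import torch
import torch.nn as nn

ctc = nn.CTCLoss(blank=0)
T, N, C = 18, 4, 66                      # time steps, batch, classes (C from n_class above)
log_probs = torch.randn(T, N, C, requires_grad=True).log_softmax(2)
targets = torch.randint(1, C, (N, 7))    # 7-character plates, labels 1..C-1
input_lengths = torch.full((N,), T, dtype=torch.long)
target_lengths = torch.full((N,), 7, dtype=torch.long)
loss = ctc(log_probs, targets, input_lengths, target_lengths)
loss.backward()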
def trainer(data_pth):
    seed = 0

    # dataset options
    height = 128
    width = 128

    # optimization options
    optim = 'Adam'
    max_epoch = 1
    train_batch = 64
    test_batch = 64
    lr = 0.1
    step_size = 40
    gamma = 0.1
    weight_decay = 5e-4
    momentum = 0.9
    test_margin = 10.0
    margin = 1.0
    num_instances = 4
    num_gpu = 1

    # model options
    last_stride = 1
    pretrained_model = 'model/resnet50-19c8e357.pth'

    # miscs
    print_freq = 20
    eval_step = 1
    save_dir = 'model/pytorch-ckpt/'
    workers = 1
    start_epoch = 0

    torch.manual_seed(seed)
    use_gpu = torch.cuda.is_available()
    if use_gpu:
        print('currently using GPU')
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(seed)
    else:
        print('currently using cpu')

    pin_memory = True if use_gpu else False

    print('initializing dataset {}'.format('Tableware'))
    dataset = Tableware(data_pth)

    trainloader = DataLoader(ImageData(dataset.train,
                                       TrainTransform(height, width)),
                             batch_size=train_batch,
                             num_workers=workers,
                             pin_memory=pin_memory,
                             drop_last=True)

    testloader = DataLoader(ImageData(dataset.test,
                                      TestTransform(height, width)),
                            batch_size=test_batch,
                            num_workers=workers,
                            pin_memory=pin_memory,
                            drop_last=True)

    model, optim_policy = get_baseline_model(model_path=pretrained_model)
    print('model size: {:.5f}M'.format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    tri_criterion = TripletLoss(margin)

    # get optimizer
    optimizer = torch.optim.Adam(optim_policy,
                                 lr=lr,
                                 weight_decay=weight_decay)

    def adjust_lr(optimizer, ep):
        if ep < 20:
            lr = 1e-4 * (ep + 1) / 2
        elif ep < 80:
            lr = 1e-3 * num_gpu
        elif ep < 180:
            lr = 1e-4 * num_gpu
        elif ep < 300:
            lr = 1e-5 * num_gpu
        elif ep < 320:
            lr = 1e-5 * 0.1**((ep - 320) / 80) * num_gpu
        elif ep < 400:
            lr = 1e-6
        elif ep < 480:
            lr = 1e-4 * num_gpu
        else:
            lr = 1e-5 * num_gpu
        for p in optimizer.param_groups:
            p['lr'] = lr

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    evaluator = Evaluator(model)
    # start training
    best_acc = -np.inf
    best_epoch = 0
    for epoch in range(start_epoch, max_epoch):
        if step_size > 0:
            adjust_lr(optimizer, epoch + 1)

        train(model, optimizer, tri_criterion, epoch, print_freq, trainloader)

        # evaluate and save a checkpoint every eval_step epochs and at the last epoch
        if eval_step > 0 and (epoch + 1) % eval_step == 0 or (epoch +
                                                              1) == max_epoch:
            acc = evaluator.evaluate(testloader, test_margin)
            is_best = acc > best_acc
            if is_best:
                best_acc = acc
                best_epoch = epoch + 1

            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
                'epoch': epoch + 1,
            },
                            is_best=is_best,
                            save_dir=save_dir,
                            filename='checkpoint_ep' + str(epoch + 1) +
                            '.pth.tar')

    print('Best accuracy {:.1%}, achieved at epoch {}'.format(
        best_acc, best_epoch))
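trainer() delegates each epoch to a train() helper that is not part of the snippet. A sketch matching the call train(model, optimizer, tri_criterion, epoch, print_freq, trainloader), assuming the triplet criterion returns (loss, dist_ap, dist_an) as in the earlier examples:

# Sketch of the train() helper invoked above; names and signature are
# inferred from the call site, not from the original source.
import torch

def train_sketch(model, optimizer, criterion, epoch, print_freq, loader):
    model.train()
    for i, (imgs, pids, _) in enumerate(loader):
        if torch.cuda.is_available():
            imgs, pids = imgs.cuda(), pids.cuda()
        feat = model(imgs)
        loss, _, _ = criterion(feat, pids)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i + 1) % print_freq == 0:
            print('Epoch {} iter {} loss {:.4f}'.format(epoch, i + 1, loss.item()))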