# Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)


if __name__ == '__main__':
    # Command-line interface for the triplet-loss classification experiment.
    parser = argparse.ArgumentParser(description="Triplet loss classification")
    # data options
    parser.add_argument('-d', '--dataset', type=str, default='cuhk03',
                        choices=datasets.names())
    parser.add_argument('-b', '--batch-size', type=int, default=256)
    parser.add_argument('-j', '--workers', type=int, default=4)
    parser.add_argument('--split', type=int, default=0)
    parser.add_argument('--height', type=int,
                        help="input height, default: 256 for resnet*, "
                             "144 for inception")
    parser.add_argument('--width', type=int,
                        help="input width, default: 128 for resnet*, "
                             "56 for inception")
    parser.add_argument('--combine-trainval', action='store_true',
                        help="train and val sets together for training, "
                             "val set alone for validation")
# Example #2
    data_file.close()
    if (args.clock):
        exp_end = time.time()
        exp_time = exp_end - exp_start
        h, m, s = changetoHSM(exp_time)
        print("experiment is over, cost %02d:%02d:%02.6f" % (h, m, s))
        time_file.close()


if __name__ == '__main__':
    # Command-line interface for the snatch-strategy training script.
    parser = argparse.ArgumentParser(description='Snatch Strategy')
    parser.add_argument('-d',
                        '--dataset',
                        type=str,
                        default='DukeMTMC-VideoReID',
                        choices=datasets.names())  # DukeMTMC-VideoReID
    parser.add_argument('-b', '--batch-size', type=int, default=16)
    parser.add_argument('--epoch', type=int, default=70)
    parser.add_argument('--step_size', type=int, default=55)
    parser.add_argument('--percent', type=float, default=0)  # amount added in the second round
    working_dir = os.path.dirname(os.path.abspath(__file__))
    parser.add_argument('--data_dir',
                        type=str,
                        metavar='PATH',
                        default=os.path.join(working_dir, 'data'))  # root directory for loading datasets
    parser.add_argument('--logs_dir',
                        type=str,
                        metavar='PATH',
                        default=os.path.join(working_dir, 'logs'))  # root directory for logs
    parser.add_argument('--exp_name', type=str, default="ero")
    parser.add_argument('--exp_order', type=str, default="1")
# Example #3
    def __init__(self):
        """Build the FD-GAN command-line option parser and parse ``sys.argv``.

        All options are registered on ``self.parser``; the parsed namespace is
        stored in ``self.opt`` and then echoed via ``self.show_opt()``.
        """
        self.parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        # Bind the registration method once to keep the long list compact.
        add = self.parser.add_argument

        add('--stage', type=int, default=1, help='training stage [1|2]')
        add('-d', '--dataset', type=str, default='market1501',
            choices=datasets.names())
        add('-t', '--target', type=str, default='dukemtmc',
            choices=datasets.names())
        add('-s', '--source', type=str, default='market1501',
            choices=datasets.names())

        # paths
        add('--dataroot', type=str, default='./datasets/',
            help='root path to datasets (should have subfolders market1501, dukemtmc, cuhk03, etc)')
        add('--checkpoints', type=str, default='./checkpoints/',
            help='root path to save models')
        add('--name', type=str, default='FD-GAN',
            help='directory to save models')
        # BUG FIX: the six pretrain options below previously passed their help
        # text as ``default=``, so the "default" was a nonsense path string and
        # no help was displayed.  Register them with an empty default and a
        # proper help string instead.
        add('--netE-pretrain', type=str, default='',
            help='pretrained model path for net_E in stage 2')
        add('--netG-pretrain', type=str, default='',
            help='pretrained model path for net_G in stage 2')
        add('--tar-netG-pretrain', type=str, default='',
            help='pretrained model path for net_G in stage 2')
        add('--netDp-pretrain', type=str, default='',
            help='pretrained model path for net_Dp in stage 2')
        add('--tar-netDi-pretrain', type=str, default='',
            help='pretrained model path for net_Di in stage 2')
        add('--netDi-pretrain', type=str, default='',
            help='pretrained model path for net_Di in stage 2')
        # model structures
        add('--arch', type=str, default='resnet50', choices=models.names())
        add('--norm', type=str, default='batch',
            help='instance normalization or batch normalization')
        add('--drop', type=float, default=0.2, help='dropout for the netG')
        add('--connect-layers', type=int, default=0,
            help='skip connections num for netG')
        add('--fuse-mode', type=str, default='cat',
            help='method to fuse reid feature and pose feature [cat|add]')
        add('--pose-feature-size', type=int, default=128,
            help='length of feature vector for pose')
        add('--noise-feature-size', type=int, default=256,
            help='length of feature vector for noise')
        add('--pose-aug', type=str, default='no',
            help='posemap augmentation [no|erase|gauss]')
        # dataloader setting
        add('-b', '--batch-size', type=int, default=16,
            help='input batch size')
        add('-j', '--workers', default=4, type=int,
            help='num threads for loading data')
        add('--width', type=int, default=128, help='input image width')
        add('--height', type=int, default=256, help='input image height')
        # optimizer setting
        add('--niter', type=int, default=50,
            help='# of iter at starting learning rate')
        add('--niter-decay', type=int, default=50,
            help='# of iter to linearly decay learning rate to zero')
        add('--lr', type=float, default=0.001, help='initial learning rate')
        add('--save-step', type=int, default=2,
            help='frequency of saving checkpoints at the end of epochs')
        add('--eval-step', type=int, default=10,
            help='frequency of evaluate checkpoints at the end of epochs')
        # visualization setting
        add('--display-port', type=int, default=6006,
            help='visdom port of the web display')
        add('--display-id', type=int, default=1,
            help='window id of the web display, set 0 for non-usage of visdom')
        add('--display-winsize', type=int, default=256,
            help='display window size')
        add('--display-freq', type=int, default=10,
            help='frequency of showing training results on screen')
        add('--display-single-pane-ncols', type=int, default=0,
            help='if positive, display all images in a single visdom web panel with certain number of images per row.')
        add('--update-html-freq', type=int, default=100,
            help='frequency of saving training results to html')
        add('--no_html', action='store_true',
            help='do not save intermediate training results to [opt.checkpoints]/name/web/')
        add('--print-freq', type=int, default=10,
            help='frequency of showing training results on console')
        # training schedule (loss weights)
        add('--lambda-recon', type=float, default=1.0,
            help='loss weight of loss_r')
        add('--lambda-veri', type=float, default=1.0,
            help='loss weight of loss_v')
        add('--lambda-sp', type=float, default=1.0,
            help='loss weight of loss_sp')
        add('--lambda-mmd', type=float, default=1.0,
            help='loss weight of loss_mmd')
        add('--lambda-tri', type=float, default=10.0,
            help='loss weight of loss_tri')
        add('--smooth-label', action='store_true',
            help='smooth label or not for GANloss')
        add('--tri-margin', type=float, default=0.5,
            help='margin for Tripletloss')

        self.opt = self.parser.parse_args()
        self.show_opt()
    criterion.load_state_dict(checkpoint['adapt_metric'])
    # metric.train(model, train_loader)
    # evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
    if len(args.dataset) > 1:
        for dataset_name in args.dataset:
            print("{} test result:".format(dataset_name))
            evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                               dataset.gallery[dataset_name])
    else:
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery)

if __name__ == '__main__':
    # Command-line interface for softmax-loss classification training.
    parser = argparse.ArgumentParser(description="Softmax loss classification")
    # data
    parser.add_argument('-d', '--dataset', nargs='+', default='market1501',
                        choices=datasets.names())
    parser.add_argument('-b', '--batch-size', type=int, default=60)
    parser.add_argument('-j', '--workers', type=int, default=4)
    parser.add_argument('--split', type=int, default=0)
    parser.add_argument('--height', type=int,
                        help="input height, default: 256 for resnet*, "
                             "144 for inception")
    parser.add_argument('--width', type=int,
                        help="input width, default: 128 for resnet*, "
                             "56 for inception")
    parser.add_argument('--combine-trainval', action='store_true',
                        help="train and val sets together for training, "
                             "val set alone for validation")
    # NOTE(review): the registration below is truncated at the excerpt
    # boundary; the original call continues past this chunk.
    parser.add_argument('--num-instances', type=int, default=4,
                        help="each minibatch consist of "
                             "(batch_size // num_instances) identities, and "
# Example #5
    def __init__(self):
        """Register all DanceReID training options and parse ``sys.argv``.

        The parsed namespace is stored in ``self.opt`` and echoed through
        ``self.show_opt()``.
        """
        self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        # Bind the registration method once; every option below goes through it.
        add = self.parser.add_argument

        add('--stage', type=int, default=0,
            help='training stage [0|1|2](0 is end-to-end, 1 is train only resnet, and 2 is fintuning)')
        add('-d', '--dataset', type=str, default='DanceReID', choices=datasets.names())

        # paths
        add('--dataroot', type=str, default='./datasets/',
            help='root path to datasets (should have subfolders market1501, dukemtmc, DanceReID, etc)')
        add('--checkpoints', type=str, default='./checkpoints/', help='root path to save models')
        add('--name', type=str, default='my_session', help='directory to save models')
        add('--netE-pretrain', type=str, default='', help='pretrained model path for net_E in stage 2')
        add('--netG-pretrain', type=str, default='', help='pretrained model path for net_G in stage 2')
        add('--netDp-pretrain', type=str, default='', help='pretrained model path for net_Dp in stage 2')
        add('--netDi-pretrain', type=str, default='', help='pretrained model path for net_Di in stage 2')

        # model structure
        add('--arch', type=str, default='resnet50', help='basemodeltype')
        add('--norm', type=str, default='batch', help='instance normalization or batch normalization')
        add('--drop', type=float, default=0.2, help='dropout for the netG')
        add('--connect-layers', type=int, default=0, help='skip connections num for netG')
        add('--fuse-mode', type=str, default='cat', help='method to fuse reid feature and pose feature [cat|add]')
        add('--pose-feature-size', type=int, default=128, help='length of feature vector for pose')
        add('--noise-feature-size', type=int, default=256, help='length of feature vector for noise')
        add('--pose-aug', type=str, default='no', help='posemap augmentation [no|erase|gauss]')

        # dataloader settings
        add('-b', '--batch-size', type=int, default=16, help='input batch size')
        add('-j', '--workers', default=4, type=int, help='num threads for loading data')
        add('--width', type=int, default=128, help='input image width')
        add('--height', type=int, default=256, help='input image height')
        add('--skip-frame', type=int, default=20, help='ST-sampling skipping frames')
        add('--inter-rate', type=float, default=0.7, help='probability to sample same video (ST-sampling)')

        # optimizer settings
        add('--niter', type=int, default=50, help='# of iter at starting learning rate')
        add('--niter-decay', type=int, default=50, help='# of iter to linearly decay learning rate to zero')
        add('--lr', type=float, default=0.001, help='initial learning rate')
        add('--save-step', type=int, default=20, help='frequency of saving checkpoints at the end of epochs')
        add('--eval-step', type=int, default=5, help='frequency of evaluate checkpoints at the end of epochs')

        # visualization settings
        add('--display-port', type=int, default=6006, help='visdom port of the web display')
        add('--display-id', type=int, default=1, help='window id of the web display, set 0 for non-usage of visdom')
        add('--display-winsize', type=int, default=256, help='display window size')
        add('--display-freq', type=int, default=10, help='frequency of showing training results on screen')
        add('--display-single-pane-ncols', type=int, default=0,
            help='if positive, display all images in a single visdom web panel with certain number of images per row.')
        add('--update-html-freq', type=int, default=100, help='frequency of saving training results to html')
        add('--no_html', action='store_true',
            help='do not save intermediate training results to [opt.checkpoints]/name/web/')
        add('--print-freq', type=int, default=10, help='frequency of showing training results on console')

        # training loss weights
        add('--lambda-dp', type=float, default=1.0, help='weight of Dp')
        add('--lambda-d', type=float, default=1.0, help='weight of D')
        add('--lambda-recon', type=float, default=1.0, help='loss weight of reconstruction')
        add('--lambda-tri', type=float, default=1.0, help='loss weight of triplet loss')
        add('--lambda-class', type=float, default=1.0, help='loss weight of classification')
        add('--smooth-label', action='store_true', help='smooth label or not for GANloss')
        add('--margin', type=float, default=1.0, help='margin for triplet loss')
        add('--soft-margin', action='store_true', help='use soft margin for triplet loss')
        add('--batch-hard', action='store_true', help='batch hard mining for tri-loss')
        add('--emb-type', type=str, default='Single', help='ReID embedding model type [Single|Siamese]')

        # training tricks
        add('--mask', action='store_true', help='use pose mask for human reconstruction or not')
        add('--eraser', action='store_true', help='use random eraser during training')
        add('--emb-smooth', action='store_true', help='use label smoothing for computing classification loss')
        add('--last-stride', type=int, default=2, help='resnet50 last stride')

        add('--seed', type=int, default=1)

        self.opt = self.parser.parse_args()
        self.show_opt()
# Example #6
def main(args):
    """Train a QAConv + triplet-loss re-id model, then evaluate it.

    Unless ``args.evaluate`` is set, the model is trained with an adaptive
    schedule: the learning rate is decayed once when the loss drops below
    ``base_loss * args.step_factor``, after which training runs 50% more
    epochs and stops.  The learned matcher is then evaluated on every dataset
    named in ``args.testset`` and results are appended to ``results.json``.
    """
    cudnn.deterministic = False
    cudnn.benchmark = True

    exp_database_dir = osp.join(args.exp_dir, string.capwords(args.dataset))
    output_dir = osp.join(exp_database_dir, args.method, args.sub_method)
    log_file = osp.join(output_dir, 'log.txt')
    # Redirect print to both console and log file
    sys.stdout = Logger(log_file)

    # Create model
    ibn_type = args.ibn
    if ibn_type == 'none':
        ibn_type = None
    model = resmap.create(args.arch,
                          ibn_type=ibn_type,
                          final_layer=args.final_layer,
                          neck=args.neck).cuda()
    num_features = model.num_features
    # print(model)
    # print('\n')

    # Spatial size of the feature map produced by the chosen final layer.
    feamap_factor = {'layer2': 8, 'layer3': 16, 'layer4': 32}
    hei = args.height // feamap_factor[args.final_layer]
    wid = args.width // feamap_factor[args.final_layer]
    matcher = QAConv(num_features, hei, wid).cuda()

    # Echo the full command line into the log.
    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')

    # Criterion
    criterion = TripletLoss(matcher, args.margin).cuda()

    # Optimizer: the backbone runs at 0.1x the base learning rate.
    base_param_ids = set(map(id, model.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': model.base.parameters(),
        'lr': 0.1 * args.lr
    }, {
        'params': new_params,
        'lr': args.lr
    }, {
        'params': matcher.parameters(),
        'lr': args.lr
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=5e-4,
                                nesterov=True)

    # Load from checkpoint
    start_epoch = 0
    base_loss = None
    final_epochs = args.max_epochs
    lr_stepped = False

    if args.resume or args.evaluate:
        print('Loading checkpoint...')
        if args.resume and (args.resume != 'ori'):
            checkpoint = load_checkpoint(args.resume)
        else:
            checkpoint = load_checkpoint(
                osp.join(output_dir, 'checkpoint.pth.tar'))
        model.load_state_dict(checkpoint['model'])
        criterion.load_state_dict(checkpoint['criterion'])
        optimizer.load_state_dict(checkpoint['optim'])
        start_epoch = checkpoint['epoch']
        base_loss = checkpoint['base_loss']
        final_epochs = checkpoint['final_epochs']
        lr_stepped = checkpoint['lr_stepped']

        if lr_stepped:
            # Re-apply the one-off LR decay recorded in the checkpoint.
            print('Decay the learning rate by a factor of 0.1.')
            for group in optimizer.param_groups:
                group['lr'] *= 0.1

        print("=> Start epoch {} ".format(start_epoch))

    model = nn.DataParallel(model).cuda()

    # Create data loaders
    save_path = None
    if args.gs_save:
        save_path = output_dir
    dataset, num_classes, train_loader, _, _ = get_data(
        args.dataset, args.data_dir, model, matcher, save_path, args)

    if not args.evaluate:
        # Trainer
        trainer = Trainer(model, criterion, args.clip_value)
        t0 = time.time()

        # Start training
        for epoch in range(start_epoch, args.max_epochs):
            loss, acc = trainer.train(epoch, train_loader, optimizer)

            # NOTE(review): the reference loss is captured at epoch index 1
            # (i.e. the second epoch), presumably to skip the noisy first
            # epoch -- confirm this is intentional.
            if epoch == 1:
                base_loss = loss

            lr = list(map(lambda group: group['lr'], optimizer.param_groups))

            train_time = time.time() - t0
            epoch1 = epoch + 1  # 1-based epoch count used for reporting

            print(
                '* Finished epoch %d at lr=[%g, %g, %g]. Loss: %.3f. Acc: %.2f%%. Training time: %.0f seconds.                  \n'
                % (epoch1, lr[0], lr[1], lr[2], loss, acc * 100, train_time))

            # One-off LR decay once the loss has dropped below the reference
            # by args.step_factor; training then ends 50% beyond this epoch.
            if (not lr_stepped) and (base_loss is not None) and (
                    loss < base_loss * args.step_factor):
                lr_stepped = True
                final_epochs = min(args.max_epochs, epoch1 + epoch1 // 2)
                print(
                    'Decay the learning rate by a factor of 0.1. Final epochs: %d.\n'
                    % final_epochs)
                for group in optimizer.param_groups:
                    group['lr'] *= 0.1

            save_checkpoint(
                {
                    'model': model.module.state_dict(),
                    'criterion': criterion.state_dict(),
                    'optim': optimizer.state_dict(),
                    'epoch': epoch1,
                    'final_epochs': final_epochs,
                    'base_loss': base_loss,
                    'lr_stepped': lr_stepped,
                },
                fpath=osp.join(output_dir, 'checkpoint.pth.tar'))

            if epoch1 == final_epochs:
                print('The learning converges at epoch %d.\n' % epoch1)
                break

    json_file = osp.join(output_dir, 'results.json')

    if not args.evaluate:
        # Record the training configuration and final statistics.
        arg_dict = {
            'train_dataset': args.dataset,
            'exp_dir': args.exp_dir,
            'method': args.method,
            'sub_method': args.sub_method
        }
        with open(json_file, 'a') as f:
            json.dump(arg_dict, f)
            f.write('\n')
        train_dict = {
            'train_dataset': args.dataset,
            'loss': loss,
            'acc': acc,
            'epochs': epoch1,
            'train_time': train_time
        }
        with open(json_file, 'a') as f:
            json.dump(train_dict, f)
            f.write('\n')

    # Final test
    print('Evaluate the learned model:')
    t0 = time.time()

    # Evaluator
    evaluator = Evaluator(model)

    test_names = args.testset.strip().split(',')
    for test_name in test_names:
        if test_name not in datasets.names():
            print('Unknown dataset: %s.' % test_name)
            continue

        t1 = time.time()
        testset, test_query_loader, test_gallery_loader = \
            get_test_data(test_name, args.data_dir, args.height, args.width, args.workers, args.test_fea_batch)

        if not args.do_tlift:
            testset.has_time_info = False

        test_rank1, test_mAP, test_rank1_rerank, test_mAP_rerank, test_rank1_tlift, test_mAP_tlift, test_dist, \
            test_dist_rerank, test_dist_tlift, pre_tlift_dict = \
            evaluator.evaluate(matcher, testset, test_query_loader, test_gallery_loader,
                               args.test_gal_batch, args.test_prob_batch,
                               args.tau, args.sigma, args.K, args.alpha)

        test_time = time.time() - t1

        if testset.has_time_info:
            test_dict = {
                'test_dataset': test_name,
                'rank1': test_rank1,
                'mAP': test_mAP,
                'rank1_rerank': test_rank1_rerank,
                'mAP_rerank': test_mAP_rerank,
                'rank1_tlift': test_rank1_tlift,
                'mAP_tlift': test_mAP_tlift,
                'test_time': test_time
            }
            print(
                '  %s: rank1=%.1f, mAP=%.1f, rank1_rerank=%.1f, mAP_rerank=%.1f,'
                ' rank1_rerank_tlift=%.1f, mAP_rerank_tlift=%.1f.\n' %
                (test_name, test_rank1 * 100, test_mAP * 100,
                 test_rank1_rerank * 100, test_mAP_rerank * 100,
                 test_rank1_tlift * 100, test_mAP_tlift * 100))
        else:
            test_dict = {
                'test_dataset': test_name,
                'rank1': test_rank1,
                'mAP': test_mAP,
                'test_time': test_time
            }
            print('  %s: rank1=%.1f, mAP=%.1f.\n' %
                  (test_name, test_rank1 * 100, test_mAP * 100))

        with open(json_file, 'a') as f:
            json.dump(test_dict, f)
            f.write('\n')

        if args.save_score:
            # BUG FIX: ``np.object`` was deprecated in NumPy 1.20 and removed
            # in 1.24; the builtin ``object`` is the documented equivalent.
            test_gal_list = np.array(
                [fname for fname, _, _, _ in testset.gallery], dtype=object)
            test_prob_list = np.array(
                [fname for fname, _, _, _ in testset.query], dtype=object)
            test_gal_ids = [pid for _, pid, _, _ in testset.gallery]
            test_prob_ids = [pid for _, pid, _, _ in testset.query]
            test_gal_cams = [c for _, _, c, _ in testset.gallery]
            test_prob_cams = [c for _, _, c, _ in testset.query]
            test_score_file = osp.join(exp_database_dir, args.method,
                                       args.sub_method,
                                       '%s_score.mat' % test_name)
            # Scores are stored as similarities (1 - distance).
            sio.savemat(test_score_file, {
                'score': 1. - test_dist,
                'score_rerank': 1. - test_dist_rerank,
                'score_tlift': 1. - test_dist_tlift,
                'gal_time': pre_tlift_dict['gal_time'],
                'prob_time': pre_tlift_dict['prob_time'],
                'gal_list': test_gal_list,
                'prob_list': test_prob_list,
                'gal_ids': test_gal_ids,
                'prob_ids': test_prob_ids,
                'gal_cams': test_gal_cams,
                'prob_cams': test_prob_cams
            },
                        oned_as='column',
                        do_compression=True)

    test_time = time.time() - t0

    if not args.evaluate:
        print('Finished training at epoch %d, loss = %.3f, acc = %.2f%%.\n' %
              (epoch1, loss, acc * 100))
        print(
            "Total training time: %.3f sec. Average training time per epoch: %.3f sec."
            % (train_time, train_time / (epoch1 - start_epoch)))
    print("Total testing time: %.3f sec.\n" % test_time)

    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')
# Example #7
        nums_to_select = new_nums_to_select
        step = step + 1

    data_file.close()
    if (args.clock):
        exp_end = time.time()
        exp_time = exp_end - exp_start
        h, m, s = changetoHSM(exp_time)
        print("experiment is over, cost %02d:%02d:%02.6f" % ( h, m, s))
        time_file.close()



if __name__ == '__main__':
    # Command-line interface for the snatch-strategy training script
    # (progressive-sampling variant).
    parser = argparse.ArgumentParser(description='Snatch Strategy')
    parser.add_argument('-d', '--dataset', type=str, default='DukeMTMC-VideoReID',choices=datasets.names())  # dataset name
    parser.add_argument('-b', '--batch-size', type=int, default=16)
    parser.add_argument('--epoch',type=int,default=70)
    parser.add_argument('--step_size',type=int,default=55)
    parser.add_argument('--EF', type=float, default=5)  # progressive sampling coefficient
    parser.add_argument('--q', type=float, default=1)  # progressive sampling exponent
    parser.add_argument('--yita', type=int, default=100)   # base number for "big start"
    parser.add_argument('--step_s', type=int, default=10)  # saturation control for "big start"
    working_dir = os.path.dirname(os.path.abspath(__file__))
    parser.add_argument('--data_dir', type=str, metavar='PATH',default=os.path.join(working_dir, 'data'))  # root directory for loading datasets
    parser.add_argument('--logs_dir', type=str, metavar='PATH',default=os.path.join(working_dir, 'logs'))  # root directory for logs
    parser.add_argument('--exp_name',type=str,default="gradully_supplement")
    parser.add_argument('--exp_order',type=str,default="1")
    # NOTE(review): argparse ``type=bool`` converts any non-empty string to
    # True (bool('False') is True) -- confirm this flag behaves as intended.
    parser.add_argument('--resume', type=bool, default=True)
    parser.add_argument('--mode', type=str, choices=["Classification", "Dissimilarity"], default="Dissimilarity")   # consider whether this option should be removed
    parser.add_argument('--max_frames', type=int, default=100)
# Example #8
def main(args):
    """Train a QAConv model with the class-level QAConvLoss, then evaluate it.

    Builds data loaders, the feature-map backbone, the QAConvLoss criterion
    and an SGD optimizer, optionally resumes from a checkpoint, trains for
    ``args.epochs`` epochs, and finally evaluates on each dataset named in
    the comma-separated ``args.testset``, appending results to per-dataset
    text files and optionally saving raw score matrices.

    Args:
        args: parsed command-line namespace (see the argparse block of this
            script for the individual options).
    """
    cudnn.deterministic = False
    cudnn.benchmark = True

    # Output layout: <exp_dir>/<Dataset>/<method>/<sub_method>/
    exp_database_dir = osp.join(args.exp_dir, string.capwords(args.dataset))
    output_dir = osp.join(exp_database_dir, args.method, args.sub_method)
    log_file = osp.join(output_dir, 'log.txt')
    # Redirect print to both console and log file
    sys.stdout = Logger(log_file)

    # Create data loaders
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.data_dir, args.height, args.width, args.batch_size, args.combine_all,
                 args.min_size, args.max_size, args.workers, args.test_fea_batch)

    # Create model
    model = resmap.create(args.arch,
                          final_layer=args.final_layer,
                          neck=args.neck).cuda()
    num_features = model.num_features
    # print(model)
    print('\n')

    # Echo the full command line into the log for reproducibility.
    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')

    # Criterion: the matcher operates on the backbone's feature map, whose
    # spatial size depends on which stage is used as the final layer.
    feamap_factor = {'layer2': 8, 'layer3': 16, 'layer4': 32}
    hei = args.height // feamap_factor[args.final_layer]
    wid = args.width // feamap_factor[args.final_layer]
    criterion = QAConvLoss(num_classes, num_features, hei, wid,
                           args.mem_batch_size).cuda()

    # Optimizer: the pretrained backbone gets a 10x smaller learning rate
    # than the newly initialized layers and the criterion's parameters.
    base_param_ids = set(map(id, model.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': model.base.parameters(),
        'lr': 0.1 * args.lr
    }, {
        'params': new_params,
        'lr': args.lr
    }, {
        'params': criterion.parameters(),
        'lr': args.lr
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=5e-4,
                                nesterov=True)

    # Decay LR by a factor of 0.1 every step_size epochs
    lr_scheduler = StepLR(optimizer, step_size=args.step_size, gamma=0.1)

    # Load from checkpoint
    start_epoch = 0

    if args.resume or args.evaluate:
        print('Loading checkpoint...')
        # --resume may carry an explicit checkpoint path; the literal 'ori'
        # (or evaluate-only mode) falls back to this run's own checkpoint.
        if args.resume and (args.resume != 'ori'):
            checkpoint = load_checkpoint(args.resume)
        else:
            checkpoint = load_checkpoint(
                osp.join(output_dir, 'checkpoint.pth.tar'))
        model.load_state_dict(checkpoint['model'])
        criterion.load_state_dict(checkpoint['criterion'])
        optimizer.load_state_dict(checkpoint['optim'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))

    model = nn.DataParallel(model).cuda()
    criterion = nn.DataParallel(criterion).cuda()

    if not args.evaluate:
        # Trainer
        trainer = Trainer(model, criterion)

        t0 = time.time()
        # Start training
        for epoch in range(start_epoch, args.epochs):
            loss, acc = trainer.train(epoch, train_loader, optimizer)

            lr = list(map(lambda group: group['lr'], optimizer.param_groups))
            # Passing the epoch index keeps the schedule correct after a
            # resume, because the scheduler above is created with the default
            # last_epoch=-1.
            # NOTE(review): scheduler.step(epoch) is deprecated in PyTorch
            # >= 1.4; a cleaner fix is last_epoch=start_epoch - 1 at
            # construction plus a bare step(), as the sibling scripts do.
            lr_scheduler.step(epoch + 1)
            train_time = time.time() - t0

            print(
                '* Finished epoch %d at lr=[%g, %g, %g]. Loss: %.3f. Acc: %.2f%%. Training time: %.0f seconds.\n'
                %
                (epoch + 1, lr[0], lr[1], lr[2], loss, acc * 100, train_time))

            # Checkpoint every epoch so training can be resumed exactly.
            save_checkpoint(
                {
                    'model': model.module.state_dict(),
                    'criterion': criterion.module.state_dict(),
                    'optim': optimizer.state_dict(),
                    'epoch': epoch + 1,
                },
                fpath=osp.join(output_dir, 'checkpoint.pth.tar'))

    # Final test
    print('Evaluate the learned model:')
    t0 = time.time()

    # Evaluator
    evaluator = Evaluator(model)

    test_names = args.testset.strip().split(',')
    for test_name in test_names:
        if test_name not in datasets.names():
            print('Unknown dataset: %s.' % test_name)
            continue

        testset, test_query_loader, test_gallery_loader = \
            get_test_data(test_name, args.data_dir, args.height, args.width, args.test_fea_batch)

        test_rank1, test_mAP, test_rank1_rerank, test_mAP_rerank, test_rank1_tlift, test_mAP_tlift, test_dist, \
        test_dist_rerank, test_dist_tlift, pre_tlift_dict = \
            evaluator.evaluate(test_query_loader, test_gallery_loader, testset, criterion.module,
                               args.test_gal_batch, args.test_prob_batch,
                               args.tau, args.sigma, args.K, args.alpha)

        print('  %s: rank1=%.1f, mAP=%.1f, rank1_rerank=%.1f, mAP_rerank=%.1f,'
              ' rank1_rerank_tlift=%.1f, mAP_rerank_tlift=%.1f.\n' %
              (test_name, test_rank1 * 100, test_mAP * 100,
               test_rank1_rerank * 100, test_mAP_rerank * 100,
               test_rank1_tlift * 100, test_mAP_tlift * 100))

        # Append (never overwrite) this run's numbers to the shared results file.
        result_file = osp.join(exp_database_dir, args.method,
                               test_name + '_results.txt')
        with open(result_file, 'a') as f:
            f.write('%s/%s:\n' % (args.method, args.sub_method))
            f.write(
                '\t%s: rank1=%.1f, mAP=%.1f, rank1_rerank=%.1f, mAP_rerank=%.1f, rank1_rerank_tlift=%.1f, '
                'mAP_rerank_tlift=%.1f.\n\n' %
                (test_name, test_rank1 * 100, test_mAP * 100,
                 test_rank1_rerank * 100, test_mAP_rerank * 100,
                 test_rank1_tlift * 100, test_mAP_tlift * 100))

        if args.save_score:
            # Fix: the np.object alias was removed in NumPy 1.24; the builtin
            # `object` dtype is the documented replacement.
            test_gal_list = np.array(
                [fname for fname, _, _, _ in testset.gallery], dtype=object)
            test_prob_list = np.array(
                [fname for fname, _, _, _ in testset.query], dtype=object)
            test_gal_ids = [pid for _, pid, _, _ in testset.gallery]
            test_prob_ids = [pid for _, pid, _, _ in testset.query]
            test_gal_cams = [c for _, _, c, _ in testset.gallery]
            test_prob_cams = [c for _, _, c, _ in testset.query]
            test_score_file = osp.join(exp_database_dir, args.method,
                                       args.sub_method,
                                       '%s_score.mat' % test_name)
            # Scores are stored as similarities (1 - distance) in MATLAB format.
            sio.savemat(test_score_file, {
                'score': 1. - test_dist,
                'score_rerank': 1. - test_dist_rerank,
                'score_tlift': 1. - test_dist_tlift,
                'gal_time': pre_tlift_dict['gal_time'],
                'prob_time': pre_tlift_dict['prob_time'],
                'gal_list': test_gal_list,
                'prob_list': test_prob_list,
                'gal_ids': test_gal_ids,
                'prob_ids': test_prob_ids,
                'gal_cams': test_gal_cams,
                'prob_cams': test_prob_cams
            },
                        oned_as='column',
                        do_compression=True)

    test_time = time.time() - t0
    if not args.evaluate:
        print('Finished training at epoch %d, loss %.3f, acc %.2f%%.\n' %
              (epoch + 1, loss, acc * 100))
        # Fix: epochs trained is args.epochs - start_epoch (the loop runs
        # start_epoch .. args.epochs-1); the old "+ 1" skewed the average.
        # max() guards a zero-length run (resumed at the final epoch).
        num_trained_epochs = max(args.epochs - start_epoch, 1)
        print(
            "Total training time: %.3f sec. Average training time per epoch: %.3f sec."
            % (train_time, train_time / num_trained_epochs))
    print("Total testing time: %.3f sec.\n" % test_time)

    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')
Exemple #9
0
def main(args):
    """Train a QAConv model with the ClassMemoryLoss, then evaluate it.

    Builds loaders, model, matcher and criterion, optionally resumes from a
    checkpoint or runs a pre-training trial search, trains with a staged data
    augmentation (flip/block augmentation is switched on once accuracy passes
    ``args.acc_thr``), then evaluates on every dataset in the comma-separated
    ``args.testset`` and writes per-dataset and averaged results to disk.

    Args:
        args: parsed command-line namespace (see the argparse block of this
            script for the individual options).
    """
    cudnn.deterministic = False
    cudnn.benchmark = True

    # Output layout: <exp_dir>/<Dataset>/<method>/<sub_method>/
    exp_database_dir = osp.join(args.exp_dir, string.capwords(args.dataset))
    output_dir = osp.join(exp_database_dir, args.method, args.sub_method)
    log_file = osp.join(output_dir, 'log.txt')
    # Redirect print to both console and log file
    sys.stdout = Logger(log_file)

    # Create data loaders
    dataset, num_classes, train_loader, _, _ = \
        get_data(args.dataset, args.data_dir, args.height, args.width, args.batch_size, args.combine_all,
                 args.workers, args.test_fea_batch)

    # Create model
    #model = seTest.resnst50().cuda()
    model = resmap.create(args.arch,
                          ibn_type=args.ibn,
                          final_layer=args.final_layer,
                          neck=args.neck).cuda()
    num_features = model.num_features
    #num_features = 64
    # print(model)
    # print('\n')

    # The matcher operates on the backbone's feature map, whose spatial size
    # depends on which stage is used as the final layer.
    feamap_factor = {'layer2': 8, 'layer3': 16, 'layer4': 32}
    hei = args.height // feamap_factor[args.final_layer]
    wid = args.width // feamap_factor[args.final_layer]
    matcher = QAConv(num_features, hei, wid).cuda()

    # Echo the full command line into the log for reproducibility.
    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')

    # Criterion
    criterion = ClassMemoryLoss(matcher, num_classes, num_features, hei, wid,
                                args.mem_batch_size).cuda()

    # Optimizer: the pretrained backbone gets a 10x smaller learning rate
    # than the newly initialized layers and the criterion's parameters.
    base_param_ids = set(map(id, model.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': model.base.parameters(),
        'lr': 0.1 * args.lr
    }, {
        'params': new_params,
        'lr': args.lr
    }, {
        'params': criterion.parameters(),
        'lr': args.lr
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=5e-4,
                                nesterov=True)

    # Load from checkpoint
    start_epoch = 0

    if args.resume or args.evaluate:
        print('Loading checkpoint...')
        # --resume may carry an explicit checkpoint path; the literal 'ori'
        # (or evaluate-only mode) falls back to this run's own checkpoint.
        if args.resume and (args.resume != 'ori'):
            checkpoint = load_checkpoint(args.resume)
        else:
            checkpoint = load_checkpoint(
                osp.join(output_dir, 'checkpoint.pth.tar'))
        model.load_state_dict(checkpoint['model'])
        criterion.load_state_dict(checkpoint['criterion'])
        optimizer.load_state_dict(checkpoint['optim'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))
    elif args.pre_epochs > 0:
        # Fresh run: optionally search over several short pre-training trials
        # and keep the best model/criterion/optimizer triple.
        pre_tr = PreTrainer(model, criterion, optimizer, train_loader,
                            args.pre_epochs, args.max_steps, args.num_trials)
        result_file = osp.join(exp_database_dir, args.method,
                               'pretrain_metric.txt')
        model, criterion, optimizer = pre_tr.train(result_file, args.method,
                                                   args.sub_method)

    # Decay LR by a factor of 0.1 every step_size epochs; last_epoch aligns
    # the schedule with a resumed run.
    lr_scheduler = StepLR(optimizer,
                          step_size=args.step_size,
                          gamma=0.1,
                          last_epoch=start_epoch - 1)

    model = nn.DataParallel(model).cuda()
    criterion = nn.DataParallel(criterion).cuda()

    # Stronger augmentation is only enabled once training accuracy is high
    # enough (see the check at the bottom of the training loop).
    enhance_data_aug = False

    if not args.evaluate:
        # Trainer
        trainer = Trainer(model, criterion)

        t0 = time.time()
        # Start training
        for epoch in range(start_epoch, args.epochs):
            loss, acc = trainer.train(epoch, train_loader, optimizer)

            lr = list(map(lambda group: group['lr'], optimizer.param_groups))
            lr_scheduler.step()
            train_time = time.time() - t0

            print(
                '* Finished epoch %d at lr=[%g, %g, %g]. Loss: %.3f. Acc: %.2f%%. Training time: %.0f seconds.                  \n'
                %
                (epoch + 1, lr[0], lr[1], lr[2], loss, acc * 100, train_time))

            # Checkpoint every epoch so training can be resumed exactly.
            save_checkpoint(
                {
                    'model': model.module.state_dict(),
                    'criterion': criterion.module.state_dict(),
                    'optim': optimizer.state_dict(),
                    'epoch': epoch + 1,
                },
                fpath=osp.join(output_dir, 'checkpoint.pth.tar'))

            # Once accuracy crosses the threshold (and at least one epoch
            # remains), rebuild the train loader with heavier augmentation.
            if not enhance_data_aug and epoch < args.epochs - 1 and acc > args.acc_thr:
                enhance_data_aug = True
                print('\nAcc = %.2f%% > %.2f%%. Start to Flip and Block.\n' %
                      (acc * 100, args.acc_thr * 100))

                train_transformer = T.Compose([
                    T.Resize((args.height, args.width), interpolation=3),
                    T.Pad(10),
                    T.RandomCrop((args.height, args.width)),
                    T.RandomHorizontalFlip(0.5),
                    T.RandomRotation(5),
                    T.ColorJitter(brightness=(0.5, 2.0),
                                  contrast=(0.5, 2.0),
                                  saturation=(0.5, 2.0),
                                  hue=(-0.1, 0.1)),
                    T.RandomOcclusion(args.min_size, args.max_size),
                    T.ToTensor(),
                ])

                train_loader = DataLoader(Preprocessor(
                    dataset.train,
                    root=osp.join(dataset.images_dir, dataset.train_path),
                    transform=train_transformer),
                                          batch_size=args.batch_size,
                                          num_workers=args.workers,
                                          shuffle=True,
                                          pin_memory=True,
                                          drop_last=True)

    # Final test
    print('Evaluate the learned model:')
    t0 = time.time()

    # Evaluator
    evaluator = Evaluator(model)

    avg_rank1 = 0
    avg_mAP = 0
    num_testsets = 0
    results = {}

    test_names = args.testset.strip().split(',')
    for test_name in test_names:
        if test_name not in datasets.names():
            print('Unknown dataset: %s.' % test_name)
            continue

        testset, test_query_loader, test_gallery_loader = \
            get_test_data(test_name, args.data_dir, args.height, args.width, args.workers, args.test_fea_batch)

        if not args.do_tlift:
            testset.has_time_info = False
        test_rank1, test_mAP, test_rank1_rerank, test_mAP_rerank, test_rank1_tlift, test_mAP_tlift, test_dist, \
        test_dist_rerank, test_dist_tlift, pre_tlift_dict = \
            evaluator.evaluate(matcher, testset, test_query_loader, test_gallery_loader,
                                args.test_gal_batch, args.test_prob_batch,
                               args.tau, args.sigma, args.K, args.alpha)

        results[test_name] = [test_rank1, test_mAP]
        # The training dataset is excluded from the cross-dataset average.
        if test_name != args.dataset:
            avg_rank1 += test_rank1
            avg_mAP += test_mAP
            num_testsets += 1

        if testset.has_time_info:
            print(
                '  %s: rank1=%.1f, mAP=%.1f, rank1_rerank=%.1f, mAP_rerank=%.1f,'
                ' rank1_rerank_tlift=%.1f, mAP_rerank_tlift=%.1f.\n' %
                (test_name, test_rank1 * 100, test_mAP * 100,
                 test_rank1_rerank * 100, test_mAP_rerank * 100,
                 test_rank1_tlift * 100, test_mAP_tlift * 100))
        else:
            print('  %s: rank1=%.1f, mAP=%.1f.\n' %
                  (test_name, test_rank1 * 100, test_mAP * 100))

        # Append (never overwrite) this run's numbers to the shared results file.
        result_file = osp.join(exp_database_dir, args.method,
                               test_name + '_results.txt')
        with open(result_file, 'a') as f:
            f.write('%s/%s:\n' % (args.method, args.sub_method))
            if testset.has_time_info:
                f.write(
                    '\t%s: rank1=%.1f, mAP=%.1f, rank1_rerank=%.1f, mAP_rerank=%.1f, rank1_rerank_tlift=%.1f, '
                    'mAP_rerank_tlift=%.1f.\n\n' %
                    (test_name, test_rank1 * 100, test_mAP * 100,
                     test_rank1_rerank * 100, test_mAP_rerank * 100,
                     test_rank1_tlift * 100, test_mAP_tlift * 100))
            else:
                f.write('\t%s: rank1=%.1f, mAP=%.1f.\n\n' %
                        (test_name, test_rank1 * 100, test_mAP * 100))

        if args.save_score:
            # Fix: the np.object alias was removed in NumPy 1.24; the builtin
            # `object` dtype is the documented replacement.
            test_gal_list = np.array(
                [fname for fname, _, _, _ in testset.gallery], dtype=object)
            test_prob_list = np.array(
                [fname for fname, _, _, _ in testset.query], dtype=object)
            test_gal_ids = [pid for _, pid, _, _ in testset.gallery]
            test_prob_ids = [pid for _, pid, _, _ in testset.query]
            test_gal_cams = [c for _, _, c, _ in testset.gallery]
            test_prob_cams = [c for _, _, c, _ in testset.query]
            test_score_file = osp.join(exp_database_dir, args.method,
                                       args.sub_method,
                                       '%s_score.mat' % test_name)
            # Scores are stored as similarities (1 - distance) in MATLAB format.
            sio.savemat(test_score_file, {
                'score': 1. - test_dist,
                'score_rerank': 1. - test_dist_rerank,
                'score_tlift': 1. - test_dist_tlift,
                'gal_time': pre_tlift_dict['gal_time'],
                'prob_time': pre_tlift_dict['prob_time'],
                'gal_list': test_gal_list,
                'prob_list': test_prob_list,
                'gal_ids': test_gal_ids,
                'prob_ids': test_prob_ids,
                'gal_cams': test_gal_cams,
                'prob_cams': test_prob_cams
            },
                        oned_as='column',
                        do_compression=True)

    test_time = time.time() - t0
    # Fix: guard against ZeroDivisionError when every name in --testset was
    # unknown (or only the training set was listed).
    if num_testsets > 0:
        avg_rank1 /= num_testsets
        avg_mAP /= num_testsets

    for key in results.keys():
        print('%s: rank1=%.1f%%, mAP=%.1f%%.' %
              (key, results[key][0] * 100, results[key][1] * 100))
    print('Average: rank1=%.2f%%, mAP=%.2f%%.\n\n' %
          (avg_rank1 * 100, avg_mAP * 100))

    result_file = osp.join(exp_database_dir, args.method,
                           args.sub_method[:-5] + '_avg_results.txt')
    with open(result_file, 'a') as f:
        f.write('%s/%s:\n' % (args.method, args.sub_method))
        if not args.evaluate:
            f.write('\t Loss: %.3f, acc: %.2f%%. ' % (loss, acc * 100))
            f.write("Train: %.0fs. " % train_time)
        f.write("Test: %.0fs. " % test_time)
        f.write('Rank1: %.2f%%, mAP: %.2f%%.\n' %
                (avg_rank1 * 100, avg_mAP * 100))
        for key in results.keys():
            f.write('\t %s: Rank1: %.1f%%, mAP: %.1f%%.\n' %
                    (key, results[key][0] * 100, results[key][1] * 100))
        f.write('\n')

    if not args.evaluate:
        print('Finished training at epoch %d, loss = %.3f, acc = %.2f%%.\n' %
              (epoch + 1, loss, acc * 100))
        # Fix: epochs trained is args.epochs - start_epoch (the loop runs
        # start_epoch .. args.epochs-1); the old "+ 1" skewed the average.
        # max() guards a zero-length run (resumed at the final epoch).
        num_trained_epochs = max(args.epochs - start_epoch, 1)
        print(
            "Total training time: %.3f sec. Average training time per epoch: %.3f sec."
            % (train_time, train_time / num_trained_epochs))
    print("Total testing time: %.3f sec.\n" % test_time)

    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')
Exemple #10
0
def main(args):
    """Train a QAConv model with the PairwiseMatchingLoss, then evaluate it.

    Builds the model and QAConv matcher, optionally resumes from a
    checkpoint, constructs the (sampler-stateful) training data loader,
    trains for ``args.epochs`` epochs with gradient clipping, and evaluates
    on every dataset in the comma-separated ``args.testset``, writing
    per-dataset and averaged results to disk.

    Args:
        args: parsed command-line namespace (see the argparse block of this
            script for the individual options).
    """
    cudnn.deterministic = False
    cudnn.benchmark = True

    # Output layout: <exp_dir>/<Dataset>/<method>/<sub_method>/
    exp_database_dir = osp.join(args.exp_dir, string.capwords(args.dataset))
    output_dir = osp.join(exp_database_dir, args.method, args.sub_method)
    log_file = osp.join(output_dir, 'log.txt')
    # Redirect print to both console and log file
    sys.stdout = Logger(log_file)

    # Create model; 'none' disables instance-batch normalization.
    ibn_type = args.ibn
    if ibn_type == 'none':
        ibn_type = None
    model = resmap.create(args.arch,
                          ibn_type=ibn_type,
                          final_layer=args.final_layer,
                          neck=args.neck).cuda()
    num_features = model.num_features
    # print(model)
    # print('\n')

    # The matcher operates on the backbone's feature map, whose spatial size
    # depends on which stage is used as the final layer.
    feamap_factor = {'layer2': 8, 'layer3': 16, 'layer4': 32}
    hei = args.height // feamap_factor[args.final_layer]
    wid = args.width // feamap_factor[args.final_layer]
    matcher = QAConv(num_features, hei, wid).cuda()

    # Echo the full command line into the log for reproducibility.
    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')

    # Criterion
    criterion = PairwiseMatchingLoss(matcher).cuda()

    # Optimizer: the pretrained backbone gets a 10x smaller learning rate
    # than the newly initialized layers and the matcher's parameters.
    base_param_ids = set(map(id, model.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': model.base.parameters(),
        'lr': 0.1 * args.lr
    }, {
        'params': new_params,
        'lr': args.lr
    }, {
        'params': matcher.parameters(),
        'lr': args.lr
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=5e-4,
                                nesterov=True)

    # Load from checkpoint
    start_epoch = 0

    if args.resume or args.evaluate:
        print('Loading checkpoint...')
        # --resume may carry an explicit checkpoint path; the literal 'ori'
        # (or evaluate-only mode) falls back to this run's own checkpoint.
        if args.resume and (args.resume != 'ori'):
            checkpoint = load_checkpoint(args.resume)
        else:
            checkpoint = load_checkpoint(
                osp.join(output_dir, 'checkpoint.pth.tar'))
        model.load_state_dict(checkpoint['model'])
        criterion.load_state_dict(checkpoint['criterion'])
        optimizer.load_state_dict(checkpoint['optim'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))

    model = nn.DataParallel(model).cuda()

    # Create data loaders
    # Warning: this training data loader cannot be used elsewhere other than a continueous training, otherwise the
    #   switch between the PK sampler and GS sampler will be incorrect!
    save_path = None
    if args.gs_save:
        save_path = output_dir
    dataset, num_classes, train_loader, _, _ = get_data(
        args.dataset, args.data_dir, model, matcher, start_epoch - 1,
        save_path, args)

    # Decay LR by a factor of 0.1 every step_size epochs; last_epoch aligns
    # the schedule with a resumed run.
    lr_scheduler = StepLR(optimizer,
                          step_size=args.step_size,
                          gamma=0.1,
                          last_epoch=start_epoch - 1)

    if not args.evaluate:
        # Trainer (clip_value bounds the gradient norm during training).
        trainer = Trainer(model, criterion, args.clip_value)

        t0 = time.time()
        # Start training
        for epoch in range(start_epoch, args.epochs):
            loss, acc = trainer.train(epoch, train_loader, optimizer)

            lr = list(map(lambda group: group['lr'], optimizer.param_groups))
            lr_scheduler.step()
            train_time = time.time() - t0

            print(
                '* Finished epoch %d at lr=[%g, %g, %g]. Loss: %.3f. Acc: %.2f%%. Training time: %.0f seconds.                  \n'
                %
                (epoch + 1, lr[0], lr[1], lr[2], loss, acc * 100, train_time))

            # Checkpoint every epoch so training can be resumed exactly.
            # (criterion is not DataParallel-wrapped here, so no .module.)
            save_checkpoint(
                {
                    'model': model.module.state_dict(),
                    'criterion': criterion.state_dict(),
                    'optim': optimizer.state_dict(),
                    'epoch': epoch + 1,
                },
                fpath=osp.join(output_dir, 'checkpoint.pth.tar'))

    # Final test
    print('Evaluate the learned model:')
    t0 = time.time()

    # Evaluator
    evaluator = Evaluator(model)

    avg_rank1 = 0
    avg_mAP = 0
    num_testsets = 0
    results = {}

    test_names = args.testset.strip().split(',')
    for test_name in test_names:
        if test_name not in datasets.names():
            print('Unknown dataset: %s.' % test_name)
            continue

        testset, test_query_loader, test_gallery_loader = \
            get_test_data(test_name, args.data_dir, args.height, args.width, args.workers, args.test_fea_batch)

        if not args.do_tlift:
            testset.has_time_info = False
        test_rank1, test_mAP, test_rank1_rerank, test_mAP_rerank, test_rank1_tlift, test_mAP_tlift, test_dist, \
        test_dist_rerank, test_dist_tlift, pre_tlift_dict = \
            evaluator.evaluate(matcher, testset, test_query_loader, test_gallery_loader,
                                args.test_gal_batch, args.test_prob_batch,
                               args.tau, args.sigma, args.K, args.alpha)

        results[test_name] = [test_rank1, test_mAP]
        # The training dataset is excluded from the cross-dataset average.
        if test_name != args.dataset:
            avg_rank1 += test_rank1
            avg_mAP += test_mAP
            num_testsets += 1

        if testset.has_time_info:
            print(
                '  %s: rank1=%.1f, mAP=%.1f, rank1_rerank=%.1f, mAP_rerank=%.1f,'
                ' rank1_rerank_tlift=%.1f, mAP_rerank_tlift=%.1f.\n' %
                (test_name, test_rank1 * 100, test_mAP * 100,
                 test_rank1_rerank * 100, test_mAP_rerank * 100,
                 test_rank1_tlift * 100, test_mAP_tlift * 100))
        else:
            print('  %s: rank1=%.1f, mAP=%.1f.\n' %
                  (test_name, test_rank1 * 100, test_mAP * 100))

        # Append (never overwrite) this run's numbers to the shared results file.
        result_file = osp.join(exp_database_dir, args.method,
                               test_name + '_results.txt')
        with open(result_file, 'a') as f:
            f.write('%s/%s:\n' % (args.method, args.sub_method))
            if testset.has_time_info:
                f.write(
                    '\t%s: rank1=%.1f, mAP=%.1f, rank1_rerank=%.1f, mAP_rerank=%.1f, rank1_rerank_tlift=%.1f, '
                    'mAP_rerank_tlift=%.1f.\n\n' %
                    (test_name, test_rank1 * 100, test_mAP * 100,
                     test_rank1_rerank * 100, test_mAP_rerank * 100,
                     test_rank1_tlift * 100, test_mAP_tlift * 100))
            else:
                f.write('\t%s: rank1=%.1f, mAP=%.1f.\n\n' %
                        (test_name, test_rank1 * 100, test_mAP * 100))

        if args.save_score:
            # Fix: the np.object alias was removed in NumPy 1.24; the builtin
            # `object` dtype is the documented replacement.
            test_gal_list = np.array(
                [fname for fname, _, _, _ in testset.gallery], dtype=object)
            test_prob_list = np.array(
                [fname for fname, _, _, _ in testset.query], dtype=object)
            test_gal_ids = [pid for _, pid, _, _ in testset.gallery]
            test_prob_ids = [pid for _, pid, _, _ in testset.query]
            test_gal_cams = [c for _, _, c, _ in testset.gallery]
            test_prob_cams = [c for _, _, c, _ in testset.query]
            test_score_file = osp.join(exp_database_dir, args.method,
                                       args.sub_method,
                                       '%s_score.mat' % test_name)
            # Scores are stored as similarities (1 - distance) in MATLAB format.
            sio.savemat(test_score_file, {
                'score': 1. - test_dist,
                'score_rerank': 1. - test_dist_rerank,
                'score_tlift': 1. - test_dist_tlift,
                'gal_time': pre_tlift_dict['gal_time'],
                'prob_time': pre_tlift_dict['prob_time'],
                'gal_list': test_gal_list,
                'prob_list': test_prob_list,
                'gal_ids': test_gal_ids,
                'prob_ids': test_prob_ids,
                'gal_cams': test_gal_cams,
                'prob_cams': test_prob_cams
            },
                        oned_as='column',
                        do_compression=True)

    test_time = time.time() - t0
    # Fix: guard against ZeroDivisionError when every name in --testset was
    # unknown (or only the training set was listed).
    if num_testsets > 0:
        avg_rank1 /= num_testsets
        avg_mAP /= num_testsets
    for key in results.keys():
        print('%s: rank1=%.1f%%, mAP=%.1f%%.' %
              (key, results[key][0] * 100, results[key][1] * 100))
    print('Average: rank1=%.2f%%, mAP=%.2f%%.\n\n' %
          (avg_rank1 * 100, avg_mAP * 100))

    result_file = osp.join(exp_database_dir, args.method,
                           args.sub_method[:-5] + '_avg_results.txt')
    with open(result_file, 'a') as f:
        f.write('%s/%s:\n' % (args.method, args.sub_method))
        if not args.evaluate:
            f.write('\t Loss: %.3f, acc: %.2f%%. ' % (loss, acc * 100))
            f.write("Train: %.0fs. " % train_time)
        f.write("Test: %.0fs. " % test_time)
        f.write('Rank1: %.2f%%, mAP: %.2f%%.\n' %
                (avg_rank1 * 100, avg_mAP * 100))
        for key in results.keys():
            f.write('\t %s: Rank1: %.1f%%, mAP: %.1f%%.\n' %
                    (key, results[key][0] * 100, results[key][1] * 100))
        f.write('\n')

    if not args.evaluate:
        print('Finished training at epoch %d, loss = %.3f, acc = %.2f%%.\n' %
              (epoch + 1, loss, acc * 100))
        # Fix: epochs trained is args.epochs - start_epoch (the loop runs
        # start_epoch .. args.epochs-1); the old "+ 1" skewed the average.
        # max() guards a zero-length run (resumed at the final epoch).
        num_trained_epochs = max(args.epochs - start_epoch, 1)
        print(
            "Total training time: %.3f sec. Average training time per epoch: %.3f sec."
            % (train_time, train_time / num_trained_epochs))
    print("Total testing time: %.3f sec.\n" % test_time)

    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')