Example #1
0
def main(args):
    """Train dual AdaIN generators / multiscale discriminators for
    cross-modality re-id and periodically evaluate the content encoders.

    Args:
        args: parsed command-line namespace (dataset/loader options,
              optimizer and warmup-scheduler settings, log/checkpoint dirs).
    """
    # Reproducibility; cudnn benchmark mode speeds up fixed-size inputs.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Mirror stdout into a log file during training runs.
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log'))

    # Default input resolution depends on the backbone.
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else (256, 128)

    dataset, num_classes, train_loader, trainvallabel, val_loader, query_loader, gallery_loader, query_loader_s, gallery_loader_s = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval, args.flip_prob, args.padding, args.re_prob)
    print(num_classes)

    # One generator/discriminator pair per image domain.
    gen_a = AdaINGen(3, num_classes).cuda()
    gen_b = AdaINGen(3, num_classes).cuda()
    dis_a = MsImageDis(3).cuda()  # discriminator for domain a
    dis_b = MsImageDis(3).cuda()  # discriminator for domain b

    # Retrieval evaluation runs on the two content encoders.
    evaluator = Evaluator(gen_a.enc_content, gen_b.enc_content)
    metric = DistanceMetric(algorithm=args.dist_metric)

    start_epoch = 0
    if args.resume:
        # BUG FIX: the original loaded checkpoint['model'] into an undefined
        # name `model_t`.  Checkpoints written by this function (see the save
        # below) store the encoder sub-modules under these four keys.
        checkpoint = load_checkpoint(args.resume)
        gen_a.enc_content.load_state_dict(checkpoint['content_a'])
        gen_b.enc_content.load_state_dict(checkpoint['content_b'])
        gen_a.enc_style.load_state_dict(checkpoint['style_a'])
        gen_b.enc_style.load_state_dict(checkpoint['style_b'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}".format(start_epoch))

    if args.evaluate:
        # BUG FIX: `model_t` did not exist here either; train the metric on a
        # content encoder, which is what the evaluator embeds images with.
        metric.train(gen_a.enc_content, train_loader)
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery)
        exit()

    # Losses (unused `criterion_z_s` / `criterion_I_s` duplicates removed).
    current_margin = args.margin
    criterion_att = nn.MSELoss().cuda()
    criterion_z = CrossEntropyLabelSmooth(num_classes=num_classes,
                                          epsilon=0.5).cuda()
    criterion_I = TripletLoss(margin=current_margin).cuda()

    # Discriminator / generator optimizers with warmup step schedules.
    dis_opt = torch.optim.Adam(
        [
            {'params': dis_a.parameters(), 'lr': args.dis_lr},
            {'params': dis_b.parameters(), 'lr': args.dis_lr},
        ],
        weight_decay=5e-4)
    gen_opt = torch.optim.Adam(
        [
            {'params': gen_a.parameters(), 'lr': args.gen_lr},
            {'params': gen_b.parameters(), 'lr': args.gen_lr},
        ],
        weight_decay=5e-4)
    dis_scheduler = WarmupMultiStepLR(dis_opt, args.mile_stone, args.gamma,
                                      args.warmup_factor, args.warmup_iters,
                                      args.warmup_methods)
    gen_scheduler = WarmupMultiStepLR(gen_opt, args.mile_stone, args.gamma,
                                      args.warmup_factor, args.warmup_iters,
                                      args.warmup_methods)

    # Trailing constants are positional loss weights — presumably
    # reconstruction/identity weights; confirm against Trainer's signature.
    trainer = Trainer(gen_a, gen_b, dis_a, dis_b, criterion_z, criterion_I,
                      criterion_att, trainvallabel, 1, 1, 0.15, 0.05, 5)

    best_top1 = -1
    # Start training
    for epoch in range(start_epoch, args.epochs):
        print("Begin Train")
        # Epoch-level LR schedule (legacy ordering: step before train).
        gen_scheduler.step()
        dis_scheduler.step()
        trainer.train(epoch, train_loader, dis_opt, gen_opt)

        # BUG FIX: the original saved an identical checkpoint twice per epoch
        # and guarded evaluation with `epoch % 1 == 0`, which is always true;
        # the redundant first save and dead guard were removed.
        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                  dataset.gallery, metric)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'content_a': gen_a.enc_content.state_dict(),
                'content_b': gen_b.enc_content.state_dict(),
                'style_a': gen_a.enc_style.state_dict(),
                'style_b': gen_b.enc_style.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            epoch,
            args.logs_dir,
            fpath='checkpoint.pth.tar')

    print(args)
Example #2
0
def main(args):
    """Jointly train three re-id streams — shared (model_t), RGB-only
    (model_s) and IR-only (model_ir) — and track each stream's best top-1.

    Args:
        args: parsed command-line namespace (dataset/loader options,
              optimizer/scheduler settings, loss margins, log dirs).
    """
    # Reproducibility; cudnn benchmark mode speeds up fixed-size inputs.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Mirror stdout into a log file during training runs.
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir + '/log'))

    # Default input resolution depends on the backbone.
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else (256, 128)
    dataset, num_classes, train_loader, trainvallabel, val_loader, query_loader, gallery_loader, query_loader_rgb, gallery_loader_rgb, query_loader_ir, gallery_loader_ir = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
            args.width, args.batch_size, args.num_instances, args.workers,
            args.combine_trainval, args.flip_prob, args.padding, args.re_prob)

    # Three identically-configured backbones, one per stream.
    model_t = models.create(args.arch, num_classes=num_classes, num_features=args.features, attention_mode=args.att_mode)
    model_s = models.create(args.arch, num_classes=num_classes, num_features=args.features, attention_mode=args.att_mode)
    model_ir = models.create(args.arch, num_classes=num_classes, num_features=args.features, attention_mode=args.att_mode)

    USE_CUDA = torch.cuda.is_available()
    device = torch.device("cuda:0" if USE_CUDA else "cpu")
    model_t = nn.DataParallel(model_t, device_ids=[0, 1, 2])
    model_t.to(device)
    model_s = nn.DataParallel(model_s, device_ids=[0, 1, 2])
    model_s.to(device)
    model_ir = nn.DataParallel(model_ir, device_ids=[0, 1, 2])
    model_ir.to(device)
    print(num_classes)

    # One evaluator/metric per stream.
    evaluator = Evaluator(model_t)
    metric = DistanceMetric(algorithm=args.dist_metric)
    evaluator_s = Evaluator(model_s)
    metric_s = DistanceMetric(algorithm=args.dist_metric)
    evaluator_ir = Evaluator(model_ir)
    metric_ir = DistanceMetric(algorithm=args.dist_metric)

    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        # BUG FIX: `model` and `model_discriminator` were undefined here.
        # Checkpoints below store model_t.module's weights under 'model',
        # so load into the wrapped module (no 'module.' prefix in the keys).
        model_t.module.load_state_dict(checkpoint['model'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}".format(start_epoch))

    if args.evaluate:
        # BUG FIX: was `metric.train(model, ...)` with `model` undefined.
        metric.train(model_t, train_loader)
        evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery)
        exit()

    current_margin = args.margin

    criterion_z = CrossEntropyLabelSmooth(num_classes=num_classes, epsilon=args.epsilon).cuda()
    criterion_att = nn.MSELoss().cuda()
    criterion_I = Rank_loss(margin_1=args.margin_1, margin_2=args.margin_2, alpha_1=args.alpha_1, alpha_2=args.alpha_2).cuda()
    criterion_t = Triplet(margin=current_margin).cuda()

    print(args)

    # Classifier/attention heads get their own param groups; everything else
    # in each backbone is treated as "base" parameters.
    if args.arch == 'ide':
        # BUG FIX: this branch referenced an undefined `model` and left
        # ignored_params_s / ignored_params_ir unset (crashing just below);
        # it now mirrors the non-ide branch for all three streams.
        # NOTE(review): assumes the 'ide' model exposes .model.fc — confirm
        # against models.create('ide', ...).
        ignored_params = list(map(id, model_t.module.model.fc.parameters())) + list(map(id, model_t.module.classifier.parameters()))
        ignored_params_s = list(map(id, model_s.module.model.fc.parameters())) + list(map(id, model_s.module.classifier.parameters()))
        ignored_params_ir = list(map(id, model_ir.module.model.fc.parameters())) + list(map(id, model_ir.module.classifier.parameters()))
    else:
        ignored_params = list(map(id, model_t.module.classifier.parameters())) + list(map(id, model_t.module.attention_module.parameters()))
        ignored_params_s = list(map(id, model_s.module.classifier.parameters())) + list(map(id, model_s.module.attention_module.parameters()))
        ignored_params_ir = list(map(id, model_ir.module.classifier.parameters())) + list(map(id, model_ir.module.attention_module.parameters()))

    base_params = filter(lambda p: id(p) not in ignored_params, model_t.parameters())
    base_params_s = filter(lambda p: id(p) not in ignored_params_s, model_s.parameters())
    base_params_ir = filter(lambda p: id(p) not in ignored_params_ir, model_ir.parameters())

    if args.use_adam:
        optimizer_ft = torch.optim.Adam([
            {'params': filter(lambda p: p.requires_grad, base_params), 'lr': args.lr},
            {'params': filter(lambda p: p.requires_grad, base_params_s), 'lr': args.lr},
            {'params': filter(lambda p: p.requires_grad, base_params_ir), 'lr': args.lr},
            {'params': model_t.module.classifier.parameters(), 'lr': args.lr},
            {'params': model_t.module.attention_module.parameters(), 'lr': args.lr},
            {'params': model_s.module.classifier.parameters(), 'lr': args.lr},
            {'params': model_s.module.attention_module.parameters(), 'lr': args.lr},
            {'params': model_ir.module.classifier.parameters(), 'lr': args.lr},
            {'params': model_ir.module.attention_module.parameters(), 'lr': args.lr},
            ],
            weight_decay=5e-4)
    else:
        # BUG FIX: the SGD branch referenced undefined `model` and
        # `model_discriminator`; it now optimizes the same parameter groups
        # as the Adam branch above.
        optimizer_ft = torch.optim.SGD([
            {'params': filter(lambda p: p.requires_grad, base_params), 'lr': args.lr},
            {'params': filter(lambda p: p.requires_grad, base_params_s), 'lr': args.lr},
            {'params': filter(lambda p: p.requires_grad, base_params_ir), 'lr': args.lr},
            {'params': model_t.module.classifier.parameters(), 'lr': args.lr},
            {'params': model_t.module.attention_module.parameters(), 'lr': args.lr},
            {'params': model_s.module.classifier.parameters(), 'lr': args.lr},
            {'params': model_s.module.attention_module.parameters(), 'lr': args.lr},
            {'params': model_ir.module.classifier.parameters(), 'lr': args.lr},
            {'params': model_ir.module.attention_module.parameters(), 'lr': args.lr},
            ],
            momentum=0.9,
            weight_decay=5e-4,
            nesterov=True)

    print(args.warmup_left)
    # LR decay milestones bracket the warmup window.
    mile_stone = [int(args.warmup_left), int(args.warmup_right)]
    print(mile_stone)
    scheduler = WarmupMultiStepLR(optimizer_ft, mile_stone, args.gamma, args.warmup_factor,
                                  args.warmup_iters, args.warmup_methods)

    trainer = Trainer(model_t, model_s, model_ir, criterion_z, criterion_I, criterion_att, criterion_t, trainvallabel, 1, 1, args.rgb_w, args.ir_w, 1000)

    best_top1 = -1
    best_top1_s = -1
    best_top1_ir = -1
    # Start training
    for epoch in range(start_epoch, args.epochs):
        # Epoch-level LR schedule (legacy ordering: step before train).
        scheduler.step()
        triple_loss, tot_loss = trainer.train(epoch, train_loader, optimizer_ft)

        # Rolling checkpoint of the shared stream every epoch.
        save_checkpoint({
            'model': model_t.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, False, epoch, args.logs_dir, fpath='checkpoint.pth.tar')

        # Evaluate every 10 epochs only (and never at epoch 0).
        if epoch < 1:
            continue
        if not epoch % 10 == 0:
            continue

        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, metric)
        top1_s = evaluator_s.evaluate(query_loader_rgb, gallery_loader_rgb, dataset.query, dataset.gallery, metric_s)
        top1_ir = evaluator_ir.evaluate(query_loader_ir, gallery_loader_ir, dataset.query, dataset.gallery, metric_ir)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'model': model_t.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, epoch, args.logs_dir, fpath='checkpoint.pth.tar')

        # BUG FIX: the RGB/IR saves below previously reused the shared
        # stream's `is_best` / `best_top1` instead of their own.
        is_best_s = top1_s > best_top1_s
        best_top1_s = max(top1_s, best_top1_s)
        save_checkpoint_s({
            'model': model_s.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1_s,
        }, is_best_s, epoch, args.logs_dir, fpath='s_checkpoint.pth.tar')

        is_best_ir = top1_ir > best_top1_ir
        best_top1_ir = max(top1_ir, best_top1_ir)
        save_checkpoint_ir({
            'model': model_ir.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1_ir,
        }, is_best_ir, epoch, args.logs_dir, fpath='ir_checkpoint.pth.tar')

    print(args)
Example #3
0
def main(args):
    """Train or evaluate a PCB re-id model.

    Seeds the RNGs, prepares a timestamped log directory (archiving the
    source scripts into it), builds data loaders and the 'pcb' model,
    optionally resumes from a previous run's best checkpoint, then either
    trains with periodic evaluation/curve-plotting/checkpointing or only
    evaluates, depending on args.train.
    """
    # seed
    if args.seed is not None:
        # Deterministic mode when a seed is given (disables cudnn autotune).
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    else:
        torch.backends.cudnn.benchmark = True

    # Log dir defaults to a timestamped folder under logs/pcb/<dataset>.
    if args.logs_dir is None:
        args.logs_dir = osp.join(
            f'logs/pcb/{args.dataset}',
            datetime.datetime.today().strftime('%Y-%m-%d_%H-%M-%S'))
    else:
        args.logs_dir = osp.join(f'logs/pcb/{args.dataset}', args.logs_dir)
    if args.train:
        # Snapshot the source tree (./reid and top-level *.py) into the log
        # dir for reproducibility, then tee stdout into log.txt.
        os.makedirs(args.logs_dir, exist_ok=True)
        copy_tree('./reid', args.logs_dir + '/scripts/reid')
        for script in os.listdir('.'):
            if script.split('.')[-1] == 'py':
                dst_file = os.path.join(args.logs_dir, 'scripts',
                                        os.path.basename(script))
                shutil.copyfile(script, dst_file)
        sys.stdout = Logger(os.path.join(args.logs_dir, 'log.txt'), )
    print('Settings:')
    print(vars(args))
    print('\n')

    # Create data loaders
    dataset, num_classes, train_loader, query_loader, gallery_loader, camstyle_loader = \
        get_data(args.dataset, args.data_dir, args.height, args.width, args.batch_size, args.num_workers,
                 args.combine_trainval, args.crop, args.tracking_icams, args.tracking_fps, args.re, 0, args.camstyle)

    # Create model
    model = models.create('pcb',
                          feature_dim=args.feature_dim,
                          num_classes=num_classes,
                          norm=args.norm,
                          dropout=args.dropout,
                          last_stride=args.last_stride)

    # Load from checkpoint (resumes from the *previous run's* best model,
    # then continues from the epoch after the one recorded there).
    start_epoch = best_top1 = 0
    if args.resume:
        resume_fname = osp.join(f'logs/pcb/{args.dataset}', args.resume,
                                'model_best.pth.tar')
        model, start_epoch, best_top1 = checkpoint_loader(model, resume_fname)
        print("=> Last epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
        start_epoch += 1
    model = nn.DataParallel(model).cuda()

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    if hasattr(model.module,
               'base'):  # low learning_rate the base network (aka. ResNet-50)
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        # 'lr_mult' is consumed by adjust_lr below to scale args.lr per group.
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Evaluator
    evaluator = Evaluator(model)

    if args.train:
        # Schedule learning rate: decay 10x every step_size epochs, scaled
        # per param group by its lr_mult (base net learns 10x slower).
        def adjust_lr(epoch):
            step_size = args.step_size
            lr = args.lr * (0.1**(epoch // step_size))
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)

        # Draw Curve: accumulators for the training/eval curves.
        epoch_s = []
        loss_s = []
        prec_s = []
        eval_epoch_s = []
        eval_top1_s = []

        # Start training
        for epoch in range(start_epoch + 1, args.epochs + 1):
            t0 = time.time()
            adjust_lr(epoch)
            # train_loss, train_prec = 0, 0
            train_loss, train_prec = trainer.train(epoch,
                                                   train_loader,
                                                   optimizer,
                                                   fix_bn=args.fix_bn)

            # Skip evaluation/checkpointing for early epochs.
            if epoch < args.start_save:
                continue

            # Full evaluation only every 5 epochs; otherwise record 0.
            if epoch % 5 == 0:
                top1 = evaluator.evaluate(query_loader, gallery_loader,
                                          dataset.query, dataset.gallery)
                eval_epoch_s.append(epoch)
                eval_top1_s.append(top1)
            else:
                top1 = 0

            is_best = top1 >= best_top1
            best_top1 = max(top1, best_top1)
            save_checkpoint(
                {
                    'state_dict': model.module.state_dict(),
                    'epoch': epoch,
                    'best_top1': best_top1,
                },
                is_best,
                fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
            epoch_s.append(epoch)
            loss_s.append(train_loss)
            prec_s.append(train_prec)
            draw_curve(os.path.join(args.logs_dir, 'train_curve.jpg'), epoch_s,
                       loss_s, prec_s, eval_epoch_s, None, eval_top1_s)

            t1 = time.time()
            t_epoch = t1 - t0
            print(
                '\n * Finished epoch {:3d}  top1: {:5.1%}  best_eval: {:5.1%} {}\n'
                .format(epoch, top1, best_top1, ' *' if is_best else ''))
            print(
                '*************** Epoch takes time: {:^10.2f} *********************\n'
                .format(t_epoch))
            pass

        # Final test: reload the best checkpoint from this run and evaluate.
        print('Test with best model:')
        model, start_epoch, best_top1 = checkpoint_loader(
            model, osp.join(args.logs_dir, 'model_best.pth.tar'))
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))

        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery)
    else:
        # Evaluation-only mode.
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery)
        pass
Example #4
0
def main(args):
    """Train a cross-trihard re-id model (cross-entropy + triplet loss).

    Optionally warm-starts the backbone from external weights
    (args.weights, with per-architecture classifier-key surgery), or
    resumes full training state from args.resume, then trains with a
    multi-step LR schedule, checkpointing whenever evaluation improves.
    """
    print(args)
    # Reproducibility; cudnn benchmark mode speeds up fixed-size inputs.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders (resize to big_*, then crop/train at target_*).
    if args.big_height is None or args.big_width is None or args.target_height is None or args.target_width is None:
        args.big_height, args.big_width, args.target_height, args.target_width = (
            256, 256, 224, 224)
    dataset, num_classes, train_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.big_height, args.big_width,
                 args.target_height, args.target_width, args.batch_size, args.num_instances,
                 args.workers, args.combine_trainval)

    # Create model
    model = models.create(args.arch,
                          num_classes=num_classes,
                          num_features=args.features)

    # Load from checkpoint. args.weights holds backbone-only weights whose
    # classifier keys must be dropped (names differ per architecture)
    # before loading into model.base.
    start_epoch = best = 0
    if args.weights:
        checkpoint = load_checkpoint(args.weights)
        if args.arch == 'cross_trihard_senet101' or args.arch == 'cross_trihard_se_resnet152':
            del (checkpoint['last_linear.weight'])
            del (checkpoint['last_linear.bias'])
            model.base.load_state_dict(checkpoint)
            #model.base.load_param(args.weights)
        elif args.arch == 'cross_trihard_mobilenet':
            # Weights stored under 'state_dict' with a 'module.' prefix;
            # remap the prefix to 'base' and merge into the full model.
            del (checkpoint['state_dict']['module.fc.weight'])
            del (checkpoint['state_dict']['module.fc.bias'])
            model_dict = model.state_dict()
            checkpoint_load = {
                k.replace('module', 'base'): v
                for k, v in (checkpoint['state_dict']).items()
            }
            model_dict.update(checkpoint_load)
            model.load_state_dict(model_dict)
        elif args.arch == 'cross_trihard_shufflenetv2':
            del (checkpoint['classifier.0.weight'])
            del (checkpoint['classifier.0.bias'])
            model.base.load_state_dict(checkpoint)
        elif args.arch == 'cross_trihard_densenet121':
            # Old torchvision DenseNet checkpoints use dotted layer names
            # (e.g. 'norm.1'); rewrite them to the fused form ('norm1').
            del (checkpoint['classifier.weight'])
            del (checkpoint['classifier.bias'])
            pattern = re.compile(
                r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
            )
            for key in list(checkpoint.keys()):
                res = pattern.match(key)
                if res:
                    new_key = res.group(1) + res.group(2)
                    checkpoint[new_key] = checkpoint[key]
                    del checkpoint[key]
            model.base.load_state_dict(checkpoint)
        elif args.arch == 'cross_trihard_vgg19bn':
            del (checkpoint['classifier.6.weight'])
            del (checkpoint['classifier.6.bias'])
            model.base.load_state_dict(checkpoint)
        else:
            del (checkpoint['fc.weight'])
            del (checkpoint['fc.bias'])
            model.base.load_state_dict(checkpoint)
    # Resume takes the whole model state; skipped if warm-start weights
    # were given. NOTE(review): `best` is not restored from the checkpoint,
    # so the printed best top1 is always 0 here.
    if args.resume and not args.weights:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}  best top1 {:.1%}".format(start_epoch, best))

    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
        return

    # Criterion: classification loss + margin-based triplet ("trihard") loss.
    ranking_loss = nn.MarginRankingLoss(margin=args.margin).cuda()
    criterion = { 'crossentropy': nn.CrossEntropyLoss().cuda(), \
                  'trihard': TripletLoss(ranking_loss).cuda() }

    # Optimizer: base network learns 10x slower than the new layers
    # ('lr_mult' is consumed by adjust_lr below).
    if hasattr(model.module, 'base'):
        base_params = []
        base_bn_params = []
        for name, p in model.module.base.named_parameters():
            if 'bn' in name:
                base_bn_params.append(p)
            else:
                base_params.append(p)
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': base_params,
            'lr_mult': 0.1
        }, {
            'params': base_bn_params,
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': args.lr_mult
        }]
    else:
        param_groups = model.parameters()

    if args.optimizer == 0:
        optimizer = torch.optim.SGD(param_groups,
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=True)
    else:
        print('Adam')
        optimizer = torch.optim.Adam(params=param_groups,
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)

    # Trainer
    trainer = Cross_Trihard_Trainer(model,
                                    criterion,
                                    metric_loss_weight=args.metric_loss_weight)

    # Schedule learning rate: three-step decay (1x / 0.1x / 0.01x / 0.001x),
    # scaled per param group by its lr_mult; returns the base lr for logging.
    def adjust_lr(epoch):
        step_size, step_size2, step_size3 = args.step_size, args.step_size2, args.step_size3
        #lr = args.lr * (0.1 ** (epoch // step_size))
        if epoch <= step_size:
            lr = args.lr
        elif epoch <= step_size2:
            lr = args.lr * 0.1
        elif epoch <= step_size3:
            lr = args.lr * 0.01
        else:
            lr = args.lr * 0.001
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
        return lr

    # Start training
    for epoch in range(start_epoch + 1, args.epochs + 1):
        lr = adjust_lr(epoch)
        trainer.train(epoch,
                      train_loader,
                      optimizer,
                      lr,
                      warm_up=True,
                      warm_up_ep=args.warm_up_ep)
        # Evaluate on a fixed interval, or every epoch once past
        # args.dense_evaluate; save a per-epoch snapshot on improvement.
        if epoch % args.epoch_inter == 0 or epoch >= args.dense_evaluate:
            tmp_res = evaluator.evaluate(test_loader, dataset.query,
                                         dataset.gallery)
            print('tmp_res: ', tmp_res)
            print('best: ', best)
            if tmp_res > best and epoch >= args.start_save:
                best = tmp_res
                save_checkpoint(
                    {
                        'state_dict': model.module.state_dict(),
                        'epoch': epoch,
                    },
                    False,
                    fpath=osp.join(args.logs_dir, 'pass%d.pth.tar' % (epoch)))
def main(args):
    """Unsupervised domain adaptation for person re-ID via self-labeling.

    Iteratively: extract target-domain features, cluster them with KMeans,
    relabel the target training images with their cluster ids, and fine-tune
    the model on that pseudo-labeled set. Finally evaluates on the target
    test split and saves the adapted checkpoint.

    Returns:
        (mAP, rank-1) of the final evaluation on the target query/gallery.
    """
    # Fix RNG seeds for reproducibility; cudnn.benchmark trades determinism
    # for speed on fixed-size inputs.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    # get source data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get target data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hacking here to let the classifier be the number of source ids
    # NOTE(review): class counts are hard-coded (632 for DukeMTMC, 676 for
    # Market-1501) — verify they match the pretraining split actually used.
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise RuntimeError(
            'Please specify the number of classes (ids) of the network.')

    # Load from checkpoint
    if args.resume:
        print(
            'Resuming checkpoints from finetuned model on another dataset...\n'
        )
        checkpoint = load_checkpoint(args.resume)
        # strict=False tolerates the classifier-size mismatch noted above.
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        # NOTE(review): raising a Warning subclass aborts execution here —
        # a source-pretrained checkpoint is effectively mandatory.
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()

    # evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    # if args.evaluate: return

    # Criterion
    # Two triplet losses with semi-hard mining; presumably one per view/branch
    # consumed by Trainer — confirm against Trainer's signature.
    criterion = [
        TripletLoss(args.margin, args.num_instances, isAvg=True,
                    use_semi=True).cuda(),
        TripletLoss(args.margin, args.num_instances, isAvg=True,
                    use_semi=True).cuda(),
    ]

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # training stage transformer on input images
    # ImageNet mean/std normalization plus random-erasing augmentation.
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            # Source features unused when lambda is 0; keep a cheap sentinel.
            source_features = 0
        else:
            # get source datas' feature
            source_features, _ = extract_features(model,
                                                  src_extfeat_loader,
                                                  print_freq=args.print_freq)
            # synchronization feature order with src_dataset.train
            source_features = torch.cat([
                source_features[f].unsqueeze(0)
                for f, _, _ in src_dataset.train
            ], 0)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(
            iter_n + 1))
        target_features, tarNames = extract_features(
            model, tgt_extfeat_loader, print_freq=args.print_freq)
        # synchronization feature order with dataset.train
        target_features = torch.cat([
            target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval
        ], 0)
        # Ground-truth ids, used only to choose the number of clusters below.
        # NOTE(review): tarNames[f] appears to be a tensor (it has
        # .unsqueeze) — confirm set()/len() over the resulting array behaves
        # as intended for counting distinct ids.
        target_real_label = np.asarray(
            [tarNames[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval])
        numTarID = len(set(target_real_label))
        # calculate distance and rerank result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
        # NOTE(review): KMeans(n_jobs=...) was removed in scikit-learn 0.25+;
        # this requires an older sklearn.
        cluster = KMeans(n_clusters=numTarID, n_jobs=8, n_init=1)

        # select & cluster images as training set of this epochs
        print('Clustering and labeling...')
        clusterRes = cluster.fit(target_features)
        labels, centers = clusterRes.labels_, clusterRes.cluster_centers_
        # labels = splitLowconfi(target_features,labels,centers)
        # num_ids = len(set(labels))
        # print('Iteration {} have {} training ids'.format(iter_n+1, num_ids))
        # generate new dataset
        # Re-label every trainval image with its cluster id (pseudo label).
        new_dataset = []
        for (fname, _, cam), label in zip(tgt_dataset.trainval, labels):
            # if label==-1: continue
            # dont need to change codes in trainer.py _parsing_input function and sampler function after add 0
            new_dataset.append((fname, label, cam))
        print('Iteration {} have {} training images'.format(
            iter_n + 1, len(new_dataset)))
        train_loader = DataLoader(Preprocessor(new_dataset,
                                               root=tgt_dataset.images_dir,
                                               transform=train_transformer),
                                  batch_size=args.batch_size,
                                  num_workers=4,
                                  sampler=RandomIdentitySampler(
                                      new_dataset, args.num_instances),
                                  pin_memory=True,
                                  drop_last=True)

        # train model with new generated dataset
        trainer = Trainer(model, criterion)

        evaluator = Evaluator(model, print_freq=args.print_freq)

        # Start training
        for epoch in range(args.epochs):
            # trainer.train(epoch, remRate=0.2+(0.6/args.iteration)*(1+iter_n)) # to at most 80%
            trainer.train(epoch, train_loader, optimizer)
        # test only
        rank_score = evaluator.evaluate(test_loader, tgt_dataset.query,
                                        tgt_dataset.gallery)
        #print('co-model:\n')
        #rank_score = evaluatorB.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)

    # Evaluate
    # NOTE(review): `evaluator`/`epoch` are defined inside the loops above, so
    # args.iteration == 0 or args.epochs == 0 would raise NameError here.
    rank_score = evaluator.evaluate(test_loader, tgt_dataset.query,
                                    tgt_dataset.gallery)
    save_checkpoint(
        {
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': rank_score.market1501[0],
        },
        True,
        fpath=osp.join(args.logs_dir, 'adapted.pth.tar'))
    return (rank_score.map, rank_score.market1501[0])
Example #6
0
def main(args):
    """Supervised training entry point for a re-ID model on a single GPU.

    Sets up data loaders, builds the model, optionally resumes from a
    checkpoint, trains with SGD + step learning-rate decay, checkpoints each
    epoch, evaluates every 5 epochs, and runs a final evaluation.
    """
    # For fast training.
    # NOTE(review): seeds are hard-coded (10000/1000/10000) rather than taken
    # from args — confirm this is intentional.
    np.random.seed(10000)
    torch.manual_seed(1000)
    torch.cuda.manual_seed_all(10000)
    cudnn.benchmark = True
    device = torch.device('cuda:' + str(args.gpuid))
    torch.cuda.set_device(device)
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    print('logs_dir=', args.logs_dir)

    # Print logs
    print(args)

    # Create data loaders
    dataset, num_classes, source_train_loader, \
    query_loader, gallery_loader = get_data(args.data_dir, args.source,
                                            args.target, args.height,
                                            args.width, args.batch_size,
                                            args.re, args.workers,args)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)

    # Invariance learning model
    # num_tgt = len(dataset.target_train)
    # model_inv = InvNet(args.features, num_tgt,
    #                     beta=args.inv_beta, knn=args.knn,
    #                     alpha=args.inv_alpha)

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        # model_inv.load_state_dict(checkpoint['state_dict_inv'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))

    # Set model
    # Single-GPU DataParallel wrapper (keeps the .module indirection used by
    # save_checkpoint below).
    model = nn.DataParallel(model, device_ids=[int(args.gpuid)])
    # model_inv = model_inv.to(device)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery, args.print_freq,
                           args.output_feature)
        return

    # Optimizer
    # Backbone ("base") parameters train at 0.1x the learning rate of the
    # newly added head parameters (via the lr_mult consumed in adjust_lr).
    base_param_ids = set(map(id, model.module.base.parameters()))

    base_params_need_for_grad = filter(lambda p: p.requires_grad,
                                       model.module.base.parameters())

    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': base_params_need_for_grad,
        'lr_mult': 0.1
    }, {
        'params': new_params,
        'lr_mult': 1.0
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    # trainer = Trainer(model, '', lmd=args.lmd)
    trainer = Trainer(model, None, lmd=args.lmd)

    # Schedule learning rate
    def adjust_lr(epoch):
        # Multiply lr by 0.1 every args.epochs_decay epochs, scaled per group.
        step_size = args.epochs_decay
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        print(epoch)
        adjust_lr(epoch)
        trainer.sys_train(epoch,
                          source_train_loader,
                          optimizer,
                          print_freq=args.print_freq)

        # Checkpoint every epoch (overwrites the same file).
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                # 'state_dict_inv': model_inv.state_dict(),
                'epoch': epoch + 1,
            },
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d} \n'.format(epoch))
        # Periodic evaluation every 5 epochs.
        if epoch % 5 == 0:
            print(' test with epoch {:3d} : ---'.format(epoch))
            evaluator = Evaluator(model)
            evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                               dataset.gallery, args.print_freq,
                               args.output_feature)

    # Final test
    # NOTE(review): despite the message, this evaluates the *last* model, not
    # a best checkpoint — no best model is tracked above.
    print('Final Test : Testing with best model......')
    evaluator = Evaluator(model)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery, args.print_freq, args.output_feature)
Example #7
0
def main(args):
    """Jointly train a target model (model_t) and a source model (model_s)
    with classification, triplet, and attention-alignment (MSE) losses.

    Both models are wrapped in DataParallel, trained by a shared optimizer
    under a warmup multi-step LR schedule, evaluated every 10 epochs, and
    checkpointed separately ('checkpoint.pth.tar' / 's_checkpoint.pth.tar',
    with best-model copies). Finishes by re-evaluating the best saved
    source model.

    Fixes applied in review:
      * `save_checkpoint_s` was passed `is_best` (the *target* model's flag)
        instead of `is_best_s`, so 's_model_best.pth.tar' — reloaded at the
        end of this function — could hold a non-best source model.
      * The final model_s summary line also tested `is_best` instead of
        `is_best_s`.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir + '/log'))

    if args.height is None or args.width is None:
        args.height, args.width = (144,
                                   56) if args.arch == 'inception' else (256,
                                                                         128)
    dataset, num_classes, train_loader, trainvallabel, val_loader, query_loader, gallery_loader, query_loader_s, gallery_loader_s = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
            args.width, args.batch_size, args.num_instances, args.workers,
            args.combine_trainval, args.flip_prob, args.padding, args.re_prob)
    print(num_classes)
    # model_s, model_t, model_discriminator = models.create(args.arch, num_classes=num_classes, num_features=args.features)
    model_t = models.create(args.arch,
                            num_classes=num_classes,
                            num_features=args.features,
                            attention_mode=args.att_mode)
    model_s = models.create(args.arch,
                            num_classes=num_classes,
                            num_features=args.features,
                            attention_mode=args.att_mode)
    # load source network
    # checkpoint_s = load_checkpoint('/home/fan/cross_reid/source_net/model_best.pth.tar')
    #
    # model_dict = model_s.state_dict()
    # state_dict = {k:v for k,v in checkpoint_s.items() if k in model_dict.keys()}
    # model_dict.update(state_dict)

    # print(model_s)
    # print(checkpoint_s['model'])
    # print(model_dict.keys())
    # model_s.load_state_dict(model_dict)
    # print(model_s)
    USE_CUDA = torch.cuda.is_available()
    device = torch.device("cuda:0" if USE_CUDA else "cpu")
    # NOTE(review): GPU ids [0, 2] are hard-coded — verify against the
    # machines this runs on.
    model_s = nn.DataParallel(model_s, device_ids=[0, 2])
    model_s.to(device)
    # model_s = model_s.cuda()
    model_t = nn.DataParallel(model_t, device_ids=[0, 2])
    model_t.to(device)
    # model_discriminator = model_discriminator.cuda()

    evaluator = Evaluator(model_t)
    metric = DistanceMetric(algorithm=args.dist_metric)

    evaluator_s = Evaluator(model_s)
    metric_s = DistanceMetric(algorithm=args.dist_metric)

    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model_t.load_state_dict(checkpoint['model'])
        # model_discriminator.load_state_dict(checkpoint['model_discriminator'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}".format(start_epoch))

    if args.evaluate:
        metric.train(model_t, train_loader)
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery)
        exit()

    # Losses: label-smoothed cross-entropy + triplet for each model, and an
    # MSE loss aligning the two models' attention maps.
    current_margin = args.margin
    criterion_z_s = CrossEntropyLabelSmooth(num_classes=num_classes,
                                            epsilon=0.5).cuda()
    criterion_att = nn.MSELoss().cuda()
    criterion_z = CrossEntropyLabelSmooth(num_classes=num_classes,
                                          epsilon=0.5).cuda()
    criterion_I = TripletLoss(margin=current_margin).cuda()
    criterion_I_s = TripletLoss_s(margin=current_margin).cuda()
    # criterion_D = nn.CrossEntropyLoss().cuda()

    print(args)

    # Exclude classifier/attention heads from the backbone parameter groups;
    # heads get their own optimizer entries below.
    if args.arch == 'ide':
        # NOTE(review): this branch never defines ignored_params_s, so the
        # filter below would raise NameError for arch == 'ide' — confirm
        # whether 'ide' is actually supported here.
        ignored_params = list(map(id, model_t.model.fc.parameters())) + list(
            map(id, model_t.classifier.parameters()))
    else:
        ignored_params = list(map(
            id, model_t.module.classifier.parameters())) + list(
                map(id, model_t.module.attention_module.parameters()))
        ignored_params_s = list(map(
            id, model_s.module.classifier.parameters())) + list(
                map(id, model_s.module.attention_module.parameters()))

    base_params = filter(lambda p: id(p) not in ignored_params,
                         model_t.parameters())
    base_params_s = filter(lambda p: id(p) not in ignored_params_s,
                           model_s.parameters())

    # One optimizer over both models' backbones and heads.
    if args.use_adam:
        optimizer_ft = torch.optim.Adam([
            {
                'params': filter(lambda p: p.requires_grad, base_params),
                'lr': args.lr
            },
            {
                'params': filter(lambda p: p.requires_grad, base_params_s),
                'lr': args.lr
            },
            {
                'params': model_s.module.classifier.parameters(),
                'lr': args.lr
            },
            {
                'params': model_s.module.attention_module.parameters(),
                'lr': args.lr
            },
            {
                'params': model_t.module.classifier.parameters(),
                'lr': args.lr
            },
            {
                'params': model_t.module.attention_module.parameters(),
                'lr': args.lr
            },
        ],
                                        weight_decay=5e-4)

        # optimizer_discriminator = torch.optim.Adam([
        #     {'params': model_discriminator.model.parameters(), 'lr': args.lr},
        #     {'params': model_discriminator.classifier.parameters(), 'lr': args.lr}
        #     ],
        #     weight_decay=5e-4)

    else:
        # NOTE(review): the SGD branch accesses model_s.classifier (no
        # .module) unlike the Adam branch — likely fails under DataParallel;
        # confirm before using use_adam=False.
        optimizer_ft = torch.optim.SGD([
            {
                'params': filter(lambda p: p.requires_grad, base_params),
                'lr': args.lr
            },
            {
                'params': filter(lambda p: p.requires_grad, base_params_s),
                'lr': args.lr
            },
            {
                'params': model_s.classifier.parameters(),
                'lr': args.lr
            },
            {
                'params': model_s.attention_module.parameters(),
                'lr': args.lr
            },
            {
                'params': model_t.classifier.parameters(),
                'lr': args.lr
            },
            {
                'params': model_t.attention_module.parameters(),
                'lr': args.lr
            },
        ],
                                       momentum=0.9,
                                       weight_decay=5e-4,
                                       nesterov=True)
        # optimizer_discriminator = torch.optim.SGD([
        #      {'params': model_discriminator.model.parameters(), 'lr': args.lr},
        #      {'params': model_discriminator.classifier.parameters(), 'lr': args.lr},
        #     ],
        #     momentum=0.9,
        #     weight_decay=5e-4,
        #     nesterov=True)

    scheduler = WarmupMultiStepLR(optimizer_ft, args.mile_stone, args.gamma,
                                  args.warmup_factor, args.warmup_iters,
                                  args.warmup_methods)

    # NOTE(review): the trailing loss-weight constants (1, 1, 0.15, 0.05, 5)
    # are hard-coded — confirm against Trainer's signature.
    trainer = Trainer(model_s, model_t, criterion_z, criterion_z_s,
                      criterion_I, criterion_I_s, criterion_att, trainvallabel,
                      1, 1, 0.15, 0.05, 5)

    best_top1 = -1
    best_top1_s = -1
    # Start training
    for epoch in range(start_epoch, args.epochs):
        scheduler.step()
        triple_loss, tot_loss, att_loss = trainer.train(
            epoch, train_loader, optimizer_ft)

        # Unconditional rolling checkpoint of the target model (never best).
        save_checkpoint(
            {
                'model': model_t.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            False,
            epoch,
            args.logs_dir,
            fpath='checkpoint.pth.tar')

        # checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
        # for i, keys in enumerate(checkpoint):
        # # model_t.load_state_dict(checkpoint['model'])
        #     print(keys)
        #     print(i)

        # if epoch < 200:
        #     continue
        # Evaluate (and track best checkpoints) only every 10th epoch.
        if epoch % 10 != 0:
            continue

        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                  dataset.gallery, metric)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'model': model_t.module.state_dict(),
                # 'model_discriminator': model_discriminator.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            epoch,
            args.logs_dir,
            fpath='checkpoint.pth.tar')

        top1_s = evaluator_s.evaluate(query_loader_s, gallery_loader_s,
                                      dataset.query, dataset.gallery, metric)

        is_best_s = top1_s > best_top1_s
        best_top1_s = max(top1_s, best_top1_s)
        save_checkpoint_s(
            {
                'model': model_s.module.state_dict(),
                # 'model_discriminator': model_discriminator.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1_s,
            },
            is_best_s,  # FIX: was is_best — saved s_model_best from the wrong flag
            epoch,
            args.logs_dir,
            fpath='s_checkpoint.pth.tar')

    print('Test with best model_t:')
    print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.format(
        epoch, top1, best_top1, ' *' if is_best else ''))

    print('Test with best model_s:')
    print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.format(
        epoch, top1_s, best_top1_s, ' *' if is_best_s else ''))  # FIX: was is_best

    # state_dict = {k:v for k,v in checkpoint_s.items() if k in model_dict.keys()}
    # model_dict.update(state_dict)
    metric.train(model_t, train_loader)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery, metric)

    # Reload and report the best source model saved during training.
    checkpoint_s = load_checkpoint(
        osp.join(args.logs_dir, 's_model_best.pth.tar'))

    model_s.load_state_dict(checkpoint_s['model'])
    evaluator_s.evaluate(query_loader_s, gallery_loader_s, dataset.query,
                         dataset.gallery, metric)

    print(args)
Example #8
0
def main(args):
    """Self-labeling domain adaptation with re-ranked distance clustering.

    Each iteration: extract source/target features (or load cached distances),
    compute (possibly re-ranked) distance matrices, generate pseudo labels via
    `generate_selflabel`, build data loaders from them, and fine-tune through
    `iter_trainer`, checkpointing whenever target rank-1 improves.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    ## get_source_data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get_target_data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(2048) -> FC(args.features)
    # NOTE(review): both dataset branches build the identical model
    # (num_classes=0) — the if/elif only guards against unknown datasets.
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch,
                              num_classes=0,
                              num_split=args.num_split,
                              cluster=args.dce_loss)  #duke
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch,
                              num_classes=0,
                              num_split=args.num_split,
                              cluster=args.dce_loss)
    else:
        raise RuntimeError(
            'Please specify the number of classes (ids) of the network.')

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        print(
            'Resuming checkpoints from finetuned model on another dataset...\n'
        )
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint, strict=False)
    else:
        # NOTE(review): raising a Warning subclass aborts execution here — a
        # pretrained checkpoint is effectively mandatory.
        raise RuntimeWarning('Not using a pre-trained model')
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model, print_freq=args.print_freq)
    print("Test with the original model trained on source domain:")
    best_top1 = evaluator.evaluate(test_loader, tgt_dataset.query,
                                   tgt_dataset.gallery)
    if args.evaluate:
        return

    # Criterion
    criterion = []
    criterion.append(
        TripletLoss(margin=args.margin,
                    num_instances=args.num_instances).cuda())
    criterion.append(
        TripletLoss(margin=args.margin,
                    num_instances=args.num_instances).cuda())

    #multi lr
    # NOTE(review): both groups use lr_mult 1.0, so the split currently has
    # no effect — presumably left from a 0.1x-backbone configuration.
    base_param_ids = set(map(id, model.module.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': model.module.base.parameters(),
        'lr_mult': 1.0
    }, {
        'params': new_params,
        'lr_mult': 1.0
    }]
    # Optimizer
    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=args.weight_decay)

    ##### adjust lr
    # Step schedule: full lr through epoch 7, 0.3x through 14, then 0.1x.
    # NOTE(review): defined but not called in this function — presumably
    # consumed inside iter_trainer; confirm.
    def adjust_lr(epoch):
        if epoch <= 7:
            lr = args.lr
        elif epoch <= 14:
            lr = 0.3 * args.lr
        else:
            lr = 0.1 * args.lr
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    ##### training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # Start training
    iter_nums = args.iteration
    cluster_list = []
    top_percent = args.rho
    for iter_n in range(0, iter_nums):
        #### get source datas' feature
        if args.load_dist and iter_n == 0:
            # Load precomputed distance matrices from disk.
            # NOTE(review): pickle on an external file — only load trusted
            # caches; the file handle is also never closed explicitly.
            dist = pickle.load(
                open('dist' + str(args.num_split) + '.pkl', 'rb'))
            euclidean_dist_list = dist['euclidean']
            rerank_dist_list = dist['rerank']
        else:
            source_features, _ = extract_features(model,
                                                  src_extfeat_loader,
                                                  for_eval=False)
            # Features may come back as a list (one tensor per body split)
            # or as a single tensor; handle both layouts.
            if isinstance(source_features[src_dataset.trainval[0][0]], list):
                len_f = len(source_features[src_dataset.trainval[0][0]])
                source_features = [
                    torch.cat([
                        source_features[f][i].unsqueeze(0)
                        for f, _, _ in src_dataset.trainval
                    ], 0) for i in range(len_f)
                ]
            else:
                source_features = torch.cat([
                    source_features[f].unsqueeze(0)
                    for f, _, _ in src_dataset.trainval
                ], 0)  # synchronization feature order with s_dataset.trainval
            #### extract training images' features
            print('Iteration {}: Extracting Target Dataset Features...'.format(
                iter_n + 1))
            target_features, _ = extract_features(model,
                                                  tgt_extfeat_loader,
                                                  for_eval=False)
            if isinstance(target_features[tgt_dataset.trainval[0][0]], list):
                len_f = len(target_features[tgt_dataset.trainval[0][0]])
                target_features = [
                    torch.cat([
                        target_features[f][i].unsqueeze(0)
                        for f, _, _ in tgt_dataset.trainval
                    ], 0) for i in range(len_f)
                ]
            else:
                target_features = torch.cat([
                    target_features[f].unsqueeze(0)
                    for f, _, _ in tgt_dataset.trainval
                ], 0)  # synchronization feature order with dataset.trainval
            #### calculate distance and rerank result
            print('Calculating feature distances...')
            # target_features = target_features.numpy()
            euclidean_dist_list, rerank_dist_list = compute_dist(
                source_features,
                target_features,
                lambda_value=args.lambda_value,
                no_rerank=args.no_rerank,
                num_split=args.num_split)  # lambda=1 means only source dist
            # Free the large feature tensors before clustering/training.
            del target_features
            del source_features

        labels_list, cluster_list = generate_selflabel(euclidean_dist_list,
                                                       rerank_dist_list,
                                                       iter_n, args,
                                                       cluster_list)
        #### generate new dataset
        train_loader_list = generate_dataloader(tgt_dataset, labels_list,
                                                train_transformer, iter_n,
                                                args)
        del labels_list
        # del cluster_list
        top1 = iter_trainer(model, tgt_dataset, train_loader_list, test_loader,
                            optimizer, criterion, args.epochs, args.logs_dir,
                            args.print_freq)

        # Keep the best-so-far model by target rank-1.
        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': iter_n + 1,
                'best_top1': best_top1,
                # 'num_ids': num_ids,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(iter_n + 1, top1, best_top1, ' *' if is_best else ''))
Example #9
0
def run():
    """Jointly train a densenet classifier and a generator ``netG``.

    Configuration comes from the module-level ``opt`` namespace.  Loads the
    train/val/query/gallery split lists from ``opt.data_dir``, builds the
    data loaders, optionally restores weights, evaluates the starting point,
    then trains: every ``opt.CRITIC_ITERS``-th batch takes a combined step on
    the classifier and the generator; image grids and weight snapshots are
    dumped periodically and the model is evaluated after each epoch.

    Relies on module-level helpers: ``opt``, ``densenet121``, ``netg``,
    ``get_data``, ``Evaluator``, ``DistanceMetric``, ``show_info``,
    ``load_params``, ``mult_lr``, ``adjust_lr``, ``accuracy``,
    ``AverageMeter``, ``save_checkpoint``, ``load_checkpoint``.
    """
    np.random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    cudnn.benchmark = True
    data_dir = opt.data_dir

    def readlist(path):
        """Parse lines of ``name pid cam`` into ``(name, int(pid), int(cam))``."""
        lines = []
        with open(path, 'r') as f:
            data = f.readlines()
        for line in data:
            name, pid, cam = line.split()
            lines.append((name, int(pid), int(cam)))
        return lines

    # Load the split lists; each is optional on disk.  As before, a missing
    # list only prints a warning (it surfaces later as a NameError if used).
    if osp.exists(osp.join(data_dir, 'train.txt')):
        train_list = readlist(osp.join(data_dir, 'train.txt'))
    else:
        print("The training list doesn't exist")

    if osp.exists(osp.join(data_dir, 'val.txt')):
        val_list = readlist(osp.join(data_dir, 'val.txt'))
    else:
        print("The validation list doesn't exist")

    if osp.exists(osp.join(data_dir, 'query.txt')):
        query_list = readlist(osp.join(data_dir, 'query.txt'))
    else:
        print("The query.txt doesn't exist")

    if osp.exists(osp.join(data_dir, 'gallery.txt')):
        gallery_list = readlist(osp.join(data_dir, 'gallery.txt'))
    else:
        print("The gallery.txt doesn't exist")

    # Default input resolution depends on the backbone architecture.
    if opt.height is None or opt.width is None:
        opt.height, opt.width = (144, 56) if opt.arch == 'inception' else \
                                  (256, 128)

    train_loader, val_loader, test_loader = \
        get_data(opt.split, data_dir, opt.height,
                 opt.width, opt.batchSize, opt.workers,
                 opt.combine_trainval, train_list, val_list, query_list, gallery_list)

    # Create model.  The class count is dataset-specific (see training list).
    densenet = densenet121(num_classes=20330, num_features=256)
    start_epoch = best_top1 = 0
    if opt.resume:
        densenet.load_state_dict(torch.load(opt.resume))
        start_epoch = opt.resume_epoch
        print("=> Finetune Start epoch {} ".format(start_epoch))
    if opt.pretrained_model:
        print('Start load params...')
        load_params(densenet, opt.pretrained_model)

    metric = DistanceMetric(algorithm=opt.dist_metric)
    print('densenet')
    show_info(densenet, with_arch=True, with_grad=False)
    netG = netg()
    print('netG')
    show_info(netG, with_arch=True, with_grad=False)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    if opt.cuda:
        netG = netG.cuda()
        densenet = densenet.cuda()
    perceptionloss = perception_loss(cuda=opt.cuda)
    l2loss = l2_loss(cuda=opt.cuda)

    # Evaluate the starting point before any training.
    evaluator = Evaluator(densenet)
    metric.train(densenet, train_loader)
    print("Validation:")
    evaluator.evaluate(val_loader, val_list, val_list, metric)
    print("Test:")
    evaluator.evaluate(test_loader, query_list, gallery_list, metric)

    # Criterion and optimizers (SGD for the classifier, Adam for netG).
    criterion = nn.CrossEntropyLoss().cuda()
    param_groups = []
    mult_lr(densenet, param_groups)
    optimizer = optim.SGD(param_groups,
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    optimizerG = optim.Adam(netG.parameters(),
                            lr=opt.lr,
                            betas=(opt.beta1, 0.9))

    # Start training
    for epoch in range(start_epoch, opt.epochs):
        adjust_lr(optimizer, epoch)
        adjust_lr(optimizerG, epoch)
        losses = AverageMeter()
        precisions = AverageMeter()
        densenet.train()
        for i, data in enumerate(train_loader):
            real_cpu, _, pids, _ = data
            if opt.cuda:
                real_cpu = real_cpu.cuda()
                pids = pids.cuda()
            # BUG FIX: the original copied the batch into an undefined
            # module-level `input` tensor (shadowing the builtin) and built
            # `targets` only on the CUDA path, so the CPU path raised
            # NameError.  Wrap the batch tensors directly instead.
            targets = Variable(pids)
            inputv = Variable(real_cpu)
            outputs, output_dense, _ = densenet(inputv)
            fake = netG(output_dense)
            fake = fake * 3
            # Combined classifier/generator step every CRITIC_ITERS batches.
            if i % opt.CRITIC_ITERS == 0:
                netG.zero_grad()
                optimizer.zero_grad()
                loss_l2 = l2loss(fake=fake, inputv=inputv)
                loss_perception = perceptionloss(fake=fake, inputv=inputv)
                loss_classify = criterion(outputs, targets)
                prec, = accuracy(outputs.data, targets.data)
                prec = prec[0]
                losses.update(loss_classify.data[0], targets.size(0))
                precisions.update(prec, targets.size(0))
                # Reconstruction terms are deliberately zero-weighted here;
                # they are still computed so their values can be logged.
                loss = loss_classify + 0 * loss_l2 + 0 * loss_perception
                loss.backward()
                optimizerG.step()
                optimizer.step()
            # NOTE(review): between generator steps these report the most
            # recently computed (possibly stale) loss values, as before.
            print('[%d/%d][%d/%d] Loss_l2: %.4f Loss_perception: %.4f '%(epoch, opt.epochs, i, \
                  len(train_loader),loss_l2.data[0],loss_perception.data[0]))
            print('Loss {}({})\t'
                  'Prec {}({})\t'.format(losses.val, losses.avg,
                                         precisions.val, precisions.avg))
            # Periodically dump real/fake image grids for visual inspection.
            if i % 100 == 0:
                vutils.save_image(real_cpu,
                                  '%s/real_samples.png' % opt.outf,
                                  normalize=True)
                outputs, output_dense, _ = densenet(x=inputv)
                fake = netG(output_dense)
                fake = fake * 3
                vutils.save_image(fake.data,
                                  '%s/fake_samples_epoch_%03d.png' %
                                  (opt.outf, epoch),
                                  normalize=True)
        show_info(densenet, with_arch=False, with_grad=True)
        show_info(netG, with_arch=False, with_grad=True)
        # Raw weight snapshots every 5 epochs.
        if epoch % 5 == 0:
            torch.save(densenet.state_dict(),
                       '%s/densenet_epoch_%d.pth' % (opt.outf, epoch))
            torch.save(netG.state_dict(),
                       '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
        if epoch < opt.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, val_list, val_list)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': densenet.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(opt.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))
        if (epoch + 1) % 5 == 0:
            print('Test model: \n')
            evaluator.evaluate(test_loader, query_list, gallery_list)
            model_name = 'epoch_' + str(epoch) + '.pth.tar'
            torch.save({'state_dict': densenet.state_dict()},
                       osp.join(opt.logs_dir, model_name))

    # Final test with the best saved checkpoint.
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(opt.logs_dir, 'model_best.pth.tar'))
    densenet.load_state_dict(checkpoint['state_dict'])
    print('best epoch: ', checkpoint['epoch'])
    metric.train(densenet, train_loader)
    evaluator.evaluate(test_loader, query_list, gallery_list, metric)
Example #10
0
def main(args):
    """Unsupervised domain-adaptive re-ID training via HDBSCAN self-labelling.

    Repeatedly extracts target-set features, computes a re-ranked distance
    matrix against source-set features, clusters with HDBSCAN to obtain
    pseudo labels, builds cluster-center classifiers, and fine-tunes the
    model on the pseudo-labelled data with triplet + classification losses.

    Args:
        args: parsed command-line namespace (datasets, sizes, lr, iteration
            count, etc.).

    Returns:
        (mAP, rank-1) scores of the final adapted model.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    # get source data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get target data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hacking here to let the classifier be the number of source ids
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise RuntimeError(
            'Please specify the number of classes (ids) of the network.')

    # Load from checkpoint -- a source-pretrained model is required.
    if args.resume:
        print(
            'Resuming checkpoints from finetuned model on another dataset...\n'
        )
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()

    # Criterion: two semi-hard triplet losses; slot 3 is re-filled each
    # iteration with a ClassificationLoss built from the cluster centers.
    criterion = [
        TripletLoss(args.margin, args.num_instances, isAvg=True,
                    use_semi=True).cuda(),
        TripletLoss(args.margin, args.num_instances, isAvg=True,
                    use_semi=True).cuda(), None, None
    ]

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # Baseline evaluation before any adaptation.
    evaluator = Evaluator(model, print_freq=args.print_freq)
    evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)

    # Self-training iterations
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            # lambda == 0 means the source term is unused in re-ranking.
            source_features = 0
        else:
            # get source datas' feature
            source_features, _ = extract_features(model,
                                                  src_extfeat_loader,
                                                  print_freq=args.print_freq)
            # synchronization feature order with src_dataset.train
            source_features = torch.cat([
                source_features[f].unsqueeze(0)
                for f, _, _, _ in src_dataset.train
            ], 0)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(
            iter_n + 1))
        target_features, tarNames = extract_features(
            model, tgt_extfeat_loader, print_freq=args.print_freq)
        # synchronization feature order with dataset.trainval
        target_features = torch.cat([
            target_features[f].unsqueeze(0)
            for f, _, _, _ in tgt_dataset.trainval
        ], 0)

        # calculate distance and rerank result
        target_features = target_features.numpy()
        rerank_dist = re_ranking(source_features,
                                 target_features,
                                 lambda_value=args.lambda_value)

        # select & cluster images as training set of this epochs
        cluster = HDBSCAN(metric='precomputed', min_samples=10)
        clusterRes = cluster.fit(rerank_dist)
        labels, label_num = clusterRes.labels_, clusterRes.labels_.max() + 1
        centers = np.zeros((label_num, target_features.shape[1]))
        # BUG FIX: `nums` counts members per cluster, so it needs
        # `label_num` entries.  The original sized it by the feature
        # dimension (`target_features.shape[1]`), which raises IndexError
        # whenever the cluster count exceeds the feature dimension.
        nums = [0] * label_num
        print('clusters num =', label_num)

        # generate new dataset from the clustered (non-noise) samples
        new_dataset = []
        index = -1
        for (fname, _, cam, timestamp), label in zip(tgt_dataset.trainval,
                                                     labels):
            index += 1
            if label == -1: continue  # label -1 marks HDBSCAN noise points
            # dont need to change codes in trainer.py _parsing_input function and sampler function after add 0
            new_dataset.append((fname, label, cam, timestamp))
            centers[label] += target_features[index]
            nums[label] += 1
        print('Iteration {} have {} training images'.format(
            iter_n + 1, len(new_dataset)))

        train_loader = DataLoader(Preprocessor(new_dataset,
                                               root=tgt_dataset.images_dir,
                                               transform=train_transformer),
                                  batch_size=args.batch_size,
                                  num_workers=4,
                                  sampler=RandomIdentitySampler(
                                      new_dataset, args.num_instances),
                                  pin_memory=True,
                                  drop_last=True)

        # Mean feature per cluster initialises the classification loss.
        for i in range(label_num):
            centers[i] /= nums[i]
        criterion[3] = ClassificationLoss(normalize(centers, axis=1)).cuda()

        # Classifier head gets its own (fixed) lr; backbone uses args.lr.
        classOptimizer = torch.optim.Adam(
            [{
                'params': model.parameters()
            }, {
                'params': criterion[3].classifier.parameters(),
                'lr': 1e-3
            }],
            lr=args.lr)

        class_trainer = ClassificationTrainer(model, train_loader, criterion,
                                              classOptimizer)

        for epoch in range(args.epochs):
            class_trainer.train(epoch)

        rank_score = evaluator.evaluate(test_loader, tgt_dataset.query,
                                        tgt_dataset.gallery)

    # Final evaluation and checkpoint of the adapted model.
    rank_score = evaluator.evaluate(test_loader, tgt_dataset.query,
                                    tgt_dataset.gallery)
    save_checkpoint(
        {
            'state_dict': model.module.state_dict(),
            # NOTE(review): `epoch` is undefined if args.epochs == 0 --
            # preserved from the original; confirm epochs >= 1 in callers.
            'epoch': epoch + 1,
            'best_top1': rank_score.market1501[0],
        },
        True,
        fpath=osp.join(args.logs_dir, 'adapted.pth.tar'))
    return (rank_score.map, rank_score.market1501[0])
Example #11
0
def main(args):
    """Train a triplet-loss embedding model and report val/test accuracy."""
    # Seed RNGs and enable cudnn autotuning for fixed-size inputs.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Mirror stdout into a log file unless we are only evaluating.
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Sanity-check the identity sampler configuration before building loaders.
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        default_size = (144, 56) if args.arch == 'inception' else (256, 128)
        args.height, args.width = default_size

    (dataset, num_classes, train_loader,
     val_loader, test_loader) = get_data(
         args.dataset, args.split, args.data_dir, args.height,
         args.width, args.batch_size, args.num_instances, args.workers,
         args.combine_trainval)

    # Build the network; the classifier doubles as the final embedding layer:
    # avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          dropout=args.dropout, num_classes=args.features)

    # Optionally restore a previous training state.
    start_epoch = best_top1 = 0
    if args.resume:
        ckpt = load_checkpoint(args.resume)
        model.load_state_dict(ckpt['state_dict'])
        start_epoch = ckpt['epoch']
        best_top1 = ckpt['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    metric = DistanceMetric(algorithm=args.dist_metric)
    evaluator = Evaluator(model)

    # Evaluation-only mode: score validation and test splits, then stop.
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    criterion = TripletLoss(margin=args.margin).cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)
    trainer = Trainer(model, criterion)

    def adjust_lr(epoch):
        # Constant lr for the first 100 epochs, then exponential decay.
        if epoch <= 100:
            lr = args.lr
        else:
            lr = args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for group in optimizer.param_groups:
            group['lr'] = lr * group.get('lr_mult', 1)

    # Training loop: evaluate and checkpoint once past args.start_save.
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Reload the best checkpoint for the final test pass.
    print('Test with best model:')
    best_ckpt = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(best_ckpt['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #12
0
def main(args):
    """Train or evaluate an IDE model with triplet loss.

    When ``args.train`` is set, trains (optionally resuming from a previous
    best checkpoint), drawing loss/precision curves and evaluating every 25
    epochs; otherwise only evaluates the model on the query/gallery split.
    """
    # seed: fully deterministic when a seed is given, fastest otherwise
    if args.seed is not None:
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    else:
        torch.backends.cudnn.benchmark = True

    # Resolve the log directory and snapshot the scripts used for this run.
    if args.logs_dir is None:
        args.logs_dir = osp.join(
            f'logs/triplet/{args.dataset}',
            datetime.datetime.today().strftime('%Y-%m-%d_%H-%M-%S'))
    else:
        args.logs_dir = osp.join(f'logs/triplet/{args.dataset}', args.logs_dir)
    if args.train:
        os.makedirs(args.logs_dir, exist_ok=True)
        copy_tree('./reid', args.logs_dir + '/scripts/reid')
        for script in os.listdir('.'):
            if script.split('.')[-1] == 'py':
                dst_file = os.path.join(args.logs_dir, 'scripts',
                                        os.path.basename(script))
                shutil.copyfile(script, dst_file)
        sys.stdout = Logger(os.path.join(args.logs_dir, 'log.txt'), )
    print('Settings:')
    print(vars(args))
    print('\n')

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be larger than 1"
    assert args.batch_size % args.num_instances == 0, 'num_instances should divide batch_size'
    dataset, num_classes, train_loader, query_loader, gallery_loader, _ = \
        get_data(args.dataset, args.data_dir, args.height, args.width, args.batch_size, args.num_workers,
                 args.combine_trainval, args.crop, args.tracking_icams, args.tracking_fps, args.re, args.num_instances,
                 False)

    # Create model for triplet (num_classes = 0, num_instances > 0)
    model = models.create('ide',
                          feature_dim=args.feature_dim,
                          num_classes=0,
                          norm=args.norm,
                          dropout=args.dropout,
                          last_stride=args.last_stride)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        resume_fname = osp.join(f'logs/triplet/{args.dataset}', args.resume,
                                'model_best.pth.tar')
        model, start_epoch, best_top1 = checkpoint_loader(model, resume_fname)
        print("=> Last epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
        # BUG FIX: the original also incremented start_epoch here, which --
        # combined with `start_epoch + 1` in the training range below --
        # skipped one epoch after every resume.
    model = nn.DataParallel(model).cuda()

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Evaluator
    evaluator = Evaluator(model)

    if args.train:
        # Schedule learning rate: flat until step_size, then exponential decay.
        def adjust_lr(epoch):
            if epoch <= args.step_size:
                lr = args.lr
            else:
                lr = args.lr * (0.001**(float(epoch - args.step_size) /
                                        (args.epochs - args.step_size)))
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)

        # Accumulators for the training curve plot.
        epoch_s = []
        loss_s = []
        prec_s = []
        eval_epoch_s = []
        eval_top1_s = []

        # Start training (epochs are 1-based; resume continues at the
        # epoch after the checkpointed one).
        for epoch in range(start_epoch + 1, args.epochs + 1):
            adjust_lr(epoch)
            train_loss, train_prec = trainer.train(epoch,
                                                   train_loader,
                                                   optimizer,
                                                   fix_bn=args.fix_bn)

            if epoch < args.start_save:
                continue

            # Full evaluation is expensive; only run it every 25 epochs.
            if epoch % 25 == 0:
                top1 = evaluator.evaluate(query_loader, gallery_loader,
                                          dataset.query, dataset.gallery)
                eval_epoch_s.append(epoch)
                eval_top1_s.append(top1)
            else:
                top1 = 0

            is_best = top1 >= best_top1
            best_top1 = max(top1, best_top1)
            save_checkpoint(
                {
                    'state_dict': model.module.state_dict(),
                    'epoch': epoch,
                    'best_top1': best_top1,
                },
                is_best,
                fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
            epoch_s.append(epoch)
            loss_s.append(train_loss)
            prec_s.append(train_prec)
            draw_curve(os.path.join(args.logs_dir, 'train_curve.jpg'), epoch_s,
                       loss_s, prec_s, eval_epoch_s, None, eval_top1_s)

        # Final test with the best checkpoint.
        print('Test with best model:')
        model, start_epoch, best_top1 = checkpoint_loader(
            model, osp.join(args.logs_dir, 'model_best.pth.tar'))
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))

        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery)
    else:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery)
Example #13
0
def main(args):
    """Train an InceptionNet re-ID model with a selectable loss/optimizer.

    Supported losses: 'xentropy', 'oim', 'triplet'; optimizers: 'sgd',
    'adam'.  Evaluates on the validation split each epoch, checkpoints the
    best model, and reports final test accuracy with the best weights.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.loss == 'triplet':
        assert args.num_instances > 1, 'TripletLoss requires num_instances > 1'
        assert args.batch_size % args.num_instances == 0, \
            'num_instances should divide batch_size'
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir,
                 args.batch_size, args.workers, args.num_instances,
                 combine_trainval=args.combine_trainval)

    # Create model; the head configuration depends on the chosen loss.
    if args.loss == 'xentropy':
        model = InceptionNet(num_classes=num_classes,
                             num_features=args.features,
                             dropout=args.dropout)
    elif args.loss == 'oim':
        model = InceptionNet(num_features=args.features,
                             norm=True,
                             dropout=args.dropout)
    elif args.loss == 'triplet':
        model = InceptionNet(num_features=args.features, dropout=args.dropout)
    else:
        raise ValueError("Cannot recognize loss type:", args.loss)
    model = torch.nn.DataParallel(model).cuda()

    # Load from checkpoint
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        args.start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> start epoch {}  best top1 {:.1%}".format(
            args.start_epoch, best_top1))
    else:
        best_top1 = 0

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator; in evaluate-only mode score both splits and return.
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion matching the model head built above.
    if args.loss == 'xentropy':
        criterion = torch.nn.CrossEntropyLoss()
    elif args.loss == 'oim':
        criterion = OIMLoss(model.module.num_features,
                            num_classes,
                            scalar=args.oim_scalar,
                            momentum=args.oim_momentum)
    elif args.loss == 'triplet':
        criterion = TripletLoss(margin=args.triplet_margin)
    else:
        raise ValueError("Cannot recognize loss type:", args.loss)
    criterion.cuda()

    # Optimizer
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)
    else:
        raise ValueError("Cannot recognize optimizer type:", args.optimizer)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        if args.optimizer == 'sgd':
            # Step decay: divide by 10 every 60 epochs.
            lr = args.lr * (0.1**(epoch // 60))
        elif args.optimizer == 'adam':
            # BUG FIX: the original wrote `0.001 ** (epoch - 100) / 50`,
            # i.e. (0.001 ** (epoch - 100)) / 50, which collapses the lr to
            # ~0 immediately after epoch 100.  The intended schedule (used
            # by the other training scripts in this project) decays over a
            # 50-epoch scale: lr * 0.001 ** ((epoch - 100) / 50).
            lr = args.lr if epoch <= 100 else \
                args.lr * (0.001 ** ((epoch - 100) / 50.0))
        else:
            raise ValueError("Cannot recognize optimizer type:",
                             args.optimizer)
        for g in optimizer.param_groups:
            g['lr'] = lr

    # Start training
    for epoch in range(args.start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #14
0
def main(args):
    """Train a metric-embedding re-ID model with a triplet loss.

    Seeds numpy/torch, builds the data loaders and embedding model,
    optionally resumes from a checkpoint (partially, by matching keys),
    trains with Adam under an exponential lr-decay schedule, logs
    parameter/gradient histograms to TensorBoard, checkpoints every
    epoch, and finally re-tests the last checkpoint.

    Args:
        args: parsed command-line namespace (seed, dataset, arch, lr,
            epochs, logs_dir, batch_size, num_instances, ...).
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    logger = TensorLogger(osp.join(args.log_root, 'Tensorboard_logs', args.logs_dir))
    # Redirect print to both console and log file
    logs_dir = osp.join(args.log_root, args.logs_dir)
    if not args.evaluate:
        sys.stdout = Logger(osp.join(logs_dir, 'log.txt'))

    # Create data loaders
    # assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        # Inception takes a smaller input; every other arch gets 256x128.
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)
    print("lr:", args.lr)
    print("max_epoch:", args.epochs)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = models.create(args.arch, num_features=1024,
                          num_diff_features=128,
                          dropout=args.dropout,
                          cut_at_pooling=False)
    print(model)
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        # Partial resume: keep only the checkpoint weights whose names
        # still exist in the current model, so small architecture tweaks
        # don't break loading.
        pretrained_dict = {k: v for k, v in checkpoint['state_dict'].items()
                           if k in model.state_dict()}
        model_dict = model.state_dict()
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        # NOTE(review): assumes args.dataset is a list of dataset names;
        # if it is a plain string, len(...) > 1 iterates its characters —
        # confirm against the argument parser.
        if len(args.dataset)>1:
            for dataset_name in args.dataset:
                print("{} test result:".format(dataset_name))
                evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                                   dataset.gallery[dataset_name], metric)
            return
        else:
            evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
            return

    # Criterion
    criterion = TripletLoss(margin=args.margin).cuda()

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        # Constant lr until lr_change_epochs, then exponential decay that
        # reaches ~0.001 * lr at the final epoch.
        # lr = args.lr if epoch <= 270 else \
        #     args.lr * (0.001 ** ((epoch - 270) / 135))
        # lr = args.lr if epoch <= 100 else \
        #     args.lr * (0.001 ** ((epoch - 100) / 50.0))
        lr = args.lr if epoch <= args.lr_change_epochs else \
            args.lr * (0.001 ** ((epoch - args.lr_change_epochs)/float(args.epochs-args.lr_change_epochs)))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
    # epoch_num = args.maxstep//(750//18) + 1
    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer, logger)
        #######Tensorboard-logs##########
        for tag, value in model.named_parameters():
            tag = tag.replace('.', '/')
            try:
                # .grad can be None before the first backward pass on a
                # parameter; histo_summary then raises AttributeError.
                logger.histo_summary(tag, to_np(value), epoch*len(train_loader) + 1)
                logger.histo_summary(tag + '/grad', to_np(value.grad), epoch*len(train_loader) + 1)
            except AttributeError:
                pass
        # top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)
        # top1 = evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
        # NOTE(review): per-epoch validation is disabled; top1/best_top1
        # are hard-coded below, so is_best is never True and the "best"
        # bookkeeping is inert — the saved checkpoint is simply the latest.
        top1 = 1
        # is_best = top1 > best_top1
        # best_top1 = max(top1, best_top1)
        # save_checkpoint({
        #     'state_dict': model.module.state_dict(),
        #     'epoch': epoch + 1,
        #     'best_top1': best_top1,
        # }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
        is_best = False
        best_top1 = 1
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(logs_dir, 'checkpoint.pth.tar'))
        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    # "Best" model here is just the last checkpoint (see NOTE above).
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(logs_dir, 'checkpoint.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    print("Test:")
    if len(args.dataset) > 1:
        for dataset_name in args.dataset:
            print("{} test result:".format(dataset_name))
            evaluator.evaluate(test_loader[dataset_name], dataset.query[dataset_name],
                               dataset.gallery[dataset_name], metric)
    else:
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #15
0
def main(args):
    """Train a re-ID model (MGN / SAC-triplet / cross-entropy variants).

    Seeds RNGs, builds loaders and the model, optionally loads ImageNet
    backbone weights or resumes a full checkpoint, selects the loss and
    trainer matching ``args.arch``, optionally trains frozen backbone
    layers at a 0.1x lr multiplier, runs SGD under a warmup multi-step
    schedule, periodically evaluates/saves, and re-tests the best model.

    Args:
        args: parsed command-line namespace (arch, lr, epochs, margin,
            frozen_sublayer, step_epoch, logs_dir, ...).
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    print(args)
    # Create data loaders
    if args.big_height is None or args.big_width is None or args.target_height is None or args.target_width is None:
        # Images are loaded at big_* resolution then cropped to target_*.
        args.big_height, args.big_width, args.target_height, args.target_width = (
            256, 256, 224, 224)
    dataset, num_classes, train_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.big_height, args.big_width,
                 args.target_height, args.target_width, args.batch_size, args.num_instances,
                 args.workers, args.combine_trainval)

    # Create models
    model = models.create(name=args.arch,
                          num_classes=num_classes,
                          num_features=args.features,
                          norm=True)
    print(model)
    # Load from checkpoint
    start_epoch = best = 0

    if args.weights and hasattr(model, 'base'):
        print('loading resnet50')
        checkpoint = load_checkpoint(args.weights)
        # Drop the ImageNet classifier head: only the backbone is reused.
        del (checkpoint['fc.weight'])
        del (checkpoint['fc.bias'])
        model.base.load_state_dict(checkpoint)
    # --weights takes precedence: a full resume is ignored when both are given.
    if args.resume and not args.weights:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}  best top1 {:.1%}".format(start_epoch, best))

    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
        return

    # Criterion — selected per architecture family.
    if args.arch == 'ResNet50_mgn_lr' or args.arch == 'ResNet101_mgn_lr' or args.arch == 'ResNet152_mgn_lr':
        criterion = MGN_loss(margin1=1.2,
                             num_instances=4,
                             alpha=1.0,
                             gamma=1.0,
                             theta=0.1,
                             has_trip=True).cuda()
    elif args.arch == 'ResNet_reid_50' or args.arch == 'ResNet_reid_101' or args.arch == 'ResNet_reid_152':
        # criterion = XentropyLoss_SAC(theta=0.2,gamma=1).cuda()
        ranking_loss = nn.MarginRankingLoss(margin=args.margin).cuda()
        criterion = { 'XentropyLoss_SAC': XentropyLoss_SAC(theta=0.2,gamma=1).cuda(), \
                    'trihard': TripletLoss(ranking_loss).cuda() }
    else:
        criterion = nn.CrossEntropyLoss().cuda()
    # Optimizer
    # Early backbone layers to train at a reduced lr (see lr_mult below).
    frozen_layerName = [
        'conv1',
        'bn1',
        'relu',
        'maxpool',
        'layer1',
        'layer2',
    ]
    ##### Optimizer
    if args.frozen_sublayer:
        # Locate where the frozen layers live: either under model.module.base
        # or directly on model.module.
        frozen_Source = None
        if hasattr(model.module, 'base'):
            frozen_Source = 'model.module.base.'
        elif hasattr(model.module, frozen_layerName[0]):
            frozen_Source = 'model.module.'
        else:
            raise RuntimeError(
                'Not freeze layers but frozen_sublayer is True!')

        # NOTE(review): eval() on string-built attribute paths works but is
        # fragile; getattr(model.module, sub_layer) chains would be safer —
        # confirm before changing.
        base_params_set = set()
        for subLayer in frozen_layerName:
            if hasattr(eval(frozen_Source[:-1]), subLayer):
                print('frozen layer: ', subLayer)
                single_module_param = eval(frozen_Source + subLayer +
                                           '.parameters()')
                # base_params.append(single_module_param)
                # Collect parameter identities so membership tests below are O(1).
                single_module_param_set = set(map(id, single_module_param))
                base_params_set = base_params_set | single_module_param_set
            else:
                print("current model doesn't have ", subLayer)

        new_params = [
            p for p in model.parameters() if id(p) not in base_params_set
        ]

        base_params = [
            p for p in model.parameters() if id(p) in base_params_set
        ]
        # "Frozen" layers still train, just 10x slower than the new head.
        param_groups = [{
            'params': base_params,
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        param_groups = model.parameters()

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer — must match the criterion chosen above.
    if args.arch == 'ResNet50_mgn_lr' or args.arch == 'ResNet101_mgn_lr' or args.arch == 'ResNet152_mgn_lr':
        trainer = Trainer(model, criterion)

    elif args.arch == 'ResNet_reid_50' or args.arch == 'ResNet_reid_101' or args.arch == 'ResNet_reid_152':

        trainer = Trainer_SAC_Triplet(
            model, criterion, metric_loss_weight=args.metric_loss_weight)
    else:
        trainer = Cross_Trihard_Trainer(
            model, criterion, metric_loss_weight=args.metric_loss_weight)

    # Schedule learning rate
    print(args.step_epoch)
    scheduler = WarmupMultiStepLR(optimizer,
                                  args.step_epoch,
                                  gamma=args.gamma,
                                  warmup_factor=args.warm_up_factor,
                                  warmup_iters=args.warm_up_iter)
    # Start training (epochs are 1-based here).
    for epoch in range(start_epoch + 1, args.epochs + 1):
        scheduler.step()
        trainer.train(epoch, train_loader, optimizer)

        # Evaluate every epoch_inter epochs, and every epoch once past
        # dense_evaluate; checkpoint only after start_save.
        if epoch % args.epoch_inter == 0 or epoch >= args.dense_evaluate:
            tmp_mAP, tmp_res = evaluator.evaluate(test_loader, dataset.query,
                                                  dataset.gallery)
            if epoch >= args.start_save:
                if tmp_mAP > best:
                    best = tmp_mAP
                    flag = True
                else:
                    flag = False
                # save_checkpoint({
                #     'state_dict': model.module.state_dict(),
                #     'epoch': epoch,
                #     'best_map':tmp_mAP
                # }, flag, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))
                save_checkpoint(
                    {
                        'state_dict': model.module.state_dict(),
                        'epoch': epoch,
                        'best_map': tmp_mAP
                    },
                    flag,
                    fpath=osp.join(args.logs_dir, 'pass%d.pth.tar' % (epoch)))
    print('Test with best model:')
    # NOTE(review): assumes save_checkpoint copies to 'model_best.pth.tar'
    # when flag is True; if no epoch ever improved mAP, this load fails.
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
def main(args):
    """Cross-domain transfer training: triplet loss on the source domain
    plus a reconstruction loss on the target domain.

    Builds separate source/target loaders, trains a shared ResNet encoder
    with a target-side reconstruction module, evaluates every 25 epochs,
    checkpoints every epoch, and finally re-tests the last checkpoint.

    Bugs fixed relative to the original:
      * the final test called ``model.module.load_state_dict`` although
        ``model`` is never wrapped in ``nn.DataParallel`` here (that wrap
        is commented out), which raised AttributeError;
      * the final test read ``args.dataset``, which this script does not
        define (it uses ``source_dataset``/``target_dataset``).

    Args:
        args: parsed command-line namespace (seed, source_dataset,
            target_dataset, lr, epochs, logs_dir, ...).
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    logger = TensorLogger(osp.join(args.log_root, 'Tensorboard_logs', args.logs_dir))
    # Redirect print to both console and log file
    logs_dir = osp.join(args.log_root, args.logs_dir)
    if not args.evaluate:
        sys.stdout = Logger(osp.join(logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.source_batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    # Source loaders carry identity labels (triplet loss)...
    source_dataset, source_num_classes, source_train_loader\
        , source_val_loader, source_test_loader = \
        get_source_data(args.source_dataset, args.split, args.data_dir, args.height,
                 args.width, args.source_batch_size, args.num_instances, args.workers,
                 args.combine_trainval)
    # ...target loaders are unlabeled (reconstruction loss only).
    target_dataset, target_num_classes, target_train_loader\
        , target_val_loader, target_test_loader = \
        get_target_data(args.target_dataset, args.split, args.data_dir, args.height,
                 args.width, args.target_batch_size, args.workers,
                 args.combine_trainval)
    print("lr:", args.lr)
    print("max_epoch:", args.epochs)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = ResNet_recon(num_features=1024, dropout=args.dropout)
    if args.evaluate:
        # Evaluation uses a DataParallel wrapper; training keeps the model
        # on GPU 0 with the auxiliary modules on GPU 1 (manual model
        # parallelism).
        model_evalu = nn.DataParallel(model).cuda()
    model = model.cuda(0)

    # For source triplet-loss
    trip_embedding = Trip_embedding(num_features=1024,
                                    num_diff_features=128, dropout=args.dropout).cuda(1)
    # For target reconstruction-loss
    recon_module = Reconstruct(num_features=1024).cuda(1)
    # Criterion
    criterion = ReconTripLoss().cuda(1)
    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        model_path = load_checkpoint(args.resume)
        model.load_state_dict(model_path['state_dict'])
        # trip_embedding.load_state_dict(model_path['trip_em'])
        recon_module.load_state_dict(model_path['recon_dict'])
        start_epoch = model_path['epoch']
        best_top1 = model_path['best_top1']
        is_best = False
        top1 = best_top1
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    if args.evaluate:
        evaluator = Evaluator(model_evalu)
        metric.train(model_evalu, source_train_loader)
        print("Test:")
        # NOTE(review): assumes source_dataset args are a list of names;
        # a plain string would iterate characters here.
        if len(args.source_dataset) > 1:
            for dataset_name in args.source_dataset:
                print("{} source_test result:".format(dataset_name))
                evaluator.evaluate(source_test_loader[dataset_name],
                                   source_dataset.query[dataset_name],
                                   source_dataset.gallery[dataset_name],
                                   metric)
            return
        else:
            print("source test result")
            evaluator.evaluate(source_test_loader, source_dataset.query,
                               source_dataset.gallery, metric)
            print("target test result")
            evaluator.evaluate(target_test_loader, target_dataset.query,
                               target_dataset.gallery, metric)
            return

    evaluator = Evaluator(model)
    # Optimizer — one Adam over the encoder and the reconstruction module.
    optimizer = torch.optim.Adam([{'params': model.parameters()},
                                  # {'params': trip_embedding.parameters()},
                                  {'params': recon_module.parameters()}],
                                 lr=args.lr, weight_decay=args.weight_decay)
    # Trainer
    trainer = Transfer_Trainer(model, recon_module, trip_embedding, criterion)

    # Schedule learning rate: constant until lr_change_epochs, then
    # exponential decay reaching ~0.001 * lr at the final epoch.
    def adjust_lr(epoch):
        lr = args.lr if epoch <= args.lr_change_epochs else \
            args.lr * (0.001 ** ((epoch - args.lr_change_epochs)/
                                 float(args.epochs-args.lr_change_epochs)))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    top1 = 0
    is_best = True
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, source_train_loader, target_train_loader, optimizer, logger)
        # Periodic evaluation only (it is expensive); skip epoch 0.
        if epoch % 25 == 0 and epoch != 0:
            # NOTE(review): query is passed as both probe and gallery here —
            # looks intentional (quick sanity metric), confirm with authors.
            top1 = evaluator.evaluate(source_test_loader, source_dataset.query, source_dataset.query)
            target_top1 = evaluator.evaluate(target_test_loader, target_dataset.query, target_dataset.query)
            print('target_top1 = {:5.1%}'.format(target_top1))
            is_best = top1 > best_top1
            best_top1 = max(top1, best_top1)

        save_checkpoint({
            'state_dict': model.state_dict(),
            'recon_dict': recon_module.state_dict(),
            # 'trip_em': trip_embedding.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(logs_dir, 'checkpoint.pth.tar'))
        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(logs_dir, 'checkpoint.pth.tar'))
    # FIX: model is a plain module here (the DataParallel wrap is commented
    # out above), so load directly instead of via .module.
    model.load_state_dict(checkpoint['state_dict'])
    metric.train(model, source_train_loader)
    print("Test:")
    # FIX: this script defines source_dataset/target_dataset, not dataset.
    if len(args.source_dataset) > 1:
        for dataset_name in args.source_dataset:
            print("{} test result:".format(dataset_name))
            evaluator.evaluate(source_test_loader[dataset_name], source_dataset.query[dataset_name],
                               source_dataset.gallery[dataset_name], metric)
    else:
        evaluator.evaluate(source_test_loader, source_dataset.query, source_dataset.gallery, metric)
Example #17
0
def main(args):
    """Train a camera-invariant encoder with adversarial and memory
    losses, evaluating on the four small re-ID benchmarks (VIPeR, PRID,
    GRID, i-LIDS).

    Bugs fixed relative to the original:
      * ``--evaluate`` mode now returns after evaluation instead of
        falling through into training (matching the other entry points);
      * the final test referenced undefined ``query_loader`` /
        ``gallery_loader`` names (NameError); it now re-runs the four
        benchmark evaluations via a shared helper.

    Args:
        args: parsed command-line namespace (seed, arch, lr, epochs,
            logs_dir, tri_weight, adv_weight, mem_weight, ...).
    """
    fix(args.seed)
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    print(args)
    # Create data loaders
    dataset, test_dataset, num_classes, source_train_loader, grid_query_loader, grid_gallery_loader, prid_query_loader, prid_gallery_loader, viper_query_loader, viper_gallery_loader, ilid_query_loader, ilid_gallery_loader = \
        get_data(args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instance, args.re, args.workers)

    # Create model: feature encoder, transfer net, and camera discriminator.
    Encoder, Transfer, CamDis = models.create(args.arch, num_features=args.features,
                          dropout=args.dropout, num_classes=num_classes)

    # Exemplar-memory invariance module.
    invNet = InvNet(args.features, num_classes, args.batch_size, beta=args.beta, knn=args.knn, alpha=args.alpha).cuda()

    # Load from checkpoint
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        Encoder.load_state_dict(checkpoint['Encoder'])
        Transfer.load_state_dict(checkpoint['Transfer'])
        CamDis.load_state_dict(checkpoint['CamDis'])
        invNet.load_state_dict(checkpoint['InvNet'])
        start_epoch = checkpoint['epoch']

    Encoder = Encoder.cuda()
    Transfer = Transfer.cuda()
    CamDis = CamDis.cuda()

    model = [Encoder, Transfer, CamDis]

    def _evaluate_benchmarks(evaluator):
        # One pass over every benchmark. The fixed seeds keep the random
        # probe/gallery splits of these small datasets reproducible.
        evaluator.eval_viper(viper_query_loader, viper_gallery_loader, test_dataset.viper_query, test_dataset.viper_gallery, args.output_feature, seed=57)
        evaluator.eval_prid(prid_query_loader, prid_gallery_loader, test_dataset.prid_query, test_dataset.prid_gallery, args.output_feature, seed=40)
        evaluator.eval_grid(grid_query_loader, grid_gallery_loader, test_dataset.grid_query, test_dataset.grid_gallery, args.output_feature, seed=35)
        evaluator.eval_ilids(ilid_query_loader, test_dataset.ilid_query, args.output_feature, seed=24)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        _evaluate_benchmarks(evaluator)
        # FIX: evaluation-only run must not fall through into training.
        return

    # Losses: cross-entropy for identity, triplet for metric learning.
    criterion = []
    criterion.append(nn.CrossEntropyLoss().cuda())
    criterion.append(TripletLoss(margin=args.margin))

    def _backbone_optimizer(net):
        # Pretrained backbone trains at 0.1x the base lr; new layers at 1x.
        base_param_ids = set(map(id, net.base.parameters()))
        new_params = [p for p in net.parameters() if id(p) not in base_param_ids]
        param_groups = [
            {'params': net.base.parameters(), 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]
        return torch.optim.SGD(param_groups, lr=args.lr,
                               momentum=0.9, weight_decay=5e-4, nesterov=True)

    optimizer_Encoder = _backbone_optimizer(Encoder)
    optimizer_Transfer = _backbone_optimizer(Transfer)
    # The discriminator has no pretrained backbone: single group at 1x.
    optimizer_Cam = torch.optim.SGD(
        [{'params': CamDis.parameters(), 'lr_mult': 1.0}],
        lr=args.lr, momentum=0.9, weight_decay=5e-4, nesterov=True)

    optimizer = [optimizer_Encoder, optimizer_Transfer, optimizer_Cam]

    # Trainer
    trainer = Trainer(model, criterion, InvNet=invNet)

    # Schedule learning rate: step decay by 10x every 40 epochs,
    # scaled per param group by its lr_mult.
    def adjust_lr(epoch):
        step_size = 40
        lr = args.lr * (0.1 ** ((epoch) // step_size))
        for opt in (optimizer_Encoder, optimizer_Transfer, optimizer_Cam):
            for g in opt.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, source_train_loader, optimizer, args.tri_weight, args.adv_weight, args.mem_weight)

        save_checkpoint({
            'Encoder': Encoder.state_dict(),
            'Transfer': Transfer.state_dict(),
            'CamDis': CamDis.state_dict(),
            'InvNet': invNet.state_dict(),
            'epoch': epoch + 1,
        }, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d} \n'.
              format(epoch))

    # Final test
    # FIX: the original referenced undefined query_loader/gallery_loader;
    # re-run the four benchmark evaluations with the trained model instead.
    print('Test with best model:')
    evaluator = Evaluator(model)
    _evaluate_benchmarks(evaluator)
def main(args):
    """Train four part-based re-ID embedding models (whole body, head,
    upper body, lower body) side by side with triplet losses, track a
    combined top-1, and finally test the four best snapshots together.

    Improvement over the original: the four byte-identical
    ``adjust_lr_*`` functions, four identical Adam constructions, and
    four near-identical checkpoint save/load stanzas are collapsed into
    parameterized helpers/loops — behavior is unchanged.

    Args:
        args: parsed command-line namespace (seed, dataset, arch, lr,
            epochs, margin, start_save, logs_dir, ...).
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, train_loader_head, train_loader_upper, train_loader_lower,\
    val_loader, val_loader_head, val_loader_upper, val_loader_lower,\
    test_loader, test_loader_head, test_loader_upper, test_loader_lower= \
        get_data(args.dataset, args.split, args.data_dir, args.height,args.width, args.batch_size,args.num_instances,args.workers,args.combine_trainval)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    def _build_model():
        # One identical embedding net per body-part stream.
        net = models.create(args.arch,
                            num_features=1024,
                            dropout=args.dropout,
                            num_classes=args.features)
        return nn.DataParallel(net).cuda()

    model = _build_model()
    model_head = _build_model()
    model_upper = _build_model()
    model_lower = _build_model()

    # Load from checkpoint (resume path is currently disabled upstream).
    start_epoch = best_top1 = 0
    #    if args.resume:
    #        checkpoint = load_checkpoint(args.resume)
    #        model.load_state_dict(checkpoint['state_dict'])
    #        start_epoch = checkpoint['epoch']
    #       best_top1 = checkpoint['best_top1']
    #        print("=> Start epoch {}  best top1 {:.1%}"
    #              .format(start_epoch, best_top1))

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator combines all four streams.
    evaluator = Evaluator(model, model_head, model_upper, model_lower)
    #if args.evaluate:
    #    metric.train(model, train_loader)
    #    print("Validation:")
    #    evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
    #   print("Test:")
    #    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
    #    return

    # Criterion — one triplet loss per stream.
    criterion = TripletLoss(margin=args.margin).cuda()
    criterion_head = TripletLoss(margin=args.margin).cuda()
    criterion_upper = TripletLoss(margin=args.margin).cuda()
    criterion_lower = TripletLoss(margin=args.margin).cuda()

    # Optimizer — one Adam per stream, identical hyperparameters.
    def _build_optimizer(net):
        return torch.optim.Adam(net.parameters(),
                                lr=args.lr,
                                weight_decay=args.weight_decay)

    optimizer = _build_optimizer(model)
    optimizer_head = _build_optimizer(model_head)
    optimizer_upper = _build_optimizer(model_upper)
    optimizer_lower = _build_optimizer(model_lower)

    # Trainer — one per stream.
    trainer = Trainer(model, criterion)
    trainer_head = Trainer(model_head, criterion_head)
    trainer_upper = Trainer(model_upper, criterion_upper)
    trainer_lower = Trainer(model_lower, criterion_lower)

    # Schedule learning rate: constant for the first 100 epochs, then
    # exponential decay reaching 0.001x at epoch 150. One parameterized
    # helper replaces the four duplicated copies in the original.
    def adjust_lr(epoch, opt):
        lr = args.lr if epoch <= 100 else \
            args.lr * (0.001 ** ((epoch - 100) / 50.0))
        for g in opt.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    optimizers = (optimizer, optimizer_head, optimizer_upper, optimizer_lower)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        for opt in optimizers:
            adjust_lr(epoch, opt)
        trainer.train(epoch, train_loader, optimizer)
        trainer_head.train(epoch, train_loader_head, optimizer_head)
        trainer_upper.train(epoch, train_loader_upper, optimizer_upper)
        trainer_lower.train(epoch, train_loader_lower, optimizer_lower)
        # Skip the (expensive) validation + checkpointing until start_save.
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, val_loader_head,
                                  val_loader_upper, val_loader_lower,
                                  dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        # Checkpoint every stream; opath names the "best" copy that
        # save_checkpoint writes when is_best is True.
        for net, ckpt_name, best_name in (
                (model, 'checkpoint.pth.tar', 'model_best.pth.tar'),
                (model_head, 'checkpoint_head.pth.tar', 'model_head_best.pth.tar'),
                (model_upper, 'checkpoint_upper.pth.tar', 'model_upper_best.pth.tar'),
                (model_lower, 'checkpoint_lower.pth.tar', 'model_lower_best.pth.tar')):
            save_checkpoint(
                {
                    'state_dict': net.module.state_dict(),
                    'epoch': epoch + 1,
                    'best_top1': best_top1,
                },
                is_best,
                fpath=osp.join(args.logs_dir, ckpt_name),
                opath=best_name)

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test: reload the best snapshot of every stream, re-train the
    # distance metric on each, then evaluate jointly on the test set.
    print('Test with best model:')
    for net, best_name in ((model, 'model_best.pth.tar'),
                           (model_head, 'model_head_best.pth.tar'),
                           (model_upper, 'model_upper_best.pth.tar'),
                           (model_lower, 'model_lower_best.pth.tar')):
        checkpoint = load_checkpoint(osp.join(args.logs_dir, best_name))
        net.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    metric.train(model_head, train_loader_head)
    metric.train(model_upper, train_loader_upper)
    metric.train(model_lower, train_loader_lower)

    evaluator.evaluate(test_loader, test_loader_head, test_loader_upper,
                       test_loader_lower, dataset.query, dataset.gallery,
                       metric)
Example #19
0
def main(args):
    """Train and evaluate a cross-domain re-ID model with an exemplar
    invariance-learning module (InvNet).

    Pipeline: build source/target loaders, create the backbone and InvNet,
    optionally resume from a checkpoint, then either evaluate (dumping the
    query and retrieved gallery images to disk) or train with SGD under a
    step-decayed learning rate, checkpointing after every epoch.

    Args:
        args: parsed command-line namespace; fields used include arch,
            features, dropout, source, target, lr, epochs, resume,
            evaluate, output_feature, etc.
    """
    # For fast training.
    cudnn.benchmark = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
    print('log_dir=', args.logs_dir)

    # Print logs
    print(args)

    # Create data loaders
    dataset, num_classes, source_train_loader, target_train_loader, \
    query_loader, gallery_loader = get_data(args.data_dir, args.source,
                                            args.target, args.height,
                                            args.width, args.batch_size,
                                            args.camstyle_type,
                                            args.re, args.workers)

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)

    # Invariance learning model
    # The memory is sized by the number of target-domain training samples.
    num_tgt = len(dataset.target_train)
    model_inv = InvNet(args.features,
                       num_tgt,
                       beta=args.inv_beta,
                       knn=args.knn,
                       alpha=args.inv_alpha)

    # Load from checkpoint
    # NOTE: state dicts are loaded BEFORE the DataParallel wrap below, so
    # checkpoint keys are expected without the 'module.' prefix.
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        model_inv.load_state_dict(checkpoint['state_dict_inv'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))

    # Set model
    model = nn.DataParallel(model).to(device)
    model_inv = model_inv.to(device)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        # Dump each query image as q_<i>.jpg and its retrieved gallery
        # images as r_<i>_<j>.jpg in the current working directory.
        query, retrieveds = evaluator.evaluate(query_loader, gallery_loader,
                                               dataset.query, dataset.gallery,
                                               args.output_feature)
        for i in range(len(query)):
            Image.open("data/" + args.target + "/query/" + query[i]).save(
                "q_%d.jpg" % i)
            for j in range(len(retrieveds[i])):
                Image.open("data/" + args.target + "/bounding_box_test/" +
                           retrieveds[i][j]).save("r_%d_%d.jpg" % (i, j))
        return

    # Optimizer
    # Backbone ('base') params train at 0.1x lr via 'lr_mult'; all newly
    # added parameters (heads) train at the full rate.
    base_param_ids = set(map(id, model.module.base.parameters()))

    base_params_need_for_grad = filter(lambda p: p.requires_grad,
                                       model.module.base.parameters())

    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': base_params_need_for_grad,
        'lr_mult': 0.1
    }, {
        'params': new_params,
        'lr_mult': 1.0
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model,
                      model_inv,
                      lmd=args.lmd,
                      include_mmd=args.include_mmd,
                      include_coral=args.include_coral,
                      lmd_ext=args.lmd_ext)

    # Schedule learning rate
    def adjust_lr(epoch):
        # Step decay: multiply the base lr by 0.1 every epochs_decay epochs,
        # then scale each param group by its 'lr_mult'.
        step_size = args.epochs_decay
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, source_train_loader, target_train_loader,
                      optimizer)

        # Only the latest checkpoint is kept; no best-model tracking here.
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'state_dict_inv': model_inv.state_dict(),
                'epoch': epoch + 1,
            },
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d} \n'.format(epoch))

    # Final test
    # NOTE(review): despite the message, no "best" checkpoint is reloaded —
    # this evaluates the last-epoch weights. Confirm whether that is intended.
    print('Test with best model:')
    evaluator = Evaluator(model)
    query, retrieveds = evaluator.evaluate(query_loader, gallery_loader,
                                           dataset.query, dataset.gallery,
                                           args.output_feature)
    for i in range(len(query)):
        Image.open("data/" + args.target + "/query/" + query[i]).save(
            "q_%d.jpg" % i)
        for j in range(len(retrieveds[i])):
            Image.open("data/" + args.target + "/bounding_box_test/" +
                       retrieveds[i][j]).save("r_%d_%d.jpg" % (i, j))
# ---- Example #20 (score: 0) ----
def main(args):
    """Train or evaluate an IDE-style re-ID model, optionally with
    CamStyle augmentation, per-epoch checkpointing and curve plotting.

    Modes (by flags): plain evaluation of a resumed checkpoint
    (--evaluate), a 10-round VID evaluation (--evaluate_VID), or SGD
    training with step lr decay (--train / --finetune).

    Args:
        args: parsed command-line namespace (dataset/loader options,
            model options, optimizer hyper-parameters, mode flags).
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    random.seed(args.seed)

    cudnn.benchmark = True
    # Redirect print to both console and log file; the timestamp keys the
    # log, args-dump, checkpoint and curve files of this run together.
    date_str = '{}'.format(datetime.datetime.today().strftime('%Y-%m-%d_%H-%M-%S'))
    if (not args.evaluate) and args.log:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log_{}.txt'.format(date_str)))
        # save opts
        with open(osp.join(args.logs_dir, 'args_{}.json'.format(date_str)), 'w') as fp:
            json.dump(vars(args), fp, indent=1)

    assert args.real or args.synthetic, "At least one dataset should be used"
    # Create data loaders
    print(args.real, args.synthetic)

    dataset, num_classes, train_loader, query_loader, gallery_loader, camstyle_loader = \
        get_data(args.dataset, args.data_dir, args.height, args.width, args.batch_size, args.num_workers,
                    args.combine_trainval, args.crop, args.tracking_icams, args.tracking_fps, args.real, args.synthetic, args.re, 0, args.camstyle)

    # Create model
    model = models.create('ide', num_features=args.features, norm=args.norm,
                          dropout=args.dropout, num_classes=num_classes, last_stride=args.last_stride,
                          output_feature=args.output_feature, arch=args.arch)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        if args.evaluate:
            model, start_epoch, best_top1 = checkpoint_loader(model, args.resume, eval_only=True)
        else:
            model, start_epoch, best_top1 = checkpoint_loader(model, args.resume)
        print("=> Start epoch {}  best top1_eval {:.1%}".format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, eval_only=True)
        return
    if args.evaluate_VID:
        # Evaluate 10 freshly sampled splits and report the mean metrics.
        print("Test on VID dataset:")
        mAP_list = []
        cmc1_list = []
        cmc5_list = []
        cmc10_list = []
        for i in range(10):
            # FIX: pass args.real and args.synthetic exactly as the loader
            # call above does — the original omitted them, shifting every
            # following positional argument of get_data.
            dataset, num_classes, train_loader, query_loader, gallery_loader, camstyle_loader = \
                get_data(args.dataset, args.data_dir, args.height, args.width, args.batch_size, args.num_workers,
                         args.combine_trainval, args.crop, args.tracking_icams, args.tracking_fps, args.real,
                         args.synthetic, args.re, 0, args.camstyle)
            mAP, cmc1, cmc5, cmc10 = evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, eval_only=True)
            mAP_list.append(mAP)
            cmc1_list.append(cmc1)
            cmc5_list.append(cmc5)
            cmc10_list.append(cmc10)
        print('Final VID test [mAP: {:5.2%}], [cmc1: {:5.2%}], [cmc5: {:5.2%}], [cmc10: {:5.2%}]'
              .format(np.mean(mAP_list), np.mean(cmc1_list), np.mean(cmc5_list), np.mean(cmc10_list)))
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda() if not args.LSR else LSR_loss().cuda()

    if args.train or args.finetune:
        # Optimizer
        if hasattr(model.module, 'base'):  # low learning_rate the base network (aka. ResNet-50)
            base_param_ids = set(map(id, model.module.base.parameters()))
            new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
            param_groups = [{'params': model.module.base.parameters(), 'lr_mult': 0.1},
                            {'params': new_params, 'lr_mult': 1.0}]
        else:
            param_groups = model.parameters()
        optimizer = torch.optim.SGD(param_groups, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay,
                                    nesterov=True)

        # Trainer
        if args.camstyle == 0:
            trainer = Trainer(model, criterion)
        else:
            trainer = CamStyleTrainer(model, criterion, camstyle_loader)

        # Schedule learning rate
        def adjust_lr(epoch):
            # Step decay, further reduced when fine-tuning on vehicle sets.
            step_size = args.step_size
            lr = args.lr * (0.1 ** (epoch // step_size))
            if args.finetune:
                if args.dataset == "veri":
                    lr = lr / 50
                elif args.dataset == "vihicle_id":
                    lr = lr / 10
            for g in optimizer.param_groups:
                g['lr'] = lr * g.get('lr_mult', 1)

        # Draw Curve
        epoch_s = []
        loss_s = []
        prec_s = []

        # Start training
        for epoch in range(start_epoch, args.epochs):
            t0 = time.time()
            adjust_lr(epoch)
            train_loss, train_prec = trainer.train(epoch, train_loader, optimizer, fix_bn=args.fix_bn)

            if epoch < args.start_save:
                continue

            if epoch % 20 == 0 or args.finetune:
                evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, eval_only=True)

            # NOTE(review): top1_eval is hard-coded (the evaluation result
            # above is discarded), so every epoch is marked "best" — confirm
            # whether real validation top-1 was meant to be used here.
            top1_eval = 50

            is_best = top1_eval >= best_top1
            best_top1 = max(top1_eval, best_top1)
            save_checkpoint({
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint_{}.pth.tar'.format(date_str)))
            epoch_s.append(epoch)
            loss_s.append(train_loss)
            prec_s.append(train_prec)
            draw_curve(os.path.join(args.logs_dir, 'train_{}.jpg'.format(date_str)), epoch_s, loss_s, prec_s)

            t1 = time.time()
            t_epoch = t1 - t0
            # FIX: the format string had only three placeholders for four
            # arguments, so the trailing ' *' best-marker was silently
            # dropped by str.format; add the missing '{}'.
            print('\n * Finished epoch {:3d}  top1_eval: {:5.1%}  best_eval: {:5.1%}{} \n'.
                  format(epoch, top1_eval, best_top1, ' *' if is_best else ''))
            print('*************** Epoch takes time: {:^10.2f} *********************\n'.format(t_epoch))

        # Final test
        print('Test with best model:')
        model, start_epoch, best_top1 = checkpoint_loader(model, osp.join(args.logs_dir, 'model_best.pth.tar'),
                                                          eval_only=True)
        print("=> Start epoch {}  best top1 {:.1%}".format(start_epoch, best_top1))

        evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, eval_only=True)
# ---- Example #21 (score: 0) ----
def main(args):
    """Train and evaluate a QAConv (query-adaptive convolution) re-ID model.

    Builds a ResNet feature-map backbone plus a QAConv matcher, trains them
    with a pairwise matching loss (unless --evaluate), then evaluates on the
    comma-separated --testset list, optionally with re-ranking/TLift scores
    saved to .mat files, and appends a results summary to text files under
    the experiment directory.

    Args:
        args: parsed command-line namespace (arch/ibn/final_layer, lr and
            schedule, dataset/testset names, evaluation batch sizes, etc.).
    """
    cudnn.deterministic = False
    cudnn.benchmark = True

    exp_database_dir = osp.join(args.exp_dir, string.capwords(args.dataset))
    output_dir = osp.join(exp_database_dir, args.method, args.sub_method)
    log_file = osp.join(output_dir, 'log.txt')
    # Redirect print to both console and log file
    sys.stdout = Logger(log_file)

    # Create model
    ibn_type = args.ibn
    if ibn_type == 'none':
        ibn_type = None
    model = resmap.create(args.arch,
                          ibn_type=ibn_type,
                          final_layer=args.final_layer,
                          neck=args.neck).cuda()
    num_features = model.num_features

    # Feature-map size depends on which residual stage is the final layer
    # (stride 8/16/32 for layer2/3/4).
    feamap_factor = {'layer2': 8, 'layer3': 16, 'layer4': 32}
    hei = args.height // feamap_factor[args.final_layer]
    wid = args.width // feamap_factor[args.final_layer]
    matcher = QAConv(num_features, hei, wid).cuda()

    # Echo the command line into the log.
    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')

    # Criterion
    criterion = PairwiseMatchingLoss(matcher).cuda()

    # Optimizer: backbone at 0.1x lr, new layers and matcher at full lr.
    base_param_ids = set(map(id, model.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    param_groups = [{
        'params': model.base.parameters(),
        'lr': 0.1 * args.lr
    }, {
        'params': new_params,
        'lr': args.lr
    }, {
        'params': matcher.parameters(),
        'lr': args.lr
    }]

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=5e-4,
                                nesterov=True)

    # Load from checkpoint (model + criterion + optimizer state),
    # BEFORE the DataParallel wrap, so keys carry no 'module.' prefix.
    start_epoch = 0

    if args.resume or args.evaluate:
        print('Loading checkpoint...')
        if args.resume and (args.resume != 'ori'):
            checkpoint = load_checkpoint(args.resume)
        else:
            checkpoint = load_checkpoint(
                osp.join(output_dir, 'checkpoint.pth.tar'))
        model.load_state_dict(checkpoint['model'])
        criterion.load_state_dict(checkpoint['criterion'])
        optimizer.load_state_dict(checkpoint['optim'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {} ".format(start_epoch))

    model = nn.DataParallel(model).cuda()

    # Create data loaders
    # Warning: this training data loader cannot be used elsewhere other than a continueous training, otherwise the
    #   switch between the PK sampler and GS sampler will be incorrect!
    save_path = None
    if args.gs_save:
        save_path = output_dir
    dataset, num_classes, train_loader, _, _ = get_data(
        args.dataset, args.data_dir, model, matcher, start_epoch - 1,
        save_path, args)

    # Decay LR by a factor of 0.1 every step_size epochs
    lr_scheduler = StepLR(optimizer,
                          step_size=args.step_size,
                          gamma=0.1,
                          last_epoch=start_epoch - 1)

    if not args.evaluate:
        # Trainer
        trainer = Trainer(model, criterion, args.clip_value)

        t0 = time.time()
        # Start training
        for epoch in range(start_epoch, args.epochs):
            loss, acc = trainer.train(epoch, train_loader, optimizer)

            lr = list(map(lambda group: group['lr'], optimizer.param_groups))
            lr_scheduler.step()
            train_time = time.time() - t0

            print(
                '* Finished epoch %d at lr=[%g, %g, %g]. Loss: %.3f. Acc: %.2f%%. Training time: %.0f seconds.                  \n'
                %
                (epoch + 1, lr[0], lr[1], lr[2], loss, acc * 100, train_time))

            save_checkpoint(
                {
                    'model': model.module.state_dict(),
                    'criterion': criterion.state_dict(),
                    'optim': optimizer.state_dict(),
                    'epoch': epoch + 1,
                },
                fpath=osp.join(output_dir, 'checkpoint.pth.tar'))

    # Final test
    print('Evaluate the learned model:')
    t0 = time.time()

    # Evaluator
    evaluator = Evaluator(model)

    avg_rank1 = 0
    avg_mAP = 0
    num_testsets = 0
    results = {}

    test_names = args.testset.strip().split(',')
    for test_name in test_names:
        if test_name not in datasets.names():
            print('Unknown dataset: %s.' % test_name)
            continue

        testset, test_query_loader, test_gallery_loader = \
            get_test_data(test_name, args.data_dir, args.height, args.width, args.workers, args.test_fea_batch)

        if not args.do_tlift:
            testset.has_time_info = False
        test_rank1, test_mAP, test_rank1_rerank, test_mAP_rerank, test_rank1_tlift, test_mAP_tlift, test_dist, \
        test_dist_rerank, test_dist_tlift, pre_tlift_dict = \
            evaluator.evaluate(matcher, testset, test_query_loader, test_gallery_loader,
                               args.test_gal_batch, args.test_prob_batch,
                               args.tau, args.sigma, args.K, args.alpha)

        results[test_name] = [test_rank1, test_mAP]
        # The training set itself is excluded from the cross-dataset average.
        if test_name != args.dataset:
            avg_rank1 += test_rank1
            avg_mAP += test_mAP
            num_testsets += 1

        if testset.has_time_info:
            print(
                '  %s: rank1=%.1f, mAP=%.1f, rank1_rerank=%.1f, mAP_rerank=%.1f,'
                ' rank1_rerank_tlift=%.1f, mAP_rerank_tlift=%.1f.\n' %
                (test_name, test_rank1 * 100, test_mAP * 100,
                 test_rank1_rerank * 100, test_mAP_rerank * 100,
                 test_rank1_tlift * 100, test_mAP_tlift * 100))
        else:
            print('  %s: rank1=%.1f, mAP=%.1f.\n' %
                  (test_name, test_rank1 * 100, test_mAP * 100))

        result_file = osp.join(exp_database_dir, args.method,
                               test_name + '_results.txt')
        with open(result_file, 'a') as f:
            f.write('%s/%s:\n' % (args.method, args.sub_method))
            if testset.has_time_info:
                f.write(
                    '\t%s: rank1=%.1f, mAP=%.1f, rank1_rerank=%.1f, mAP_rerank=%.1f, rank1_rerank_tlift=%.1f, '
                    'mAP_rerank_tlift=%.1f.\n\n' %
                    (test_name, test_rank1 * 100, test_mAP * 100,
                     test_rank1_rerank * 100, test_mAP_rerank * 100,
                     test_rank1_tlift * 100, test_mAP_tlift * 100))
            else:
                f.write('\t%s: rank1=%.1f, mAP=%.1f.\n\n' %
                        (test_name, test_rank1 * 100, test_mAP * 100))

        if args.save_score:
            # FIX: the np.object alias was removed in NumPy 1.24; the
            # builtin `object` is the documented replacement.
            test_gal_list = np.array(
                [fname for fname, _, _, _ in testset.gallery], dtype=object)
            test_prob_list = np.array(
                [fname for fname, _, _, _ in testset.query], dtype=object)
            test_gal_ids = [pid for _, pid, _, _ in testset.gallery]
            test_prob_ids = [pid for _, pid, _, _ in testset.query]
            test_gal_cams = [c for _, _, c, _ in testset.gallery]
            test_prob_cams = [c for _, _, c, _ in testset.query]
            test_score_file = osp.join(exp_database_dir, args.method,
                                       args.sub_method,
                                       '%s_score.mat' % test_name)
            # Scores are stored as similarities (1 - distance).
            sio.savemat(test_score_file, {
                'score': 1. - test_dist,
                'score_rerank': 1. - test_dist_rerank,
                'score_tlift': 1. - test_dist_tlift,
                'gal_time': pre_tlift_dict['gal_time'],
                'prob_time': pre_tlift_dict['prob_time'],
                'gal_list': test_gal_list,
                'prob_list': test_prob_list,
                'gal_ids': test_gal_ids,
                'prob_ids': test_prob_ids,
                'gal_cams': test_gal_cams,
                'prob_cams': test_prob_cams
            },
                        oned_as='column',
                        do_compression=True)

    test_time = time.time() - t0
    avg_rank1 /= num_testsets
    avg_mAP /= num_testsets
    for key in results.keys():
        print('%s: rank1=%.1f%%, mAP=%.1f%%.' %
              (key, results[key][0] * 100, results[key][1] * 100))
    print('Average: rank1=%.2f%%, mAP=%.2f%%.\n\n' %
          (avg_rank1 * 100, avg_mAP * 100))

    result_file = osp.join(exp_database_dir, args.method,
                           args.sub_method[:-5] + '_avg_results.txt')
    with open(result_file, 'a') as f:
        f.write('%s/%s:\n' % (args.method, args.sub_method))
        if not args.evaluate:
            f.write('\t Loss: %.3f, acc: %.2f%%. ' % (loss, acc * 100))
            f.write("Train: %.0fs. " % train_time)
        f.write("Test: %.0fs. " % test_time)
        f.write('Rank1: %.2f%%, mAP: %.2f%%.\n' %
                (avg_rank1 * 100, avg_mAP * 100))
        for key in results.keys():
            f.write('\t %s: Rank1: %.1f%%, mAP: %.1f%%.\n' %
                    (key, results[key][0] * 100, results[key][1] * 100))
        f.write('\n')

    if not args.evaluate:
        print('Finished training at epoch %d, loss = %.3f, acc = %.2f%%.\n' %
              (epoch + 1, loss, acc * 100))
        # FIX: the number of epochs actually trained is
        # (args.epochs - start_epoch); the original added 1 (off-by-one).
        # max(..., 1) guards a zero divisor when resuming at args.epochs.
        print(
            "Total training time: %.3f sec. Average training time per epoch: %.3f sec."
            % (train_time, train_time / max(args.epochs - start_epoch, 1)))
    print("Total testing time: %.3f sec.\n" % test_time)

    for arg in sys.argv:
        print('%s ' % arg, end='')
    print('\n')
# ---- Example #22 (score: 0) ----
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
            (256, 128)

    # get source data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get target data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hacking here to let the classifier be the number of source ids
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
        coModel = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
        coModel = models.create(args.arch, num_classes=676, pretrained=False)
    elif args.src_dataset == 'msmt17':
        model = models.create(args.arch, num_classes=1041, pretrained=False)
        coModel = models.create(args.arch, num_classes=1041, pretrained=False)
    elif args.src_dataset == 'cuhk03':
        model = models.create(args.arch, num_classes=1230, pretrained=False)
        coModel = models.create(args.arch, num_classes=1230, pretrained=False)
    else:
        raise RuntimeError('Please specify the number of classes (ids) of the network.')

    # Load from checkpoint
    if args.resume:
        print('Resuming checkpoints from finetuned model on another dataset...\n')
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        coModel.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()
    coModel = nn.DataParallel(coModel).cuda()

    evaluator = Evaluator(model, print_freq=args.print_freq)
    # evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    # if args.evaluate: return

    # Criterion
    criterion = [
        TripletLoss(args.margin, args.num_instances, isAvg=False, use_semi=False).cuda(),
        TripletLoss(args.margin, args.num_instances, isAvg=False, use_semi=False).cuda(),
    ]

    # Optimizer
    optimizer = torch.optim.Adam(
        model.parameters(), lr=args.lr
    )
    coOptimizer = torch.optim.Adam(
        coModel.parameters(), lr=args.lr
    )

    optims = [optimizer, coOptimizer]

    # training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            source_features = 0
        else:
            # get source datas' feature
            source_features, _ = extract_features(model, src_extfeat_loader, print_freq=args.print_freq, numStripe=None)
            # synchronization feature order with src_dataset.train
            source_features = torch.cat([source_features[f].unsqueeze(0) for f, _, _ in src_dataset.train], 0)

            # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(iter_n + 1))
        target_features, _ = extract_features(model, tgt_extfeat_loader, print_freq=args.print_freq, numStripe=None)
        # synchronization feature order with dataset.train
        target_features = torch.cat([target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval], 0)
        # calculate distance and rerank result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
        rerank_dist = re_ranking(source_features, target_features, lambda_value=args.lambda_value)
        if iter_n == 0:
            # DBSCAN cluster
            tri_mat = np.triu(rerank_dist, 1)  # tri_mat.dim=2
            tri_mat = tri_mat[np.nonzero(tri_mat)]  # tri_mat.dim=1
            tri_mat = np.sort(tri_mat, axis=None)
            top_num = np.round(args.rho * tri_mat.size).astype(int)
            eps = tri_mat[:top_num].mean()
            print('eps in cluster: {:.3f}'.format(eps))
            cluster = DBSCAN(eps=eps, min_samples=4, metric='precomputed', n_jobs=8)

        # select & cluster images as training set of this epochs
        print('Clustering and labeling...')
        labels = cluster.fit_predict(rerank_dist)
        num_ids = len(set(labels)) - 1
        print('Iteration {} have {} training ids'.format(iter_n + 1, num_ids))
        # generate new dataset
        new_dataset, unknown_dataset = [], []
        # assign label for target ones
        unknownLab = labelNoise(torch.from_numpy(target_features), torch.from_numpy(labels))
        # unknownFeats = target_features[labels==-1,:]
        unCounter, index = 0, 0
        from collections import defaultdict
        realIDs, fakeIDs = defaultdict(list), []
        for (fname, realPID, cam), label in zip(tgt_dataset.trainval, labels):
            if label == -1:
                unknown_dataset.append((fname, int(unknownLab[unCounter]), cam))  # unknown data
                fakeIDs.append(int(unknownLab[unCounter]))
                realIDs[realPID].append(index)
                unCounter += 1
                index += 1
                continue
            # dont need to change codes in trainer.py _parsing_input function and sampler function after add 0
            new_dataset.append((fname, label, cam))
            fakeIDs.append(label)
            realIDs[realPID].append(index)
            index += 1
        print('Iteration {} have {} training images'.format(iter_n + 1, len(new_dataset)))
        precision, recall, fscore = calScores(realIDs, np.asarray(fakeIDs))  # fakeIDs does not contain -1
        print('precision:{}, recall:{}, fscore: {}'.format(100 * precision, 100 * recall, fscore))

        train_loader = DataLoader(
            Preprocessor(new_dataset, root=tgt_dataset.images_dir, transform=train_transformer),
            batch_size=args.batch_size, num_workers=4,
            sampler=RandomIdentitySampler(new_dataset, args.num_instances),
            pin_memory=True, drop_last=True
        )
        # hard samples
        # noiseImgs = [name[1] for name in unknown_dataset]
        # saveAll(noiseImgs, tgt_dataset.images_dir, 'noiseImg')
        # import ipdb; ipdb.set_trace()
        unLoader = DataLoader(
            Preprocessor(unknown_dataset, root=tgt_dataset.images_dir, transform=train_transformer),
            batch_size=args.batch_size, num_workers=4,
            sampler=RandomIdentitySampler(unknown_dataset, args.num_instances),
            pin_memory=True, drop_last=True
        )
        # train model with new generated dataset
        trainer = RCoTeaching(
           model, coModel, train_loader, unLoader, criterion, optims
        )

        # Start training
        for epoch in range(args.epochs):
            trainer.train(epoch, remRate=0.2 + (0.8 / args.iteration) * (1 + iter_n))

        # test only
        rank_score = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
        # print('co-model:\n')
        # rank_score = evaluatorB.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)

    # Evaluate
    rank_score = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    save_checkpoint({
        'state_dict': model.module.state_dict(),
        'epoch': epoch + 1, 'best_top1': rank_score.market1501[0],
    }, True, fpath=osp.join(args.logs_dir, 'RCT.pth'))
    return rank_score.map, rank_score.market1501[0]
# ---- Example #23 (score: 0) ----
def main(args):
    """Train or evaluate a classification-loss re-ID model, then dump the
    run's options (opts.yaml) and final results (results.txt) to logs_dir.

    With --evaluate, only runs the test protocol on a resumed checkpoint.
    Otherwise trains with SGD + step lr decay, checkpoints every epoch,
    and evaluates the last saved checkpoint at the end.

    Args:
        args: parsed command-line namespace (dataset/loader options, arch,
            features, dropout, lr schedule, resume/evaluate flags, ...).
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset,  args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 )

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes)

    # Load from checkpoint (partial load: only keys present in the current
    # model are restored, so a checkpoint with extra heads still works).
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model_dict = model.state_dict()
        checkpoint_load = {
            k: v
            for k, v in (checkpoint['state_dict']).items() if k in model_dict
        }
        model_dict.update(checkpoint_load)
        model.load_state_dict(model_dict)
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))

    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                           dataset.gallery)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer: backbone at 0.1x lr via 'lr_mult', new layers at 1.0x.
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)
    # Trainer
    trainer = Trainer(model, criterion, 0, 0, SMLoss_mode=0)

    # Schedule learning rate
    def adjust_lr(epoch):
        # Step decay: 0.1x every step_size epochs (60 for inception).
        step_size = 60 if args.arch == 'inception' else args.step_size
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        # NOTE(review): is_best is unconditionally True, so every epoch
        # overwrites the "best" copy — confirm whether a validation metric
        # was meant to gate this.
        is_best = True
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'checkpoint.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    results = evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                 dataset.gallery)

    # save the parameters and results
    with open('%s/opts.yaml' % args.logs_dir, 'w') as fp:
        yaml.dump(vars(args), fp, default_flow_style=False)

    # FIX: the original concatenated args.logs_dir + "results.txt" without a
    # path separator (writing e.g. "logsresults.txt") and never closed the
    # file handle; use osp.join and a context manager.
    txtName = osp.join(args.logs_dir, 'results.txt')
    with open(txtName, 'w') as result_fp:
        for key in results:
            result_fp.write(key + ': ' + str(results[key]) + '\n')
# ---- Example #24 (score: 0) ----
def main(args):
    """Train a ResNet re-id model with TripletLoss_biu and track best mAP.

    Builds the data loaders, model and optimizer from ``args`` (an argparse
    Namespace), optionally resumes from a checkpoint, trains for
    ``args.epochs`` epochs, evaluates every third epoch, and finally re-tests
    the best saved model.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.num_instances, args.workers,
                 args.combine_trainval)

    # Create model
    # Hacking here to let the classifier be the last feature embedding layer
    # Net structure: avgpool -> FC(1024) -> FC(args.features)
    model = ResNet(args.depth, num_features=args.features,
                   dropout=args.dropout, num_classes=num_classes)
    model = nn.DataParallel(model).cuda()

    # Load from checkpoint.
    # BUG FIX: 'prior_best_map' is used by the training loop below but was
    # previously only assigned when resuming, so a fresh run crashed with
    # NameError on the first evaluation.  Initialize it unconditionally.
    start_epoch = prior_best_map = 0
    if args.if_resume:
        print(args.resume)
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        # Checkpoints written below store 'best_map'; accept the legacy
        # 'best_top1' key as a fallback for older checkpoints.
        prior_best_map = checkpoint.get('best_map',
                                        checkpoint.get('best_top1', 0))
        # BUG FIX: also restore the epoch counter so training resumes where
        # it stopped instead of restarting at 0.
        start_epoch = checkpoint.get('epoch', 0)
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, prior_best_map))

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion: batch-hard triplet-style loss with alpha/beta/gamma weights.
    criterion = TripletLoss_biu(margin=args.margin,
                                num_instances=args.num_instances,
                                alpha=args.alpha, beta=args.beta,
                                gamma=args.gamma).cuda()

    # Optimizer
    if args.optimizer == 'sgd':
        param_groups = model.parameters()
        optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=True)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                     weight_decay=args.weight_decay)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate: SGD decays 10x every 40 epochs; Adam holds the
    # base LR for 80 epochs, then decays exponentially.
    def adjust_lr(epoch):
        if args.optimizer == 'sgd':
            lr = args.lr * (0.1 ** (epoch // 40))
        else:
            lr = args.lr if epoch <= 80 else \
                 args.lr * (0.1 ** ((epoch - 100) / 60.0))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training; evaluate and checkpoint every third epoch.
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)

        if epoch % 3 == 0:
            metric.train(model, train_loader)
            top_map = evaluator.evaluate(test_loader, dataset.query,
                                         dataset.gallery)
            is_best = top_map > prior_best_map
            prior_best_map = max(top_map, prior_best_map)
            # BUG FIX: store the running best under 'best_map' (the key the
            # resume path reads), not the current epoch's score.
            save_checkpoint({
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_map': prior_best_map,
            }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

    # Final test with the best saved model.
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #25
0
def main(args):
    """Train an attention re-id model, checkpoint periodically, then run a
    universal-adversarial-noise stage.

    NOTE(review): the tail of this function (from ``model.eval()`` onward)
    references names (``modelTest``, ``mix_loader``, ``extract_features``,
    ``faiss``, ``T``, ``MI_SGD``) that are not defined anywhere in this
    function or visible at module level — it appears to have been pasted in
    from another script and will raise ``NameError`` if reached. Verify
    before running.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True  # fixed-size inputs -> let cudnn autotune kernels

    # Mirror stdout into a log file when training.
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir + '/log'))

    # Default input resolution depends on the backbone architecture.
    if args.height is None or args.width is None:
        args.height, args.width = (144,
                                   56) if args.arch == 'inception' else (256,
                                                                         128)
    dataset, num_classes, train_loader, trainvallabel, val_loader, query_loader, gallery_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
            args.width, args.batch_size, args.num_instances, args.workers,
            args.combine_trainval, args.flip_prob, args.padding, args.re_prob)
    model = models.create(args.arch,
                          num_classes=num_classes,
                          num_features=args.features,
                          attention_mode=args.att_mode)
    print(model)
    model = model.cuda()

    evaluator = Evaluator(model)
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Optionally resume weights and the epoch counter.
    start_epoch = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['model'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}".format(start_epoch))

    # Evaluate-only mode: run once and terminate the process.
    if args.evaluate:
        metric.train(model, train_loader)
        evaluator.evaluate(dataset.query, dataset.gallery)
        exit()

    current_margin = args.margin  # only consumed by commented-out TripletLoss below
    criterion_z = nn.CrossEntropyLoss().cuda()  # identity classification loss
    # criterion_z = CrossEntropyLabelSmooth(num_classes= num_classes, epsilon=0.5).cuda()
    # criterion_I = TripletLoss(margin= current_margin).cuda()
    # criterion_D = nn.CrossEntropyLoss().cuda()

    print(args)

    # Collect parameter ids of the head layers so they can be excluded from
    # the backbone parameter group below.
    if args.arch == 'ide':
        ignored_params = list(map(id, model.model.fc.parameters())) + list(
            map(id, model.classifier.parameters()))
    else:
        ignored_params = list(map(id, model.classifier.parameters())) + list(
            map(id, model.attention_module.parameters()))

    base_params = filter(lambda p: id(p) not in ignored_params,
                         model.parameters())

    # NOTE(review): both branches below construct the same Adam optimizer, so
    # args.use_adam currently has no effect — the commented-out SGD code in
    # the else branch suggests that was the intent. Confirm before relying on
    # the flag.
    if args.use_adam:
        # optimizer_ft = torch.optim.Adam(model.parameters(),lr=args.lr, weight_decay=5e-4)
        optimizer_ft = torch.optim.Adam([
            {
                'params': filter(lambda p: p.requires_grad, base_params),
                'lr': args.lr
            },
            {
                'params': model.classifier.parameters(),
                'lr': args.lr
            },
            {
                'params': model.attention_module.parameters(),
                'lr': args.lr
            },
        ],
                                        weight_decay=5e-4)

        # optimizer_discriminator = torch.optim.Adam([
        #     {'params': model_discriminator.model.parameters(), 'lr': args.lr},
        #     ],
        #     weight_decay=5e-4)

    else:
        optimizer_ft = torch.optim.Adam([
            {
                'params': filter(lambda p: p.requires_grad, base_params),
                'lr': args.lr
            },
            {
                'params': model.classifier.parameters(),
                'lr': args.lr
            },
            {
                'params': model.attention_module.parameters(),
                'lr': args.lr
            },
        ],
                                        weight_decay=5e-4)
        # optimizer_ft = torch.optim.SGD([
        #      {'params': filter(lambda p: p.requires_grad,base_params), 'lr': args.lr},
        #      {'params': model.classifier.parameters(), 'lr': args.lr},
        #      {'params': model.att_classifier.parameters(), 'lr': args.lr},
        #      {'params': model.projector1.parameters(), 'lr': args.lr},
        #      {'params': model.projector2.parameters(), 'lr': args.lr},
        #      {'params': model.projector3.parameters(), 'lr': args.lr},
        #      {'params': model.projector4.parameters(), 'lr': args.lr},
        #      {'params': model.att1.parameters(), 'lr': args.lr},
        #      {'params': model.att2.parameters(), 'lr': args.lr},
        #      {'params': model.att3.parameters(), 'lr': args.lr},
        #      {'params': model.att4.parameters(), 'lr': args.lr},
        #     ],
        #     momentum=0.9,
        #     weight_decay=5e-4,
        #     nesterov=True)

        # optimizer_discriminator = torch.optim.SGD([
        #      {'params': model_discriminator.model.parameters(), 'lr': args.lr},
        #      {'params': model_discriminator.classifier.parameters(), 'lr': args.lr},
        #     ],
        #     momentum=0.9,
        #     weight_decay=5e-4,
        #     nesterov=True)

    # Warm-up followed by multi-step LR decay.
    scheduler = WarmupMultiStepLR(optimizer_ft, args.mile_stone, args.gamma,
                                  args.warmup_factor, args.warmup_iters,
                                  args.warmup_methods)

    trainer = Trainer(model, criterion_z, trainvallabel)

    flag = 1  # NOTE(review): unused
    best_top1 = -1
    # Start training
    for epoch in range(start_epoch, args.epochs):
        # NOTE(review): stepping the scheduler before the epoch's optimizer
        # updates is the pre-PyTorch-1.1 ordering — confirm against the
        # installed torch version.
        scheduler.step()
        id_loss = trainer.train(epoch, train_loader, optimizer_ft)
        # Always write a rolling checkpoint (never flagged as best here).
        save_checkpoint(
            {
                'model': model.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            False,
            epoch,
            args.logs_dir,
            fpath='checkpoint.pth.tar')

        # Evaluate only after epoch 100, and then every 10 epochs.
        if epoch < 100:
            continue
        if not epoch % 10 == 0:
            continue

        top1 = evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                                  dataset.gallery, metric)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'model': model.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            epoch,
            args.logs_dir,
            fpath='checkpoint.pth.tar')

    print('Test with best model:')
    # NOTE(review): 'epoch', 'top1' and 'is_best' are unbound here if the
    # loop never reached an evaluation (e.g. args.epochs <= 100) -> NameError.
    print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.format(
        epoch, top1, best_top1, ' *' if is_best else ''))

    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['model'])
    metric.train(model, train_loader)
    evaluator.evaluate(query_loader, gallery_loader, dataset.query,
                       dataset.gallery, metric)
    print(args)
    # NOTE(review): everything below references names not defined in this
    # function (modelTest, mix_loader, extract_features, faiss, T, MI_SGD);
    # it looks spliced in from an adversarial-noise script and will raise
    # NameError if executed.
    model.eval()
    modelTest.eval()
    if torch.cuda.is_available():
        model = model.cuda()
        modelTest = modelTest.cuda()

    features, _ = extract_features(model, mix_loader, print_freq=10)
    features = torch.stack([features[f] for f, _, _ in mix_loader.dataset.dataset])

    # Cluster the extracted features into 512 centroids with faiss k-means.
    ncentroids = 512
    fDim = features.shape[1]
    cluster = faiss.Kmeans(fDim, ncentroids, niter=20, gpu=True)
    cluster.train(features.cpu().numpy())
    centroids = torch.from_numpy(cluster.centroids).cuda().float()

    evaluator = Evaluator(modelTest, args.print_freq)
    evaSrc = Evaluator(model, args.print_freq)

    # universal noise
    noise = torch.zeros((3, args.height, args.width)).cuda()
    normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    noise.requires_grad = True
    MAX_EPS = args.max_eps / 255.0  # scale the pixel epsilon into [0, 1]

    optimizer = MI_SGD(
        [
            {"params": [noise], "lr": MAX_EPS / 10, "momentum": 1, "sign": True}
        ],
        max_eps=MAX_EPS,
    )
Example #27
0
def main(args):
    """Train a 'masks'-type re-id model with triplet loss and warm-up.

    Builds loaders/model from ``args``, optionally warm-starts from partial
    weights or resumes a checkpoint, trains with SGD (two LR groups), and
    every 10th epoch after ``args.start_save`` evaluates and checkpoints on
    improved mAP.
    """
    random.seed(args.seed)
    # NOTE(review): numpy/torch are seeded with a fixed 1 rather than
    # args.seed; preserved as-is — confirm whether this is intentional.
    np.random.seed(1)
    torch.manual_seed(1)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Model-type specific hyper-parameters.
    params = {}
    if args.model_type == 'masks':
        params['num_m_features'] = args.num_m_features
        params['masks'] = args.masks
    else:
        print('unknown model type.')  # message typo fix: was 'unkrown'
        return

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (256, 256)
    dataset, num_classes, random_train_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.logs_dir, args.model_type, params,
                 args.height, args.width, args.crop_height, args.crop_width,
                 args.batch_size, args.workers, args.combine_trainval)

    # Create model
    model = models.create(args.arch, num_classes=num_classes, params=params)

    # Load from checkpoint.
    start_epoch = best_top1 = best_mAP = 0
    if args.weights:
        # Warm-start: keep only checkpoint keys that exist in this model.
        checkpoint = load_checkpoint(args.weights)
        model_dict = model.state_dict()
        checkpoint_load = {
            k: v
            for k, v in (checkpoint['state_dict']).items() if k in model_dict
        }
        model_dict.update(checkpoint_load)
        model.load_state_dict(model_dict)
    if args.resume and not args.weights:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
    # NOTE(review): hard-coded 4-GPU DataParallel — confirm device ids exist.
    model = nn.DataParallel(model, [0, 1, 2, 3]).cuda()

    # Criterion
    criterion = TripletLoss().cuda()

    # Optimizer: backbone at args.lrm LR multiplier, new layers at 1.0.
    base_params = [p for _, p in model.module.base.named_parameters()]
    base_param_ids = set(map(id, model.module.base.parameters()))
    new_params = [p for p in model.parameters() if id(p) not in base_param_ids]
    if args.model_type == 'masks':
        param_groups = [{
            'params': base_params,
            'lr_mult': args.lrm
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        print('unknown model type.')  # message typo fix: was 'unkrown'
        return

    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion, num_classes, args.logs_dir)

    # Evaluator
    evaluator = Evaluator(model)

    # Schedule learning rate: constant, then a single 10x decay at step_size.
    def adjust_lr(epoch):
        # BUG FIX: the original if/elif left 'lr' unbound for
        # epoch >= args.epochs; use an explicit two-way branch instead.
        lr = args.lr if epoch < args.step_size else args.lr * 0.1
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
        return lr

    # Start training
    for epoch in range(start_epoch, args.epochs):
        lr = adjust_lr(epoch)

        # Warm up only for the first args.warm_up_ep epochs.
        warm_up = epoch < args.warm_up_ep
        trainer.train(epoch, random_train_loader, optimizer, lr, warm_up,
                      args.warm_up_ep)

        if epoch < args.start_save:
            continue

        # Evaluate every 10th epoch (9, 19, 29, ...).
        if epoch % 10 == 9:
            print('Epoch: [%d]' % epoch)
            top1, mAP = evaluator.evaluate(test_loader, dataset.query,
                                           dataset.gallery)

            is_best = mAP > best_mAP
            best_mAP = max(mAP, best_mAP)
            # BUG FIX: store the running best under 'best_mAP' (matching the
            # key name), not the current epoch's score.
            save_checkpoint(
                {
                    'state_dict': model.module.state_dict(),
                    'epoch': epoch + 1,
                    'best_mAP': best_mAP,
                },
                is_best,
                fpath=osp.join(args.logs_dir, 'model_best.pth.tar'))

            if epoch == args.epochs - 1:
                save_checkpoint(
                    {
                        'state_dict': model.module.state_dict(),
                        'epoch': epoch + 1,
                        'best_mAP': best_mAP,
                    },
                    True,
                    fpath=osp.join(args.logs_dir, 'last.pth.tar'))
def main(args):
    """Train a 6-stripe FCN re-id model and checkpoint on best mAP.

    Builds loaders/model from ``args``, optionally resumes from a checkpoint
    (partial weight loading tolerated), trains with SGD and a step LR decay,
    and evaluates every ``args.eval_step`` epochs, saving the best model.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders; default resolution depends on the backbone.
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, query_loader, gallery_loader = \
        get_data(args.dataset,  args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 )

    # Create model
    model = models.create(args.arch,
                          num_features=args.features,
                          dropout=args.dropout,
                          num_classes=num_classes,
                          cut_at_pooling=False,
                          FCN=True)

    # Load from checkpoint
    start_epoch = best_mAP = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        # Load only weights whose names exist in the current model so a
        # checkpoint from a slightly different head still loads.
        model_dict = model.state_dict()
        checkpoint_load = {
            k: v
            for k, v in (checkpoint['state_dict']).items() if k in model_dict
        }
        model_dict.update(checkpoint_load)
        model.load_state_dict(model_dict)
        start_epoch = checkpoint['epoch']
        # BUG FIX: the stored best mAP was previously assigned to an unused
        # 'best_top1' local, so resuming silently reset the best score to 0
        # and the next evaluation always overwrote 'model_best'.
        best_mAP = checkpoint['best_mAP']
        print("=> Start epoch {}  best mAP {:.1%}".format(
            start_epoch, best_mAP))

    model = nn.DataParallel(model).cuda()

    # Evaluators: plain and 6-stripe variants.
    evaluator = Evaluator(model)
    evaluator_6stripes = Evaluator_6stripes(model)
    if args.evaluate:
        print("Test:")
        #evaluator.evaluate(query_loader, gallery_loader,  dataset.query, dataset.gallery)
        evaluator_6stripes.evaluate(query_loader, gallery_loader,
                                    dataset.query, dataset.gallery)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer: pretrained backbone at 0.1x LR, new layers at full LR.
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer_6stripes(model, criterion, 0, 0, SMLoss_mode=0)

    # Schedule learning rate: 10x decay every step_size epochs.
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else args.step_size
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Persist weights and bookkeeping; 'epoch' is read from the enclosing
    # training loop at call time (hoisted out of the loop — no need to
    # redefine the helper every epoch).
    def save_model(model_ema, is_best, best_mAP, mid):
        save_checkpoint(
            {
                'state_dict': model_ema.state_dict(),
                'epoch': epoch + 1,
                'best_mAP': best_mAP,
            },
            is_best,
            fpath=osp.join(args.logs_dir,
                           'model' + str(mid) + '_checkpoint.pth.tar'))

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)

        # Evaluate every eval_step epochs, and always on the final epoch.
        if ((epoch + 1) % args.eval_step == 0 or (epoch == args.epochs - 1)):
            R1, mAP = evaluator.evaluate(query_loader, gallery_loader,
                                         dataset.query, dataset.gallery)
            is_best = mAP > best_mAP
            if is_best:
                best_mAP = mAP
            save_model(model, is_best, best_mAP, epoch)
            print('\n * Finished epoch {:3d}  mAP: {:5.1%}  best: {:5.1%}{}\n'.
                  format(epoch, mAP, best_mAP, ' *' if is_best else ''))
Example #29
0
def main(args):
    """Train an attribute-aware re-id model with a triplet metric loss.

    Chooses between a multi-attribute and a type-attribute criterion/trainer
    via ``args.multi_attribute``, warm-starts the backbone from pretrained
    weights (``args.weights``) or resumes a full checkpoint (``args.resume``),
    and snapshots the model whenever evaluation improves on the best score.
    """
    print(args)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders; default to 256x256 source / 224x224 crop.
    if args.big_height is None or args.big_width is None or args.target_height is None or args.target_width is None:
        args.big_height, args.big_width, args.target_height, args.target_width = (
            256, 256, 224, 224)
    dataset, num_classes, train_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.big_height, args.big_width,
                 args.target_height, args.target_width, args.batch_size, args.num_instances,
                 args.workers, args.combine_trainval)

    # Create model
    model = models.create(args.arch,
                          num_classes=num_classes,
                          num_features=args.features,
                          is_cls=args.is_cls)

    # Load from checkpoint
    start_epoch = best = 0
    if args.weights:
        #model_dict = model.state_dict()
        #checkpoint_load = {k: v for k, v in (checkpoint['state_dict']).items() if k in model_dict}
        #model_dict.update(checkpoint_load)
        #model.load_state_dict(model_dict)
        if args.arch == 'cross_trihard_senet101':
            # This backbone ships its own parameter loader.
            model.base.load_param(args.weights)
        else:
            # Drop the pretrained classifier head before loading the backbone.
            checkpoint = load_checkpoint(args.weights)
            del (checkpoint['fc.weight'])
            del (checkpoint['fc.bias'])
            model.base.load_state_dict(checkpoint)
    if args.resume and not args.weights:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        # NOTE(review): 'best' is not stored in these checkpoints, so it
        # always prints 0 here and best tracking restarts after a resume.
        print("=> Start epoch {}  best top1 {:.1%}".format(start_epoch, best))

    model = nn.DataParallel(model).cuda()

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery)
        return

    # Criterion: attribute loss plus a triplet ('trihard') metric loss.
    ranking_loss = nn.MarginRankingLoss(margin=args.margin).cuda()
    if args.multi_attribute == 1:
        criterion = { 'MultiAttributeLoss': MultiAttributeLoss(is_cls=args.is_cls).cuda(), \
                  'trihard': TripletLoss(ranking_loss).cuda() }
    else:
        criterion = { 'TypeAttributeLoss': TypeAttributeLoss(is_cls=args.is_cls).cuda(), \
                  'trihard': TripletLoss(ranking_loss).cuda() }

    # Optimizer: backbone conv and bn parameters at 0.1x LR, newly added
    # layers at args.lr_mult.
    if hasattr(model.module, 'base'):
        base_params = []
        base_bn_params = []
        for name, p in model.module.base.named_parameters():
            if 'bn' in name:
                base_bn_params.append(p)
            else:
                base_params.append(p)
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': base_params,
            'lr_mult': 0.1
        }, {
            'params': base_bn_params,
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': args.lr_mult
        }]
    else:
        param_groups = model.parameters()

    if args.optimizer == 0:
        print('SGD')
        optimizer = torch.optim.SGD(param_groups,
                                    lr=args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=True)
    else:
        print('Adam')
        optimizer = torch.optim.Adam(params=param_groups,
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)

    # Trainer matching the chosen criterion.
    if args.multi_attribute == 1:
        print('Multi Attribute')
        trainer = Multi_Attribute_Trainer(
            model,
            criterion,
            metric_loss_weight=args.metric_loss_weight,
            sub_task_loss_weight=args.sub_task_loss_weight)
    else:
        print('Type Attribute')
        trainer = Type_Attribute_Trainer(
            model,
            criterion,
            metric_loss_weight=args.metric_loss_weight,
            sub_task_loss_weight=args.sub_task_loss_weight)

    # Schedule learning rate: three-step 10x decay at step_size/2/3.
    def adjust_lr(epoch):
        step_size, step_size2, step_size3 = args.step_size, args.step_size2, args.step_size3
        if epoch <= step_size:
            lr = args.lr
        elif epoch <= step_size2:
            lr = args.lr * 0.1
        elif epoch <= step_size3:
            lr = args.lr * 0.01
        else:
            lr = args.lr * 0.001
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)
        return lr

    # Start training (epochs are 1-indexed in this script).
    for epoch in range(start_epoch + 1, args.epochs + 1):
        lr = adjust_lr(epoch)
        trainer.train(epoch,
                      train_loader,
                      optimizer,
                      lr,
                      warm_up=True,
                      warm_up_ep=args.warm_up_ep)
        # Evaluate every epoch_inter epochs, and every epoch once
        # dense_evaluate is reached.
        if epoch % args.epoch_inter == 0 or epoch >= args.dense_evaluate:
            tmp_res = evaluator.evaluate(test_loader,
                                         dataset.query,
                                         dataset.gallery,
                                         is_attribute=True)
            # Snapshot only improvements, and only after args.start_save.
            if tmp_res > best and epoch >= args.start_save:
                best = tmp_res
                save_checkpoint(
                    {
                        'state_dict': model.module.state_dict(),
                        'epoch': epoch,
                    },
                    False,
                    fpath=osp.join(args.logs_dir, 'pass%d.pth.tar' % (epoch)))
Example #30
0
def main(args):
    """Train an OIM-loss re-id model and evaluate the best checkpoint.

    Standard open-reid flow: seed RNGs, build the data loaders, create or
    resume the model, train with SGD under a step LR decay while
    checkpointing on the best validation top-1, then test the best model.
    """
    # Seed RNGs for repeatability; cudnn autotunes for fixed-size inputs.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Mirror stdout into a log file unless we are only evaluating.
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Fall back to the per-architecture default input resolution.
    if args.height is None or args.width is None:
        args.height, args.width = ((144, 56) if args.arch == 'inception'
                                   else (256, 128))
    dataset, num_classes, train_loader, val_loader, test_loader = get_data(
        args.dataset, args.split, args.data_dir, args.height, args.width,
        args.batch_size, args.workers, args.combine_trainval)

    # Backbone with a normalized embedding head.
    model = models.create(args.arch, num_features=args.features,
                          norm=True, dropout=args.dropout)

    # Optionally resume weights, the epoch counter and the best score.
    start_epoch = 0
    best_top1 = 0
    if args.resume:
        ckpt = load_checkpoint(args.resume)
        model.load_state_dict(ckpt['state_dict'])
        start_epoch = ckpt['epoch']
        best_top1 = ckpt['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric and evaluator.
    metric = DistanceMetric(algorithm=args.dist_metric)
    evaluator = Evaluator(model)

    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # OIM loss over the embedding features.
    criterion = OIMLoss(model.module.num_features, num_classes,
                        scalar=args.oim_scalar,
                        momentum=args.oim_momentum).cuda()

    # Pretrained backbone trains at a 10x smaller LR than the new layers.
    if hasattr(model.module, 'base'):
        backbone_ids = {id(p) for p in model.module.base.parameters()}
        fresh_params = [p for p in model.parameters()
                        if id(p) not in backbone_ids]
        param_groups = [
            {'params': model.module.base.parameters(), 'lr_mult': 0.1},
            {'params': fresh_params, 'lr_mult': 1.0},
        ]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    trainer = Trainer(model, criterion)

    def adjust_lr(epoch):
        # Decay the LR 10x every step_size epochs (60 for inception, else 40).
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1 ** (epoch // step_size))
        for group in optimizer.param_groups:
            group['lr'] = lr * group.get('lr_mult', 1)

    # Train, validating and checkpointing after every epoch.
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        if is_best:
            best_top1 = top1
        save_checkpoint(
            {'state_dict': model.module.state_dict(),
             'epoch': epoch + 1,
             'best_top1': best_top1},
            is_best,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test with the best-scoring checkpoint.
    print('Test with best model:')
    best_ckpt = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(best_ckpt['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
def main(args):
    """Self-training domain adaptation for person re-ID with pseudo-labels.

    Loads a source-pretrained backbone (``--resume`` is effectively
    mandatory), measures the direct-transfer baseline on the target test
    set, then for each iteration: extracts source/target features,
    computes a re-ranked distance matrix, clusters it ONCE (DBSCAN or
    HDBSCAN, first iteration only) into pseudo identities, and fine-tunes
    the model together with a learnable-label classifier head.  The model
    is checkpointed whenever target mAP improves.

    Args:
        args: parsed command-line namespace (dataset names, input size,
            batch/instance counts, lr, iteration/epoch counts, ...).

    Returns:
        Tuple ``(mAP, rank-1)`` from the last completed iteration.
        NOTE(review): requires ``args.iteration >= 1``; otherwise
        ``rank_score`` is never bound and the return raises NameError.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    # get source data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get target data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hacking here to let the classifier be the number of source ids
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise RuntimeError(
            'Please specify the number of classes (ids) of the network.')

    # Load from checkpoint; a source-pretrained model is required.
    if args.resume:
        print(
            'Resuming checkpoints from finetuned model on another dataset...\n'
        )
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        # NOTE(review): raising a Warning subclass still aborts execution,
        # so running without --resume is not supported.
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()

    # Distance metric
    # metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator: direct-transfer baseline sets the initial best mAP.
    evaluator = Evaluator(model, print_freq=args.print_freq)
    print(
        "Test with the original model trained on source domain (direct transfer):"
    )
    rank_score_best = evaluator.evaluate(test_loader, tgt_dataset.query,
                                         tgt_dataset.gallery)
    best_map = rank_score_best.map  #market1501[0]-->rank-1

    if args.evaluate:
        return

    # Criterion: two triplet losses, an accumulated loss, and CE for the
    # learnable-label classifier (consumed by Trainer_with_learnable_label).
    criterion = [
        TripletLoss(args.margin, args.num_instances).cuda(),
        TripletLoss(args.margin, args.num_instances).cuda(),
        AccumulatedLoss(args.margin, args.num_instances).cuda(),
        nn.CrossEntropyLoss().cuda()
    ]

    # Optimizer
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=args.lr,
        momentum=0.9,
    )

    # training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            source_features = 0  #this value controls the usage of source data
        else:
            # get source datas' feature
            source_features, _ = extract_features(model,
                                                  src_extfeat_loader,
                                                  print_freq=args.print_freq)
            # synchronization feature order with src_dataset.train
            source_features = torch.cat([
                source_features[f].unsqueeze(0)
                for f, _, _ in src_dataset.train
            ], 0)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(
            iter_n + 1))
        target_features, _ = extract_features(model,
                                              tgt_extfeat_loader,
                                              print_freq=args.print_freq)
        # synchronization feature order with dataset.train
        target_features = torch.cat([
            target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval
        ], 0)
        # calculate distance and rerank result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
        rerank_dist = re_ranking(source_features,
                                 target_features,
                                 lambda_value=args.lambda_value)

        if iter_n == 0:
            # Clustering happens only once; later iterations reuse the
            # pseudo-labels, loader, and classifier built below.
            # DBSCAN cluster: eps is the mean of the smallest rho-fraction
            # of nonzero pairwise distances.
            tri_mat = np.triu(rerank_dist, 1)  # tri_mat.dim=2
            tri_mat = tri_mat[np.nonzero(tri_mat)]  # tri_mat.dim=1
            tri_mat = np.sort(tri_mat, axis=None)
            top_num = np.round(args.rho * tri_mat.size).astype(int)
            eps = tri_mat[:top_num].mean()
            print('eps in cluster: {:.3f}'.format(eps))
            cluster = DBSCAN(eps=eps,
                             min_samples=4,
                             metric='precomputed',
                             n_jobs=8)

            # HDBSCAN cluster
            import hdbscan
            cluster_hdbscan = hdbscan.HDBSCAN(min_cluster_size=10,
                                              min_samples=4,
                                              metric='precomputed')

            # select & cluster images as training set of this epochs
            print('Clustering and labeling...')
            if args.use_hdbscan_clustering:
                print(
                    'Use the better chlustering algorithm HDBSCAN for clustering'
                )
                labels = cluster_hdbscan.fit_predict(rerank_dist)
            else:
                print('Use DBSCAN for clustering')
                labels = cluster.fit_predict(rerank_dist)
            # Label -1 marks noise, hence "- 1" when counting identities.
            num_ids = len(set(labels)) - 1
            print('Only do once, Iteration {} have {} training ids'.format(
                iter_n + 1, num_ids))

            # generate new dataset, dropping noise samples (label == -1)
            new_dataset = []
            for (fname, _, _), label in zip(tgt_dataset.trainval, labels):
                if label == -1:
                    continue
                # dont need to change codes in trainer.py _parsing_input function and sampler function after add 0
                new_dataset.append((fname, label, 0))
            print('Only do once, Iteration {} have {} training images'.format(
                iter_n + 1, len(new_dataset)))

            train_loader = DataLoader(
                Preprocessor_return_index(new_dataset,
                                          root=tgt_dataset.images_dir,
                                          transform=train_transformer),
                batch_size=args.batch_size,
                num_workers=4,
                sampler=RandomIdentitySampler(new_dataset, args.num_instances),
                pin_memory=True,
                drop_last=True)

            # init pseudo/fake labels, y_tilde in cvpr19's paper:
            new_label = np.zeros([len(new_dataset), num_ids])
            # init y_tilde, let softmax(y_tilde) is noisy labels
            # FIX: the original `for index, (...) in enumerate(train_loader)`
            # bound the enumerate counter to `index` and immediately shadowed
            # it with the unpacked per-sample index tensor; the counter was
            # never used, so iterate the loader directly.
            for imgs, _, pids, _, index in train_loader:
                index = index.numpy()
                onehot = torch.zeros(pids.size(0),
                                     num_ids).scatter_(1, pids.view(-1, 1),
                                                       10.0)
                onehot = onehot.numpy()
                new_label[index, :] = onehot

            # Using clustered label to init the new classifier:
            classifier = nn.Linear(2048, num_ids, bias=False)
            classifier.apply(weights_init_classifier)
            classifier = nn.DataParallel(classifier).cuda()
            optimizer_cla = torch.optim.SGD(classifier.parameters(),
                                            lr=args.lr * 10,
                                            momentum=0.9)

        # train model with new generated dataset
        trainer = Trainer_with_learnable_label(model,
                                               classifier,
                                               criterion,
                                               print_freq=args.print_freq)
        evaluator = Evaluator(model, print_freq=args.print_freq)
        # Start training
        for epoch in range(args.epochs):
            trainer.train(epoch, train_loader, new_label, optimizer,
                          optimizer_cla)

        # Evaluate
        rank_score = evaluator.evaluate(test_loader, tgt_dataset.query,
                                        tgt_dataset.gallery)

        #Save the best ckpt:
        rank1 = rank_score.market1501[0]
        mAP = rank_score.map
        is_best_mAP = mAP > best_map
        best_map = max(mAP, best_map)
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': iter_n + 1,
                'best_mAP': best_map,
                # 'num_ids': num_ids,
            },
            is_best_mAP,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print(
            '\n * Finished epoch {:3d}  top1: {:5.1%}  mAP: {:5.1%}  best_mAP: {:5.1%}{}\n'
            .format(iter_n + 1, rank1, mAP, best_map,
                    ' *' if is_best_mAP else ''))

    return (rank_score.map, rank_score.market1501[0])
Example #32
0
def main(args):
    """Train an L2-normalized embedding model with the OIM loss.

    Standard open-reid recipe: build loaders, optionally resume from a
    checkpoint, either run evaluation-only or train with a step-decayed
    SGD schedule (backbone at 0.1x LR), checkpointing on best validation
    top-1, and finally re-test the best saved model.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Mirror stdout into a log file unless we are only evaluating.
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Default input resolution depends on the backbone architecture.
    if args.height is None or args.width is None:
        args.height, args.width = ((144, 56) if args.arch == 'inception'
                                   else (256, 128))
    dataset, num_classes, train_loader, val_loader, test_loader = get_data(
        args.dataset, args.split, args.data_dir, args.height, args.width,
        args.batch_size, args.workers, args.combine_trainval)

    # norm=True: the OIM loss expects normalized embeddings.
    model = models.create(args.arch, num_features=args.features, norm=True,
                          dropout=args.dropout)

    # Optionally restore weights, epoch counter, and best score.
    start_epoch = best_top1 = 0
    if args.resume:
        ckpt = load_checkpoint(args.resume)
        model.load_state_dict(ckpt['state_dict'])
        start_epoch, best_top1 = ckpt['epoch'], ckpt['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric used for the final (and evaluate-only) tests.
    metric = DistanceMetric(algorithm=args.dist_metric)

    evaluator = Evaluator(model)
    if args.evaluate:
        # Evaluation-only mode: fit the metric, report val + test, stop.
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    criterion = OIMLoss(model.module.num_features, num_classes,
                        scalar=args.oim_scalar,
                        momentum=args.oim_momentum).cuda()

    # Pretrained backbone trains at 0.1x the LR of freshly added layers.
    if hasattr(model.module, 'base'):
        backbone = list(model.module.base.parameters())
        backbone_ids = {id(p) for p in backbone}
        head = [p for p in model.parameters() if id(p) not in backbone_ids]
        param_groups = [{'params': backbone, 'lr_mult': 0.1},
                        {'params': head, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    trainer = Trainer(model, criterion)

    def adjust_lr(epoch):
        # Drop LR by 10x every step_size epochs, scaled per param group.
        step_size = 60 if args.arch == 'inception' else 40
        base = args.lr * (0.1 ** (epoch // step_size))
        for group in optimizer.param_groups:
            group['lr'] = base * group.get('lr_mult', 1)

    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue  # skip evaluation/checkpointing during warm-up epochs
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Reload the best checkpoint and report final test-set performance.
    print('Test with best model:')
    ckpt = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(ckpt['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #33
0
 def evaluate(self, query, gallery):
     """Score self.model on the given query/gallery image sets."""
     # One loader over the deduplicated union of both sets.
     combined = set(query) | set(gallery)
     loader = self.get_dataloader(list(combined), training=False)
     # Delegate metric computation to the shared Evaluator helper.
     return Evaluator(self.model).evaluate(loader, query, gallery)
def main(args):
    """Train args.arch with cross-entropy on translated ("fake") images.

    Builds val/test loaders from the real dataset but trains on the set
    returned by get_fake_data; passes a TensorBoard-style logger to the
    trainer, checkpoints on best validation top-1, then evaluates the
    best checkpoint on the test split.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True
    #######Tensorboard-logs##########
    logger = TensorLogger(args.Tensorlogs_dir)
    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    # NOTE(review): only the real dataset's val/test loaders are used for
    # evaluation; its training split is deliberately discarded here.
    dataset, _, _, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Training images come from the translated/"fake" data list instead.
    trans_train_loader, num_classes = get_fake_data(args.trans_name, args.trans_data_txt, args.height,
                                       args.width, args.batch_size, args.workers)
    # Create model
    model = models.create(args.arch,
                          dropout=0, num_classes=num_classes)
    # model = models.create(args.arch, num_features=1024, num_diff_features=args.features,
    #                       dropout=args.dropout, num_classes=num_classes, iden_pretrain=True)
    # Load from checkpoint (restores weights, epoch counter, best score)
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}"
              .format(start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        # Evaluation-only mode: fit the metric on the fake-data loader,
        # report test performance, and return without training.
        metric.train(model, trans_train_loader)
        # print("Validation:")
        # evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    # Pretrained backbone ('base') gets a 0.1x LR multiplier; new layers 1.0x.
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [p for p in model.parameters() if
                      id(p) not in base_param_ids]
        param_groups = [
            {'params': model.module.base.parameters(), 'lr_mult': 0.1},
            {'params': new_params, 'lr_mult': 1.0}]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate: 10x step decay every args.lr_change_epochs
    # epochs, scaled by each param group's lr_mult.
    def adjust_lr(epoch):
        # lr = args.lr if epoch <= 270 else \
        #     args.lr * (0.001 ** ((epoch - 270) / 135))
        # lr = args.lr if epoch <= 100 else \
        #     args.lr * (0.001 ** ((epoch - 100) / 50.0))
        lr = args.lr * 0.1**(epoch//args.lr_change_epochs)
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, trans_train_loader, optimizer, logger)
        #######Tensorboard-logs##########
        # for tag, value in model.named_parameters():
        #     tag = tag.replace('.', '/')
        #     try:
        #         logger.histo_summary(tag, to_np(value), epoch * len(train_loader) + 1)
        #         logger.histo_summary(tag + '/grad', to_np(value.grad), epoch * len(train_loader) + 1)
        #     except AttributeError, e:
        #         pass
        # for tag, value in criterion.named_parameters():
        #     tag = tag.replace('.', '/')
        #     try:
        #         logger.histo_summary(tag, to_np(value), epoch * len(train_loader) + 1)
        #         logger.histo_summary(tag + '/grad', to_np(value.grad), epoch * len(train_loader) + 1)
        #     except AttributeError, e:
        #         pass
        #################################
        # Validate on the real dataset's val split; track the best top-1.
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint({
            'state_dict': model.module.state_dict(),
            'epoch': epoch + 1,
            'best_top1': best_top1,
        }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test: reload the best checkpoint and score the test split.
    print('Test with best model:')
    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, trans_train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
Example #35
0
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Create data loaders
    assert args.num_instances > 1, "num_instances should be greater than 1"
    assert args.batch_size % args.num_instances == 0, \
        'num_instances should divide batch_size'
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)

    # get_source_data
    src_dataset, src_extfeat_loader = \
        get_source_data(args.src_dataset, args.data_dir, args.height,
                        args.width, args.batch_size, args.workers)
    # get_target_data
    tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
        get_data(args.tgt_dataset, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers)

    # Create model
    # Hacking here to let the classifier be the number of source ids
    if args.src_dataset == 'dukemtmc':
        model = models.create(args.arch, num_classes=632, pretrained=False)
    elif args.src_dataset == 'market1501':
        model = models.create(args.arch, num_classes=676, pretrained=False)
    else:
        raise RuntimeError(
            'Please specify the number of classes (ids) of the network.')

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        print(
            'Resuming checkpoints from finetuned model on another dataset...\n'
        )
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    else:
        raise RuntimeWarning('Not using a pre-trained model.')
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model, print_freq=args.print_freq)
    print("Test with the original model trained on source domain:")
    evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
    if args.evaluate:
        return

    # Criterion
    criterion = []
    criterion.append(
        TripletLoss(margin=args.margin,
                    num_instances=args.num_instances).cuda())
    criterion.append(
        TripletLoss(margin=args.margin,
                    num_instances=args.num_instances).cuda())

    # Optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)

    # training stage transformer on input images
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    train_transformer = T.Compose([
        T.Resize((args.height, args.width)),
        T.RandomHorizontalFlip(),
        T.ToTensor(), normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])

    # Start training
    for iter_n in range(args.iteration):
        if args.lambda_value == 0:
            source_features = 0
        else:
            # get source datas' feature
            source_features, _ = extract_features(model,
                                                  src_extfeat_loader,
                                                  print_freq=args.print_freq)
            # synchronization feature order with src_dataset.train
            source_features = torch.cat([
                source_features[f].unsqueeze(0)
                for f, _, _ in src_dataset.train
            ], 0)

        # extract training images' features
        print('Iteration {}: Extracting Target Dataset Features...'.format(
            iter_n + 1))
        target_features, _ = extract_features(model,
                                              tgt_extfeat_loader,
                                              print_freq=args.print_freq)
        # synchronization feature order with dataset.train
        target_features = torch.cat(
            [target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.train],
            0)
        # calculate distance and rerank result
        print('Calculating feature distances...')
        target_features = target_features.numpy()
        rerank_dist = re_ranking(source_features,
                                 target_features,
                                 lambda_value=args.lambda_value)
        if iter_n == 0:
            # DBSCAN cluster
            tri_mat = np.triu(rerank_dist, 1)  # tri_mat.dim=2
            tri_mat = tri_mat[np.nonzero(tri_mat)]  # tri_mat.dim=1
            tri_mat = np.sort(tri_mat, axis=None)
            top_num = np.round(args.rho * tri_mat.size).astype(int)
            eps = tri_mat[:top_num].mean()
            print('eps in cluster: {:.3f}'.format(eps))
            cluster = DBSCAN(eps=eps,
                             min_samples=4,
                             metric='precomputed',
                             n_jobs=8)

        # select & cluster images as training set of this epochs
        print('Clustering and labeling...')
        labels = cluster.fit_predict(rerank_dist)
        num_ids = len(set(labels)) - 1
        print('Iteration {} have {} training ids'.format(iter_n + 1, num_ids))
        # generate new dataset
        new_dataset = []
        for (fname, _, _), label in zip(tgt_dataset.trainval, labels):
            if label == -1:
                continue
            # dont need to change codes in trainer.py _parsing_input function and sampler function after add 0
            new_dataset.append((fname, label, 0))
        print('Iteration {} have {} training images'.format(
            iter_n + 1, len(new_dataset)))

        train_loader = DataLoader(Preprocessor(new_dataset,
                                               root=tgt_dataset.images_dir,
                                               transform=train_transformer),
                                  batch_size=args.batch_size,
                                  num_workers=4,
                                  sampler=RandomIdentitySampler(
                                      new_dataset, args.num_instances),
                                  pin_memory=True,
                                  drop_last=True)

        # train model with new generated dataset
        trainer = Trainer(model, criterion, print_freq=args.print_freq)
        evaluator = Evaluator(model, print_freq=args.print_freq)
        # Start training
        for epoch in range(args.epochs):
            trainer.train(epoch, train_loader, optimizer)
        # Evaluate
        rank1 = evaluator.evaluate(test_loader, tgt_dataset.query,
                                   tgt_dataset.gallery)

        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': iter_n + 1,
                'num_ids': num_ids,
            },
            True,
            fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  rank1: {:5.1%} \n'.format(
            iter_n + 1, rank1))