Example 1
import numpy as np
# DistanceMetric, pairwise_distance and evaluate_all are assumed to come from
# the surrounding re-id codebase (open-reid-style helpers).


def combine_evaluate(features, dataset):
    # Fuse several feature sets by summing their query-gallery distance matrices.
    metric = DistanceMetric(algorithm='euclidean')
    distmats = [pairwise_distance(feature, dataset.query, dataset.gallery, metric)
                for feature in features]
    distmats = np.array([dist.numpy() for dist in distmats])
    distmat = np.sum(distmats, axis=0)
    evaluate_all(distmat, dataset.query, dataset.gallery)
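
For reference, a minimal sketch of the distance-matrix convention the snippet above sums over, assuming each pairwise_distance call yields a num_query x num_gallery matrix; the toy embeddings and torch.cdist stand in for the project's own helper:

import torch

query = torch.randn(4, 128)    # 4 toy query embeddings (hypothetical)
gallery = torch.randn(6, 128)  # 6 toy gallery embeddings (hypothetical)

# A Euclidean query-gallery distance matrix of shape (4, 6), the kind of
# object combine_evaluate converts to NumPy and sums across feature sets.
distmat = torch.cdist(query, gallery, p=2)
print(distmat.shape)  # torch.Size([4, 6])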
Example 2
from collections import OrderedDict

import torch
import torch.nn.functional as F


def evaluate(model, data_loader, query, gallery):
    model.eval()
    features = OrderedDict()
    labels = OrderedDict()

    with torch.no_grad():  # replaces the deprecated Variable(..., volatile=True)
        for i, (imgs, fnames, pids, _) in enumerate(data_loader):
            # normal imgs
            normal_outputs = model(imgs.cuda())[-1]
            normal_outputs = normal_outputs.cpu()

            # flipped imgs: reverse the width axis of the N x C x H x W batch
            inv_idx = torch.arange(imgs.size(3) - 1, -1, -1).long()
            flip_imgs = imgs.index_select(3, inv_idx)
            flip_outputs = model(flip_imgs.cuda())[-1]
            flip_outputs = flip_outputs.cpu()

            # Sum the two views and L2-normalize the result.
            outputs = F.normalize(normal_outputs + flip_outputs)
            for fname, output, pid in zip(fnames, outputs, pids):
                features[fname] = output
                labels[fname] = pid

    distmat = pairwise_distance(features, query, gallery)
    return evaluate_all(distmat, query, gallery)
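
The index_select trick above reverses the width axis to obtain horizontally flipped images; a small self-contained check (not from the original code) showing it matches torch.flip:

import torch

imgs = torch.randn(2, 3, 8, 8)  # N x C x H x W
inv_idx = torch.arange(imgs.size(3) - 1, -1, -1).long()
flipped = imgs.index_select(3, inv_idx)                  # the trick used above
assert torch.equal(flipped, torch.flip(imgs, dims=[3]))  # identical result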
Example 3
def metric_evaluate(model, query_set, gallery_set):
    model.eval()
    # Label layout (per the keyword arguments below):
    # column 0 = camera id, column 1 = person id.
    print('=> L2 distance')
    dist = pairwise_distance(query_set.features, gallery_set.features)
    evaluate_all(
        dist,
        query_ids=query_set.labels[:, 1],
        gallery_ids=gallery_set.labels[:, 1],
        query_cams=query_set.labels[:, 0],
        gallery_cams=gallery_set.labels[:, 0],
    )
    print('=> Metric')
    dist = metric_distance(model, query_set.features, gallery_set.features)
    evaluate_all(
        dist,
        query_ids=query_set.labels[:, 1],
        gallery_ids=gallery_set.labels[:, 1],
        query_cams=query_set.labels[:, 0],
        gallery_cams=gallery_set.labels[:, 0],
    )
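
The keyword arguments above imply a labels array whose column 0 holds camera ids and column 1 holds person ids; a hypothetical example of that layout:

import numpy as np

# Hypothetical labels: column 0 = camera id, column 1 = person id.
labels = np.array([[0, 17],
                   [1, 17],
                   [0, 23]])
query_ids, query_cams = labels[:, 1], labels[:, 0]  # as passed to evaluate_all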
Example 4
import os
import sys

import numpy as np
import torch
from torch import nn
from torch.backends import cudnn
# Logger, get_data, InceptionNet, load_checkpoint, save_checkpoint, Trainer,
# Evaluator, DistanceMetric, extract_features, pairwise_distance and
# evaluate_all are assumed to come from the surrounding re-id project.


def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(os.path.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = ((144, 56) if args.arch == 'inception'
                                   else (256, 128))
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model
    model = InceptionNet(num_channels=8,
                         num_features=args.features,
                         dropout=args.dropout,
                         num_classes=num_classes)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=os.path.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(
        os.path.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)

    features, _ = extract_features(evaluator.model, test_loader)
    distmat = pairwise_distance(features,
                                dataset.query,
                                dataset.gallery,
                                metric=metric)
    evaluate_all(distmat,
                 query=dataset.query,
                 gallery=dataset.gallery,
                 cmc_topk=(1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50))

    torch.save(model, os.path.join(args.logs_dir, 'model.pt'))
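
As a sanity check on the adjust_lr schedule in Example 4 above (hypothetical args.lr = 0.1, non-inception arch so step_size = 40), the base layers with lr_mult = 0.1 always train at one tenth of the scheduled rate:

base_lr = 0.1  # hypothetical args.lr
for epoch in (0, 39, 40, 85):
    lr = base_lr * (0.1 ** (epoch // 40))
    print(epoch, round(lr, 6), round(lr * 0.1, 6))  # (epoch, new-param lr, base-param lr)
# 0  -> 0.1    0.01
# 39 -> 0.1    0.01
# 40 -> 0.01   0.001
# 85 -> 0.001  0.0001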
Example 5

                # NOTE: this excerpt begins mid-function, inside nested loops
                # over `subset`, `target_id` and `image_filename` whose headers
                # are not shown; cv2, numpy, torch and the deep-SORT-style
                # `encoder`/`Detection` helpers come from the original script.
                print('Encoding {}'.format(
                    os.path.join(args.dataset_path, subset, target_id,
                                 image_filename)))
                # Parse the camera and frame ids out of the filename.
                cam_id, frame_id = int(image_filename.split('_')[1][1]), int(
                    image_filename.split('.')[0].split('_')[2][1:])
                image = cv2.imread(
                    os.path.join(args.dataset_path, subset, target_id,
                                 image_filename))
                h, w, _ = image.shape
                # Encode the whole image as one full-frame detection.
                feature = encoder([Detection(np.array([0, 0, w, h]), score=1)],
                                  image)[0]
                all_infos.append([int(target_id), cam_id, frame_id])
                all_features.append(feature)

        all_infos = np.array(all_infos)
        # Prepend the (target_id, cam_id, frame_id) columns to the features.
        all_features = np.hstack((all_infos, all_features))
        subset_features[subset] = all_features

    query_features = subset_features['query']
    gallery_features = subset_features['gallery']

    # Columns 3+ hold the appearance features; columns 0-2 the id info.
    distmat = pairwise_distance(torch.tensor(query_features[:, 3:]),
                                torch.tensor(gallery_features[:, 3:]))

    # Reorder the id columns to (frame_id, target_id, cam_id) for evaluate_all.
    query = query_features[:, (2, 0, 1)]
    gallery = gallery_features[:, (2, 0, 1)]

    print('Shape of distance matrix: ', distmat.shape)

    evaluate_all(distmat, query, gallery)
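
The cam_id/frame_id parsing in the loop above assumes Market-1501-like filenames; a hypothetical name showing what the two slices extract (note the [1:] slice skips the first character of the frame field):

# Hypothetical filename in the layout the parsing above expects.
image_filename = '0001_c3s1_f000151_00.jpg'
cam_id = int(image_filename.split('_')[1][1])                   # 'c3s1'    -> 3
frame_id = int(image_filename.split('.')[0].split('_')[2][1:])  # 'f000151' -> 151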
Example 6
        # NOTE: this excerpt begins inside a feature-extraction loop whose
        # header and setup (model, test_loader, timers) are not shown.
        outputs = F.normalize(outputs, p=2, dim=1)

        for fname, output, pid in zip(fnames, outputs, pids):
            features[fname] = output
            labels[fname] = pid

        batch_time.update(time.time() - end)
        end = time.time()

        if (i + 1) % args.print_freq == 0:
            print('Extract Features: [{}/{}]\t'
                  'Time {:.3f} ({:.3f})\t'
                  'Data {:.3f} ({:.3f})\t'.format(i + 1, len(test_loader),
                                                  batch_time.val,
                                                  batch_time.avg,
                                                  data_time.val,
                                                  data_time.avg))

print("Extracing features is finished... Now evaluating begins...")

# Compute the query-gallery distance matrix and report retrieval metrics.
distmat = evaluators.pairwise_distance(features, dataset.query,
                                       dataset.gallery)

evaluators.evaluate_all(distmat,
                        dataset.query,
                        dataset.gallery,
                        dataset=args.dataset_type,
                        top1=True)
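
Putting the pieces together, a minimal sketch of the data flow these examples share, assuming open-reid-style conventions: features maps filename to an L2-normalized embedding, query/gallery are lists of (fname, pid, camid) tuples, and torch.cdist stands in for the project's pairwise_distance:

from collections import OrderedDict

import torch
import torch.nn.functional as F

features = OrderedDict()
features['q_0001.jpg'] = F.normalize(torch.randn(1, 128))[0]  # hypothetical
features['g_0001.jpg'] = F.normalize(torch.randn(1, 128))[0]  # hypothetical

query = [('q_0001.jpg', 17, 0)]    # (fname, person id, camera id)
gallery = [('g_0001.jpg', 17, 1)]

x = torch.stack([features[f] for f, _, _ in query])
y = torch.stack([features[f] for f, _, _ in gallery])
distmat = torch.cdist(x, y, p=2)   # the matrix evaluate_all ranks over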