Example #1
def combine_evaluate(features, dataset):
    metric = DistanceMetric(algorithm='euclidean')
    distmats = [pairwise_distance(feature, dataset.query, dataset.gallery, metric)
                for feature in features]
    distmats = np.array([dist.numpy() for dist in distmats])
    distmat = np.sum(distmats, axis=0)
    evaluate_all(distmat, dataset.query, dataset.gallery)
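Note: pairwise_distance here is the repo's helper (open-reid style), which builds a query-by-gallery Euclidean distance matrix from a feature dict. For reference, a minimal standalone equivalent for two feature matrices might look like the following (the function name and shapes are illustrative, not the library's API):

import torch

def euclidean_distmat(x, y):
    # Squared Euclidean distance between each row of x (m x d) and each
    # row of y (n x d), via ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
    m, n = x.size(0), y.size(0)
    dist = x.pow(2).sum(1, keepdim=True).expand(m, n) + \
           y.pow(2).sum(1, keepdim=True).expand(n, m).t()
    dist.addmm_(x, y.t(), beta=1, alpha=-2)
    return dist.clamp_(min=1e-12)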
Example #2
File: mining.py Project: luzai/reid
def mine_hard_triplets(model, data_loader, margin=0.5, batch_size=32):
    model.eval()
    # Compute pairwise distance
    new_loader = DataLoader(
        data_loader.dataset,
        batch_size=batch_size,
        num_workers=8,
        pin_memory=torch.cuda.is_available())

    features, _ = extract_features(model, new_loader, print_freq=10)
    timer = cvb.Timer()
    print('timer start')
    distmat = pairwise_distance(features)
    # print('timer ', timer.since_start())
    distmat = distmat.cpu().numpy()
    # Get the pids
    dataset = data_loader.dataset.dataset
    pids = np.asarray([pid for _, pid, _ in dataset])
    # Find the hard triplets
    pids_exp = np.repeat(pids, pids.shape[0]).reshape(pids.shape[0],
                                                      pids.shape[0])
    mask = (pids_exp == pids_exp.T)
    n = 1024 * 4  # number of candidate hard negatives to keep
    distmat_n = distmat.copy()
    distmat_n[mask] = distmat_n.max()  # exclude same-identity pairs
    no_sel = np.setdiff1d(np.arange(pids.shape[0]),
                          np.random.choice(pids.shape[0], 1024 * 4))
    distmat_n[no_sel, :] = distmat_n.max()
    distmat_n[:, no_sel] = distmat_n.max()
    ind = np.argpartition(distmat_n.ravel(), n)[:n]
    ind = ind[np.argsort(distmat_n.ravel()[ind])]
    ind = ind[3:256 + 3]
    print('timer ', timer.since_start())
    # ind = np.random.choice(ind, n)
    # plt.plot(np.sort(distmat_n.ravel()[ind]),'.')

    # plt.hist(distmat_n.ravel()[ind] )
    anc, neg = np.unravel_index(ind, distmat.shape)
    triplets = []
    triplets_dist = []
    for anc_, neg_ in zip(anc, neg):
        # pos_inds = np.where(pids == pids[anc_])[0]
        pos_inds = np.where(mask[anc_])[0]
        # pos_ind = np.random.choice(pos_inds)
        d = distmat[anc_][pos_inds]
        pos_ind = pos_inds[np.argmax(d)]
        triplets.append([anc_, pos_ind, neg_])
        triplets_dist.append(distmat[anc_, neg_] - distmat[anc_, pos_ind])
    print(triplets_dist)
    # cvb.dump(distmat, 'distmat.pkl')
    # cvb.dump(mask, 'mask.pkl')
    # db=Database('fea.h5')
    # features_id2key= dict(zip(range(len(features.keys())), features.keys()))
    # for ind_ in np.asarray(triplets).ravel():
    #     key=features_id2key[ind_]
    #     db[key] = to_numpy(features[key])
    # db.close()
    print('mined hard', len(triplets),
          np.unique(triplets).shape, 'num pids ', pids.shape[0])
    return triplets
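The argpartition/argsort pair above is the standard trick for taking the k smallest entries of a large array without a full sort. A self-contained illustration (synthetic data, k chosen arbitrarily):

import numpy as np

rng = np.random.default_rng(0)
d = rng.random(10_000)   # stands in for distmat_n.ravel()
k = 256

# argpartition moves the k smallest values into the first k slots
# (unordered); one argsort over those k slots then orders them.
# Cost is O(N + k log k) rather than O(N log N) for a full sort.
idx = np.argpartition(d, k)[:k]
idx = idx[np.argsort(d[idx])]
assert np.array_equal(d[idx], np.sort(d)[:k])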
Example #3
def evaluate(model, data_loader, query, gallery):
    model.eval()
    features = OrderedDict()
    labels = OrderedDict()

    for i, (imgs, fnames, pids, _) in enumerate(data_loader):
        # normal imgs
        normal_inputs = Variable(imgs.cuda(), volatile=True)
        normal_outputs = model(normal_inputs)[-1]
        normal_outputs = normal_outputs.data.cpu()

        # flipped imgs
        inv_idx = torch.arange(imgs.size(3) - 1, -1,
                               -1).long()  # N x C x H x W
        flip_imgs = imgs.index_select(3, inv_idx)
        flip_inputs = Variable(flip_imgs.cuda(), volatile=True)
        flip_outputs = model(flip_inputs)[-1]
        flip_outputs = flip_outputs.data.cpu()

        outputs = F.normalize(normal_outputs + flip_outputs)
        for fname, output, pid in zip(fnames, outputs, pids):
            features[fname] = output
            labels[fname] = pid

    distmat = pairwise_distance(features, query, gallery)
    return evaluate_all(distmat, query, gallery)
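The flip branch is test-time augmentation: features of the image and its horizontal mirror are summed and L2-normalised. With current PyTorch the same idea is shorter (a sketch only; it assumes a model that returns an N x D feature tensor directly, unlike the model(...)[-1] indexing above):

import torch
import torch.nn.functional as F

@torch.no_grad()
def flip_fused_features(model, imgs):
    # torch.flip on dim 3 reverses the width axis of an N x C x H x W batch,
    # equivalent to the index_select trick in the example.
    feats = model(imgs) + model(torch.flip(imgs, dims=[3]))
    return F.normalize(feats, p=2, dim=1)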
Example #4
    def calc_distance(self, dataset):
        data_loader = DataLoader(
            Preprocessor(dataset, self.img_path, transform=self.transformer),
            batch_size=64, num_workers=8,
            shuffle=False, pin_memory=True)

        if self.verbose:
            print('\t GraphSampler: ', end='\t')
        features, _ = extract_features(self.model, data_loader, self.verbose)
        features = torch.cat([features[fname].unsqueeze(0) for fname, _, _, _ in dataset], 0)

        if self.verbose:
            print('\t GraphSampler: \tCompute distance...', end='\t')
        start = time.time()
        dist = pairwise_distance(self.matcher, features, features)
        if self.verbose:
            print('Time: %.3f seconds.' % (time.time() - start))

        if self.rerank:
            if self.verbose:
                print('\t GraphSampler: \tRerank...', end='\t')
            start = time.time()
            with torch.no_grad():
                dist = torch.cat((dist, dist))
                dist = torch.cat((dist, dist), dim=1)
                dist = reranking(dist, self.num_pids)
                dist = torch.from_numpy(dist).cuda()
            if self.verbose:
                print('Time: %.3f seconds.' % (time.time() - start))

        return dist
Example #5
File: mining.py Project: luzai/reid
def mine_hard_pairs(model, data_loader, margin=0.32):
    model.eval()
    # Compute pairwise distance
    features, _ = extract_features(model, data_loader, print_freq=1)
    distmat = pairwise_distance(features)
    distmat = distmat.cpu().numpy()
    # Get the pids
    dataset = data_loader.dataset.dataset
    pids = np.asarray([pid for _, pid, _ in dataset])
    # Find the hard pairs
    pairs = []
    for i, d in enumerate(distmat):
        pos_indices = np.where(pids == pids[i])[0]
        threshold = max(d[pos_indices]) + margin
        neg_indices = np.where(pids != pids[i])[0]
        pairs.extend([(i, p) for p in pos_indices])
        pairs.extend([(i, n) for n in neg_indices if d[n] <= threshold])
    return pairs
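The selection rule keeps every positive pair plus any negative closer than the row's hardest positive distance plus the margin. The same rule can be written without the per-row Python loop (a sketch; it assumes distmat is a square NumPy array aligned with pids):

import numpy as np

def mine_hard_pairs_vectorized(distmat, pids, margin=0.32):
    same = pids[:, None] == pids[None, :]
    # Hardest (largest) positive distance per row, then threshold negatives.
    hardest_pos = np.where(same, distmat, -np.inf).max(axis=1, keepdims=True)
    keep_neg = ~same & (distmat <= hardest_pos + margin)
    rows, cols = np.nonzero(same | keep_neg)
    return list(zip(rows.tolist(), cols.tolist()))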
Example #6
    def calc_distance(self, dataset):
        data_loader = DataLoader(
            Preprocessor(dataset, self.img_path, transform=self.transformer),
            batch_size=64, num_workers=8,
            shuffle=False, pin_memory=True)

        if self.verbose:
            print('\t GraphSampler: ', end='\t')
        features, _ = extract_features(self.model, data_loader, self.verbose)
        features = torch.cat([features[fname].unsqueeze(0) for fname, _, _, _ in dataset], 0)

        if self.verbose:
            print('\t GraphSampler: \tCompute distance...', end='\t')
        start = time.time()
        dist = pairwise_distance(self.matcher, features, features,
                                 self.gal_batch_size, self.prob_batch_size)
        
        if self.verbose:
            print('Time: %.3f seconds.' % (time.time() - start))

        return dist
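Compared with Example #4, this call also passes gal_batch_size and prob_batch_size, presumably so the distance matrix is computed in chunks rather than all at once. The memory-bounding idea in isolation (a hypothetical helper, not this project's function):

import torch

def chunked_cdist(prob, gal, gal_batch_size=512):
    # Fill the prob x gal distance matrix one gallery chunk at a time, so the
    # intermediate tensors never exceed len(prob) x gal_batch_size.
    out = torch.empty(prob.size(0), gal.size(0), dtype=prob.dtype)
    for j in range(0, gal.size(0), gal_batch_size):
        out[:, j:j + gal_batch_size] = torch.cdist(prob, gal[j:j + gal_batch_size])
    return out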
Example #7
def metric_evaluate(model, query_set, gallery_set):
    model.eval()
    print('=> L2 distance')
    dist = pairwise_distance(query_set.features, gallery_set.features)
    evaluate_all(
        dist,
        query_ids=query_set.labels[:, 1],
        gallery_ids=gallery_set.labels[:, 1],
        query_cams=query_set.labels[:, 0],
        gallery_cams=gallery_set.labels[:, 0],
    )
    print('=> Metric')
    dist = metric_distance(model, query_set.features, gallery_set.features)
    evaluate_all(
        dist,
        query_ids=query_set.labels[:, 1],
        gallery_ids=gallery_set.labels[:, 1],
        query_cams=query_set.labels[:, 0],
        gallery_cams=gallery_set.labels[:, 0],
    )
    return
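Here labels[:, 1] holds person ids and labels[:, 0] camera ids; re-ID evaluation needs both because a gallery match from the same camera as the query is excluded. A stripped-down rank-1 computation under that protocol (illustrative only; evaluate_all in these repos also reports mAP and higher CMC ranks):

import numpy as np

def rank1(distmat, q_ids, g_ids, q_cams, g_cams):
    hits = 0
    for i in range(distmat.shape[0]):
        # Drop gallery entries sharing both the query's id and its camera.
        valid = ~((g_ids == q_ids[i]) & (g_cams == q_cams[i]))
        order = np.argsort(distmat[i][valid])
        hits += int(g_ids[valid][order[0]] == q_ids[i])
    return hits / distmat.shape[0]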
Example #8
def main(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.benchmark = True

    # Redirect print to both console and log file
    if not args.evaluate:
        sys.stdout = Logger(os.path.join(args.logs_dir, 'log.txt'))

    # Create data loaders
    if args.height is None or args.width is None:
        args.height, args.width = (144, 56) if args.arch == 'inception' else \
                                  (256, 128)
    dataset, num_classes, train_loader, val_loader, test_loader = \
        get_data(args.dataset, args.split, args.data_dir, args.height,
                 args.width, args.batch_size, args.workers,
                 args.combine_trainval)

    # Create model
    model = InceptionNet(num_channels=8,
                         num_features=args.features,
                         dropout=args.dropout,
                         num_classes=num_classes)

    # Load from checkpoint
    start_epoch = best_top1 = 0
    if args.resume:
        checkpoint = load_checkpoint(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        best_top1 = checkpoint['best_top1']
        print("=> Start epoch {}  best top1 {:.1%}".format(
            start_epoch, best_top1))
    model = nn.DataParallel(model).cuda()

    # Distance metric
    metric = DistanceMetric(algorithm=args.dist_metric)

    # Evaluator
    evaluator = Evaluator(model)
    if args.evaluate:
        metric.train(model, train_loader)
        print("Validation:")
        evaluator.evaluate(val_loader, dataset.val, dataset.val, metric)
        print("Test:")
        evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)
        return

    # Criterion
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer
    if hasattr(model.module, 'base'):
        base_param_ids = set(map(id, model.module.base.parameters()))
        new_params = [
            p for p in model.parameters() if id(p) not in base_param_ids
        ]
        param_groups = [{
            'params': model.module.base.parameters(),
            'lr_mult': 0.1
        }, {
            'params': new_params,
            'lr_mult': 1.0
        }]
    else:
        param_groups = model.parameters()
    optimizer = torch.optim.SGD(param_groups,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=True)

    # Trainer
    trainer = Trainer(model, criterion)

    # Schedule learning rate
    def adjust_lr(epoch):
        step_size = 60 if args.arch == 'inception' else 40
        lr = args.lr * (0.1**(epoch // step_size))
        for g in optimizer.param_groups:
            g['lr'] = lr * g.get('lr_mult', 1)

    # Start training
    for epoch in range(start_epoch, args.epochs):
        adjust_lr(epoch)
        trainer.train(epoch, train_loader, optimizer)
        if epoch < args.start_save:
            continue
        top1 = evaluator.evaluate(val_loader, dataset.val, dataset.val)

        is_best = top1 > best_top1
        best_top1 = max(top1, best_top1)
        save_checkpoint(
            {
                'state_dict': model.module.state_dict(),
                'epoch': epoch + 1,
                'best_top1': best_top1,
            },
            is_best,
            fpath=os.path.join(args.logs_dir, 'checkpoint.pth.tar'))

        print('\n * Finished epoch {:3d}  top1: {:5.1%}  best: {:5.1%}{}\n'.
              format(epoch, top1, best_top1, ' *' if is_best else ''))

    # Final test
    print('Test with best model:')
    checkpoint = load_checkpoint(
        os.path.join(args.logs_dir, 'model_best.pth.tar'))
    model.module.load_state_dict(checkpoint['state_dict'])
    metric.train(model, train_loader)
    evaluator.evaluate(test_loader, dataset.query, dataset.gallery, metric)

    features, _ = extract_features(evaluator.model, test_loader)
    distmat = pairwise_distance(features,
                                dataset.query,
                                dataset.gallery,
                                metric=metric)
    evaluate_all(distmat,
                 query=dataset.query,
                 gallery=dataset.gallery,
                 cmc_topk=(1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50))

    torch.save(model, os.path.join(args.logs_dir, 'model.pt'))
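The 'lr_mult' entries above are not a built-in optimizer option; adjust_lr reads them back from optimizer.param_groups so the pretrained base trains at a tenth of the rate of the new layers. A minimal demonstration (toy model, same schedule logic):

import torch

model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 2))
param_groups = [
    {'params': model[0].parameters(), 'lr_mult': 0.1},  # pretrained base
    {'params': model[1].parameters(), 'lr_mult': 1.0},  # new layers
]
optimizer = torch.optim.SGD(param_groups, lr=0.1, momentum=0.9)

def adjust_lr(epoch, base_lr=0.1, step_size=40):
    # 10x decay every step_size epochs, scaled per group.
    lr = base_lr * (0.1 ** (epoch // step_size))
    for g in optimizer.param_groups:
        g['lr'] = lr * g.get('lr_mult', 1)

adjust_lr(0)    # base group lr = 0.01, new group lr = 0.1
adjust_lr(40)   # base group lr = 0.001, new group lr = 0.01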
Example #9
                print('Encoding {}'.format(
                    os.path.join(args.dataset_path, subset, target_id,
                                 image_filename)))
                cam_id, frame_id = int(image_filename.split('_')[1][1]), int(
                    image_filename.split('.')[0].split('_')[2][1:])
                image = cv2.imread(
                    os.path.join(args.dataset_path, subset, target_id,
                                 image_filename))
                h, w, _ = image.shape
                feature = encoder([Detection(np.array([0, 0, w, h]), score=1)],
                                  image)[0]
                all_infos.append([int(target_id), cam_id, frame_id])
                all_features.append(feature)

        all_infos = np.array(all_infos)
        all_features = np.hstack((all_infos, all_features))
        subset_features[subset] = all_features

    query_features, gallery_features = subset_features[
        'query'], subset_features['gallery']

    distmat = pairwise_distance(torch.tensor(query_features[:, 3:]),
                                torch.tensor(gallery_features[:, 3:]))

    query = query_features[:, (2, 0, 1)]
    gallery = gallery_features[:, (2, 0, 1)]

    print('Shape of distance matrix: ', distmat.shape)

    evaluate_all(distmat, query, gallery)
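Example #9 stores (pid, cam_id, frame_id) in the first three columns of each feature row, which is why the later slices use [:, 3:] for features and [:, (2, 0, 1)] for metadata. The layout in miniature (shapes hypothetical):

import numpy as np

infos = np.array([[7, 0, 120], [7, 1, 140]])   # (pid, cam_id, frame_id) per image
feats = np.random.rand(2, 128)                 # one feature row per image
packed = np.hstack((infos, feats))             # metadata columns first

features_only = packed[:, 3:]                  # what feeds pairwise_distance
frame_pid_cam = packed[:, (2, 0, 1)]           # reordered metadata, as in the example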
Example #10
        outputs = F.normalize(outputs, p=2, dim=1)

        for fname, output, pid in zip(fnames, outputs, pids):
            features[fname] = output
            labels[fname] = pid

        batch_time.update(time.time() - end)
        end = time.time()

        if (i + 1) % args.print_freq == 0:
            print('Extract Features: [{}/{}]\t'
                  'Time {:.3f} ({:.3f})\t'
                  'Data {:.3f} ({:.3f})\t'.format(i + 1, len(test_loader),
                                                  batch_time.val,
                                                  batch_time.avg,
                                                  data_time.val,
                                                  data_time.avg))

print("Extracing features is finished... Now evaluating begins...")

#Evaluating distance matrix
distmat = evaluators.pairwise_distance(features, dataset.query,
                                       dataset.gallery)

evaluators.evaluate_all(distmat,
                        dataset.query,
                        dataset.gallery,
                        dataset=args.dataset_type,
                        top1=True)
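batch_time and data_time above follow the common AverageMeter pattern; a minimal version matching the .val/.avg/.update usage (an assumption — the repo's own class may differ in detail):

class AverageMeter:
    # Tracks the most recent value and the running average.
    def __init__(self):
        self.val = self.sum = self.count = self.avg = 0
    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count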