Example #1
def eval_model(db, net, trfs, pooling='mean', gemp=3, detailed=False, whiten=None,
               aqe=None, adba=None, threads=8, batch_size=16, save_feats=None,
               load_feats=None, dbg=()):
    """ Evaluate a trained model (network) on a given dataset.
    The dataset is supposed to contain the evaluation code.
    """
    print("\n>> Evaluation...")
    query_db = db.get_query_db()

    # load precomputed DB (and query) feats from disk
    bdescs = np.load(os.path.join(load_feats, 'feats.bdescs.npy'))
    if query_db is not db:
        qdescs = np.load(os.path.join(load_feats, 'feats.qdescs.npy'))
    else:
        qdescs = bdescs

    if whiten is not None:
        bdescs = common.whiten_features(tonumpy(bdescs), net.pca, **whiten)
        qdescs = common.whiten_features(tonumpy(qdescs), net.pca, **whiten)

    if adba is not None:
        bdescs = expand_descriptors(bdescs, **adba)
    if aqe is not None:
        qdescs = expand_descriptors(qdescs, db=bdescs, **aqe)

    # cosine similarity between every query and DB descriptor;
    # ranking is by decreasing similarity
    scores = matmul(qdescs, bdescs)
    data_sorted = np.argsort(-scores)

    del bdescs
    del qdescs

    return data_sorted
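The ranking above goes through this module's matmul helper. As a minimal plain-NumPy sketch of the same step (random unit vectors standing in for real descriptors; this is not the module's actual helper):

import numpy as np

# Random L2-normalized descriptors standing in for real features
rng = np.random.default_rng(0)
qdescs = rng.standard_normal((5, 128)).astype(np.float32)
bdescs = rng.standard_normal((100, 128)).astype(np.float32)
qdescs /= np.linalg.norm(qdescs, axis=1, keepdims=True)
bdescs /= np.linalg.norm(bdescs, axis=1, keepdims=True)

# For unit vectors the dot product is the cosine similarity;
# argsort(-scores) ranks DB images from best to worst per query.
scores = qdescs @ bdescs.T
data_sorted = np.argsort(-scores, axis=1)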
Example #2
def expand_descriptors(descs, db=None, alpha=0, k=0):
    """ Expand each descriptor with a similarity-weighted average of its
    top-k neighbours from `db` (alpha-weighted query expansion; alpha=0
    gives a plain average). If db is None, the descriptors are expanded
    against themselves (database-side augmentation).
    """
    assert k >= 0 and alpha >= 0, 'k and alpha must be non-negative'
    if k == 0:
        return descs
    descs = tonumpy(descs)
    n = descs.shape[0]
    db_descs = tonumpy(db if db is not None else descs)

    sim = matmul(descs, db_descs)
    if db is None:
        sim[np.diag_indices(n)] = 0  # don't let a descriptor match itself

    # indices of the k most similar database descriptors for each row
    idx = np.argpartition(sim, int(-k), axis=1)[:, int(-k):]
    descs_aug = np.zeros_like(descs)
    for i in range(n):
        # average the descriptor with its neighbours, each weighted by
        # sim**alpha, then re-normalize to unit L2 norm
        new_q = np.vstack([db_descs[j, :] * sim[i, j]**alpha for j in idx[i]])
        new_q = np.vstack([descs[i], new_q])
        new_q = np.mean(new_q, axis=0)
        descs_aug[i] = new_q / np.linalg.norm(new_q)
    return descs_aug
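A minimal usage sketch, assuming Example #2's function is pasted into the same script; tonumpy and matmul below are assumed stand-ins for the module helpers it calls, not the originals:

import numpy as np

# Assumed stand-ins for the module-level helpers used by the function
def tonumpy(x):
    return np.asarray(x)

def matmul(A, B):
    return A @ B.T  # pairwise dot products = cosine sims for unit vectors

rng = np.random.default_rng(0)
qdescs = rng.standard_normal((5, 128)).astype(np.float32)
bdescs = rng.standard_normal((100, 128)).astype(np.float32)
qdescs /= np.linalg.norm(qdescs, axis=1, keepdims=True)
bdescs /= np.linalg.norm(bdescs, axis=1, keepdims=True)

qdescs_qe = expand_descriptors(qdescs, db=bdescs, alpha=3, k=10)  # query side
bdescs_dba = expand_descriptors(bdescs, alpha=3, k=10)            # database side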
Example #3
def eval_model(db,
               net,
               trfs,
               pooling='mean',
               gemp=3,
               detailed=False,
               whiten=None,
               aqe=None,
               adba=None,
               threads=8,
               batch_size=16,
               save_feats=None,
               load_feats=None,
               dbg=()):
    """ Evaluate a trained model (network) on a given dataset.
    The dataset is supposed to contain the evaluation code.
    """
    print("\n>> Evaluation...")
    query_db = db.get_query_db()

    # extract DB feats
    bdescs = []
    qdescs = []

    if not load_feats:
        trfs_list = [trfs] if isinstance(trfs, str) else trfs

        for trfs in trfs_list:
            kw = dict(iscuda=net.iscuda,
                      threads=threads,
                      batch_size=batch_size,
                      same_size='Pad' in trfs or 'Crop' in trfs)
            bdescs.append(
                extract_image_features(db, trfs, net, desc="DB", **kw))

            # extract query feats
            qdescs.append(
                bdescs[-1] if db is query_db else extract_image_features(
                    query_db, trfs, net, desc="query", **kw))

        # pool from multiple transforms (scales)
        bdescs = F.normalize(pool(bdescs, pooling, gemp), p=2, dim=1)
        qdescs = F.normalize(pool(qdescs, pooling, gemp), p=2, dim=1)
    else:
        bdescs = np.load(os.path.join(load_feats, 'feats.bdescs.npy'))
        if query_db is not db:
            qdescs = np.load(os.path.join(load_feats, 'feats.qdescs.npy'))
        else:
            qdescs = bdescs

    if save_feats:
        mkdir(save_feats)
        # tonumpy handles both torch tensors (freshly extracted) and
        # numpy arrays (when features were loaded from disk)
        np.save(os.path.join(save_feats, 'feats.bdescs.npy'),
                tonumpy(bdescs))
        if query_db is not db:
            np.save(os.path.join(save_feats, 'feats.qdescs.npy'),
                    tonumpy(qdescs))

    if whiten is not None:
        bdescs = common.whiten_features(tonumpy(bdescs), net.pca, **whiten)
        qdescs = common.whiten_features(tonumpy(qdescs), net.pca, **whiten)

    if adba is not None:
        bdescs = expand_descriptors(bdescs, **adba)
    if aqe is not None:
        qdescs = expand_descriptors(qdescs, db=bdescs, **aqe)

    scores = matmul(qdescs, bdescs)

    del bdescs
    del qdescs

    res = {}

    try:
        aps = [
            db.eval_query_AP(q, s)
            for q, s in enumerate(tqdm.tqdm(scores, desc='AP'))
        ]
        if not isinstance(aps[0], dict):
            aps = [float(e) for e in aps]
            if detailed:
                res['APs'] = aps
            # Queries with no relevant images have an AP of -1
            res['mAP'] = float(np.mean([e for e in aps if e >= 0]))
        else:
            modes = aps[0].keys()
            for mode in modes:
                apst = [float(e[mode]) for e in aps]
                if detailed:
                    res['APs' + '-' + mode] = apst
                # Queries with no relevant images have an AP of -1
                res['mAP' + '-' + mode] = float(
                    np.mean([e for e in apst if e >= 0]))
    except NotImplementedError:
        print(" AP not implemented!")

    try:
        tops = [
            db.eval_query_top(q, s)
            for q, s in enumerate(tqdm.tqdm(scores, desc='top1'))
        ]
        if detailed:
            res['tops'] = tops
        for k in tops[0]:
            res['top%d' % k] = float(np.mean([top[k] for top in tops]))
    except NotImplementedError:
        pass

    return res
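A hedged invocation sketch for the function above; db and net must come from the surrounding framework (a dataset exposing get_query_db()/eval_query_AP() and a loaded network), and the keyword values below are illustrative, not canonical:

# Hypothetical call: GeM pooling, query expansion as in Example #2,
# and descriptor caching so later runs can pass load_feats instead.
res = eval_model(db, net, trfs='',
                 pooling='gem', gemp=3,
                 aqe=dict(alpha=3, k=10),       # forwarded to expand_descriptors
                 save_feats='cache/feats_dir')  # directory for feats.*.npy files
print('mAP = %.4f' % res.get('mAP', float('nan')))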
Example #4
def test(db,
         net,
         trfs,
         pooling='mean',
         gemp=3,
         detailed=False,
         threads=8,
         batch_size=16):
    """ Evaluate a trained model (network) on a given dataset.
    The dataset is supposed to contain the evaluation code.
    """
    print("\n>> Evaluation...")
    query_db = db.get_query_db()

    # extract DB feats
    bdescs = []
    qdescs = []

    trfs_list = [trfs] if isinstance(trfs, str) else trfs

    for trfs in trfs_list:
        kw = dict(iscuda=net.iscuda,
                  threads=threads,
                  batch_size=batch_size,
                  same_size='Pad' in trfs or 'Crop' in trfs)
        bdescs.append(extract_image_features(db, trfs, net, desc="DB", **kw))

        # extract query feats
        qdescs.append(bdescs[-1] if db is query_db else extract_image_features(
            query_db, trfs, net, desc="query", **kw))

    # pool from multiple transforms (scales)
    bdescs = F.normalize(pool(bdescs, pooling, gemp), p=2, dim=1)
    qdescs = F.normalize(pool(qdescs, pooling, gemp), p=2, dim=1)

    bdescs = tonumpy(bdescs)
    qdescs = tonumpy(qdescs)

    scores = matmul(qdescs, bdescs)

    del bdescs
    del qdescs

    res = {}

    try:
        aps = [
            db.eval_query_AP(q, s)
            for q, s in enumerate(tqdm.tqdm(scores, desc='AP'))
        ]
        if not isinstance(aps[0], dict):
            aps = [float(e) for e in aps]
            if detailed:
                res['APs'] = aps
            # Queries with no relevant images have an AP of -1
            res['mAP'] = float(np.mean([e for e in aps if e >= 0]))
        else:
            modes = aps[0].keys()
            for mode in modes:
                apst = [float(e[mode]) for e in aps]
                if detailed:
                    res['APs' + '-' + mode] = apst
                # Queries with no relevant images have an AP of -1
                res['mAP' + '-' + mode] = float(
                    np.mean([e for e in apst if e >= 0]))
    except NotImplementedError:
        print(" AP not implemented!")

    return res
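Example #4 is the same pipeline reduced to extraction, pooling and AP evaluation. A hedged usage sketch (the transform strings are assumed syntax, and db/net again come from the surrounding framework):

# Hypothetical multi-scale run: one descriptor per transform string,
# pooled together before scoring. The transform syntax is an assumption.
res = test(db, net,
           trfs=['', 'Scale(800)'],
           pooling='gem', gemp=3, detailed=True)
print({k: v for k, v in res.items() if k.startswith('mAP')})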
Example #5
def extract_kapture_global_features(kapture_root_path: str,
                                    net,
                                    global_features_type: str,
                                    trfs,
                                    pooling='mean',
                                    gemp=3,
                                    whiten=None,
                                    threads=8,
                                    batch_size=16):
    """ Extract features from trained model (network) on a given dataset.
    """
    print(f'loading {kapture_root_path}')
    with get_all_tar_handlers(kapture_root_path,
                              mode={
                                  kapture.Keypoints: 'r',
                                  kapture.Descriptors: 'r',
                                  kapture.GlobalFeatures: 'a',
                                  kapture.Matches: 'r'
                              }) as tar_handlers:
        kdata = kapture_from_dir(kapture_root_path,
                                 None,
                                 skip_list=[
                                     kapture.Keypoints, kapture.Descriptors,
                                     kapture.Matches, kapture.Points3d,
                                     kapture.Observations
                                 ],
                                 tar_handlers=tar_handlers)
        root = get_image_fullpath(kapture_root_path, image_filename=None)
        assert kdata.records_camera is not None
        imgs = [
            image_name
            for _, _, image_name in kapture.flatten(kdata.records_camera)
        ]
        if kdata.global_features is None:
            kdata.global_features = {}

        if global_features_type in kdata.global_features:
            imgs = [
                image_name for image_name in imgs if image_name not in
                kdata.global_features[global_features_type]
            ]
        if len(imgs) == 0:
            print('All global features are already extracted')
            return

        dataset = ImageList(img_list_path=None, root=root, imgs=imgs)

        print(f'\nEvaluation on {dataset}')
        # extract DB feats
        bdescs = []
        trfs_list = [trfs] if isinstance(trfs, str) else trfs

        for trfs in trfs_list:
            kw = dict(iscuda=net.iscuda,
                      threads=threads,
                      batch_size=batch_size,
                      same_size='Pad' in trfs or 'Crop' in trfs)
            bdescs.append(
                extract_image_features(dataset, trfs, net, desc="DB", **kw))

        # pool from multiple transforms (scales)
        bdescs = tonumpy(F.normalize(pool(bdescs, pooling, gemp), p=2, dim=1))

        if whiten is not None:
            bdescs = common.whiten_features(bdescs, net.pca, **whiten)

        print('writing extracted global features')
        os.umask(0o002)
        gfeat_dtype = bdescs.dtype
        gfeat_dsize = bdescs.shape[1]
        if global_features_type not in kdata.global_features:
            kdata.global_features[
                global_features_type] = kapture.GlobalFeatures(
                    'dirtorch', gfeat_dtype, gfeat_dsize, 'L2')
            global_features_config_absolute_path = get_feature_csv_fullpath(
                kapture.GlobalFeatures, global_features_type,
                kapture_root_path)
            global_features_to_file(
                global_features_config_absolute_path,
                kdata.global_features[global_features_type])
        else:
            assert kdata.global_features[
                global_features_type].dtype == gfeat_dtype
            assert kdata.global_features[
                global_features_type].dsize == gfeat_dsize
            assert kdata.global_features[
                global_features_type].metric_type == 'L2'
        for i in tqdm.tqdm(range(dataset.nimg)):
            image_name = dataset.get_key(i)
            global_feature_fullpath = get_global_features_fullpath(
                global_features_type, kapture_root_path, image_name,
                tar_handlers)
            gfeat_i = bdescs[i, :]
            assert gfeat_i.shape == (gfeat_dsize, )
            image_global_features_to_file(global_feature_fullpath, gfeat_i)
            kdata.global_features[global_features_type].add(image_name)
            del gfeat_i

        del bdescs

        if not global_features_check_dir(
                kdata.global_features[global_features_type],
                global_features_type, kapture_root_path, tar_handlers):
            print('Global feature extraction ended successfully, '
                  'but not all files were saved')
        else:
            print('Features extracted.')
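Finally, a hedged driver sketch for the kapture extractor; the model-loading helper, checkpoint name and dataset path below are placeholders rather than part of the API shown above:

# Hypothetical setup: obtain a dirtorch network however the surrounding
# codebase does it; load_model is a placeholder name, not a known API.
net = load_model('Resnet101-AP-GeM.pt')

extract_kapture_global_features(
    kapture_root_path='datasets/my_dataset',  # placeholder path
    net=net,
    global_features_type='Resnet101-AP-GeM',
    trfs='',       # single pass; pass a list of transforms for multi-scale
    pooling='gem',
    gemp=3,
    whiten=None)   # or a kwargs dict for common.whiten_features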