Example #1
import numpy as np
import utool as ut


def get_training_fsv(cm, namemode=True, num=None, top_percent=None):
    """
    CommandLine:
        python -m ibeis.algo.hots.scorenorm --exec-get_training_fsv --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.scorenorm import *  # NOQA
        >>> import ibeis
        >>> num = None
        >>> cm, qreq_ = ibeis.testdata_cm('PZ_MTEST', a='default:dindex=0:10,qindex=0:1', t='best')
        >>> (tp_fsv, tn_fsv) = get_training_fsv(cm, namemode=False)
        >>> result = ('(tp_fsv, tn_fsv) = %s' % (ut.repr2((tp_fsv, tn_fsv), nl=1),))
        >>> print(result)
    """
    if namemode:
        tp_idxs, tn_idxs = get_topname_training_idxs(cm, num=num)
    else:
        tp_idxs, tn_idxs = get_topannot_training_idxs(cm, num=num)

    # Keep only the top-scoring percentile of the feature matches
    if top_percent is not None:
        cm_orig = cm
        tophalf_indices = [
            ut.take_percentile(fs.argsort()[::-1], top_percent)
            for fs in cm.get_fsv_prod_list()
        ]
        cm = cm_orig.take_feature_matches(tophalf_indices, keepscores=True)

        assert np.all(
            cm_orig.daid_list.take(tp_idxs) == cm.daid_list.take(tp_idxs))
        assert np.all(
            cm_orig.daid_list.take(tn_idxs) == cm.daid_list.take(tn_idxs))

    tp_fsv = np.vstack(ut.take(cm.fsv_list, tp_idxs))
    tn_fsv = np.vstack(ut.take(cm.fsv_list, tn_idxs))
    return tp_fsv, tn_fsv
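
A minimal usage sketch, mirroring the doctest above: it builds a small test ChipMatch from the PZ_MTEST database, extracts the true-positive and true-negative feature-score vectors, and then (purely for illustration) collapses each row to a single score by taking the product across score columns, analogous to what get_fsv_prod_list does internally. The reduction step is an assumption for demonstration, not part of get_training_fsv.

import ibeis
from ibeis.algo.hots.scorenorm import get_training_fsv

# Small ChipMatch against the PZ_MTEST test database (same setup as the doctest)
cm, qreq_ = ibeis.testdata_cm('PZ_MTEST', a='default:dindex=0:10,qindex=0:1', t='best')

# Rows are feature matches, columns are the individual filter scores
tp_fsv, tn_fsv = get_training_fsv(cm, namemode=False)

# Illustrative reduction: collapse each score vector to one scalar per match
tp_fs = tp_fsv.prod(axis=1)
tn_fs = tn_fsv.prod(axis=1)
print('mean tp score = %.4f, mean tn score = %.4f' % (tp_fs.mean(), tn_fs.mean()))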
Example #2
import numpy as np
import utool as ut
import vtool as vt


def get_training_desc_dist(cm, qreq_, fsv_col_lbls=[], namemode=True,
                           top_percent=None, data_annots=None,
                           query_annots=None, num=None):
    r"""
    computes custom distances on prematched descriptors

    SeeAlso:
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=ratio

        python -m ibeis --tf learn_featscore_normalizer --show --disttype=normdist -a timectrl -t default:K=1 --db PZ_Master1 --save pzmaster_normdist.png
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=normdist -a timectrl -t default:K=1 --db PZ_MTEST --save pzmtest_normdist.png
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=normdist -a timectrl -t default:K=1 --db GZ_ALL

        python -m ibeis --tf learn_featscore_normalizer --show --disttype=L2_sift -a timectrl -t default:K=1 --db PZ_MTEST
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=L2_sift -a timectrl -t default:K=1 --db PZ_Master1

        python -m ibeis --tf compare_featscores --show --disttype=L2_sift,normdist -a timectrl -t default:K=1 --db GZ_ALL

    CommandLine:
        python -m ibeis.algo.hots.scorenorm --exec-get_training_desc_dist
        python -m ibeis.algo.hots.scorenorm --exec-get_training_desc_dist:1

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.scorenorm import *  # NOQA
        >>> import ibeis
        >>> cm, qreq_ = ibeis.testdata_cm(defaultdb='PZ_MTEST')
        >>> fsv_col_lbls = ['ratio', 'lnbnn', 'L2_sift']
        >>> namemode = False
        >>> (tp_fsv, tn_fsv) = get_training_desc_dist(cm, qreq_, fsv_col_lbls,
        >>>                                           namemode=namemode)
        >>> result = ut.repr2((tp_fsv.T, tn_fsv.T), nl=1)
        >>> print(result)

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.scorenorm import *  # NOQA
        >>> import ibeis
        >>> cm, qreq_ = ibeis.testdata_cm(defaultdb='PZ_MTEST')
        >>> fsv_col_lbls = cm.fsv_col_lbls
        >>> num = None
        >>> namemode = False
        >>> top_percent = None
        >>> data_annots = None
        >>> (tp_fsv1, tn_fsv1) = get_training_fsv(cm, namemode=namemode,
        >>>                                       top_percent=top_percent)
        >>> (tp_fsv, tn_fsv) = get_training_desc_dist(cm, qreq_, fsv_col_lbls,
        >>>                                           namemode=namemode,
        >>>                                           top_percent=top_percent)
        >>> vt.asserteq(tp_fsv1, tp_fsv)
        >>> vt.asserteq(tn_fsv1, tn_fsv)
    """
    if namemode:
        tp_idxs, tn_idxs = get_topname_training_idxs(cm, num=num)
    else:
        tp_idxs, tn_idxs = get_topannot_training_idxs(cm, num=num)

    if top_percent is not None:
        cm_orig = cm
        cm_orig.assert_self(qreq_, verbose=False)

        # Keep only the top-scoring percentile of the feature matches
        tophalf_indices = [
            ut.take_percentile(fs.argsort()[::-1], top_percent)
            for fs in cm.get_fsv_prod_list()
        ]
        cm = cm_orig.take_feature_matches(tophalf_indices, keepscores=True)

        assert np.all(cm_orig.daid_list.take(tp_idxs) == cm.daid_list.take(tp_idxs))
        assert np.all(cm_orig.daid_list.take(tn_idxs) == cm.daid_list.take(tn_idxs))

        cm.assert_self(qreq_, verbose=False)

    ibs = qreq_.ibs
    query_config2_ = qreq_.extern_query_config2
    data_config2_ = qreq_.extern_data_config2
    special_xs, dist_xs = vt.index_partition(fsv_col_lbls, ['fg', 'ratio', 'lnbnn', 'normdist'])
    dist_lbls = ut.take(fsv_col_lbls, dist_xs)
    special_lbls = ut.take(fsv_col_lbls, special_xs)

    qaid = cm.qaid
    # cm.assert_self(qreq_=qreq_)

    fsv_list = []
    for idxs in [tp_idxs, tn_idxs]:
        daid_list = cm.daid_list.take(idxs)

        # Matching indices in query / database images
        qfxs_list = ut.take(cm.qfxs_list, idxs)
        dfxs_list = ut.take(cm.dfxs_list, idxs)

        need_norm = len(ut.setintersect_ordered(['ratio', 'lnbnn', 'normdist'], special_lbls)) > 0
        #need_norm |= 'parzen' in special_lbls
        #need_norm |= 'norm_parzen' in special_lbls
        need_dists = len(dist_xs) > 0

        if need_dists or need_norm:
            qaid_list = [qaid] * len(qfxs_list)
            qvecs_flat_m = np.vstack(ibs.get_annot_vecs_subset(qaid_list, qfxs_list, config2_=query_config2_))
            dvecs_flat_m = np.vstack(ibs.get_annot_vecs_subset(daid_list, dfxs_list, config2_=data_config2_))

        if need_norm:
            assert any(x is not None for x in cm.filtnorm_aids), 'no normalizer known'
            naids_list = ut.take(cm.naids_list, idxs)
            nfxs_list  = ut.take(cm.nfxs_list, idxs)
            nvecs_flat = ibs.lookup_annot_vecs_subset(naids_list, nfxs_list, config2_=data_config2_,
                                                      annots=data_annots)
            _nvecs_flat_m = ut.compress(nvecs_flat, nvecs_flat)
            nvecs_flat_m = vt.safe_vstack(_nvecs_flat_m, qvecs_flat_m.shape, qvecs_flat_m.dtype)

            vdist = vt.L2_sift(qvecs_flat_m, dvecs_flat_m)
            ndist = vt.L2_sift(qvecs_flat_m, nvecs_flat_m)

            #assert np.all(vdist <= ndist)

            #vdist = vt.L2_sift_sqrd(qvecs_flat_m, dvecs_flat_m)
            #ndist = vt.L2_sift_sqrd(qvecs_flat_m, nvecs_flat_m)

            #vdist = vt.L2_root_sift(qvecs_flat_m, dvecs_flat_m)
            #ndist = vt.L2_root_sift(qvecs_flat_m, nvecs_flat_m)

        if len(special_xs) > 0:
            special_dist_list = []
            # assert special_lbls[0] == 'fg'
            if 'fg' in special_lbls:
                # hack for fgweights (could get them directly from fsv)
                qfgweights_flat_m = np.hstack(ibs.get_annot_fgweights_subset([qaid] * len(qfxs_list), qfxs_list, config2_=query_config2_))
                dfgweights_flat_m = np.hstack(ibs.get_annot_fgweights_subset(daid_list, dfxs_list, config2_=data_config2_))
                fgweights = np.sqrt(qfgweights_flat_m * dfgweights_flat_m)
                special_dist_list.append(fgweights)

            if 'ratio' in special_lbls:
                # Integrating ratio test
                ratio_dist = (vdist / ndist)
                special_dist_list.append(ratio_dist)

            if 'lnbnn' in special_lbls:
                lnbnn_dist = ndist - vdist
                special_dist_list.append(lnbnn_dist)

            #if 'parzen' in special_lbls:
            #    parzen = vt.gauss_parzen_est(vdist, sigma=.38)
            #    special_dist_list.append(parzen)

            #if 'norm_parzen' in special_lbls:
            #    parzen = vt.gauss_parzen_est(ndist, sigma=.38)
            #    special_dist_list.append(parzen)

            if 'normdist' in special_lbls:
                special_dist_list.append(ndist)

            special_dists = np.vstack(special_dist_list).T
        else:
            special_dists = np.empty((0, 0))

        if len(dist_xs) > 0:
            # Get descriptors
            # Compute descriptor distances
            _dists = vt.compute_distances(qvecs_flat_m, dvecs_flat_m, dist_lbls)
            dists = np.vstack(_dists.values()).T
        else:
            dists = np.empty((0, 0))

        fsv = vt.rebuild_partition(special_dists.T, dists.T, special_xs, dist_xs)
        fsv = np.array(fsv).T
        fsv_list.append(fsv)
    tp_fsv, tn_fsv = fsv_list
    return tp_fsv, tn_fsv
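
A minimal usage sketch based on the first doctest: it recomputes three per-match measures directly from the matched descriptors. Per the code above, the 'ratio' column is vdist / ndist and the 'lnbnn' column is ndist - vdist, where vdist is the query-to-match descriptor distance and ndist is the query-to-normalizer distance.

import ibeis
from ibeis.algo.hots.scorenorm import get_training_desc_dist

cm, qreq_ = ibeis.testdata_cm(defaultdb='PZ_MTEST')

# Per-match measures recomputed from the raw descriptors:
#   'ratio'   -> vdist / ndist   (ratio test)
#   'lnbnn'   -> ndist - vdist   (LNBNN score)
#   'L2_sift' -> direct SIFT descriptor distance
fsv_col_lbls = ['ratio', 'lnbnn', 'L2_sift']
tp_fsv, tn_fsv = get_training_desc_dist(cm, qreq_, fsv_col_lbls, namemode=False)
print('tp_fsv.shape = %r, tn_fsv.shape = %r' % (tp_fsv.shape, tn_fsv.shape))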
Example #3
import numpy as np
import utool as ut


def vsone_feature_matching(kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, cfgdict={},
                           flann1=None, flann2=None, verbose=None):
    r"""
    Core logic for one-vs-one (vsone) feature matching between two sets of
    keypoints and descriptors.

    Args:
        vecs1 (ndarray[uint8_t, ndim=2]): SIFT descriptors
        vecs2 (ndarray[uint8_t, ndim=2]): SIFT descriptors
        kpts1 (ndarray[float32_t, ndim=2]):  keypoints
        kpts2 (ndarray[float32_t, ndim=2]):  keypoints

    Ignore:
        >>> from vtool.matching import *  # NOQA
        %pylab qt4
        import plottool as pt
        pt.imshow(rchip1)
        pt.draw_kpts2(kpts1)

        pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm=fm, fs=fs)
        pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm=fm, fs=fs)
    """
    import vtool as vt
    import pyflann
    from vtool import spatial_verification as sver
    sver_xy_thresh = cfgdict.get('sver_xy_thresh', .01)
    ratio_thresh   = cfgdict.get('ratio_thresh', .625)
    refine_method  = cfgdict.get('refine_method', 'homog')
    symmetric      = cfgdict.get('symmetric', False)
    K              = cfgdict.get('K', 1)
    Knorm          = cfgdict.get('Knorm', 1)
    #ratio_thresh =  .99
    # GET NEAREST NEIGHBORS
    checks = 800
    #pseudo_max_dist_sqrd = (np.sqrt(2) * 512) ** 2
    #pseudo_max_dist_sqrd = 2 * (512 ** 2)
    if verbose is None:
        verbose = True

    flann_params = {'algorithm': 'kdtree', 'trees': 8}
    if flann1 is None:
        flann1 = vt.flann_cache(vecs1, flann_params=flann_params, verbose=verbose)

    if symmetric:
        if flann2 is None:
            flann2 = vt.flann_cache(vecs2, flann_params=flann_params, verbose=verbose)

    try:
        try:
            num_neighbors = K + Knorm
            fx2_to_fx1, fx2_to_dist = normalized_nearest_neighbors(flann1, vecs2, num_neighbors, checks)
            #fx2_to_fx1, _fx2_to_dist = flann1.nn_index(vecs2, num_neighbors=K, checks=checks)
            if symmetric:
                fx1_to_fx2, fx1_to_dist = normalized_nearest_neighbors(flann2, vecs1, K, checks)

        except pyflann.FLANNException:
            print('vecs1.shape = %r' % (vecs1.shape,))
            print('vecs2.shape = %r' % (vecs2.shape,))
            print('vecs1.dtype = %r' % (vecs1.dtype,))
            print('vecs2.dtype = %r' % (vecs2.dtype,))
            raise
        if symmetric:
            is_symmetric = flag_symmetric_matches(fx2_to_fx1, fx1_to_fx2)
            fx2_to_fx1 = fx2_to_fx1.compress(is_symmetric, axis=0)
            fx2_to_dist = fx2_to_dist.compress(is_symmetric, axis=0)

        assigntup = assign_unconstrained_matches(fx2_to_fx1, fx2_to_dist)

        fx2_match, fx1_match, fx1_norm, match_dist, norm_dist = assigntup
        fm_ORIG = np.vstack((fx1_match, fx2_match)).T
        fs_ORIG = 1 - np.divide(match_dist, norm_dist)
        # APPLY RATIO TEST
        fm_RAT, fs_RAT, fm_norm_RAT = ratio_test(fx2_match, fx1_match, fx1_norm,
                                                 match_dist, norm_dist,
                                                 ratio_thresh)

        # SPATIAL VERIFICATION FILTER
        match_weights = np.ones(len(fm_RAT))
        svtup = sver.spatially_verify_kpts(kpts1, kpts2, fm_RAT, sver_xy_thresh,
                                           dlen_sqrd2, match_weights=match_weights,
                                           refine_method=refine_method)
        if svtup is not None:
            (homog_inliers, homog_errors, H_RAT) = svtup[0:3]
        else:
            H_RAT = np.eye(3)
            homog_inliers = []
        fm_RAT_SV = fm_RAT.take(homog_inliers, axis=0)
        fs_RAT_SV = fs_RAT.take(homog_inliers, axis=0)
        fm_norm_RAT_SV = fm_norm_RAT[homog_inliers]

        top_percent = .5
        top_idx = ut.take_percentile(fx2_to_dist.T[0].argsort(), top_percent)
        fm_TOP = fm_ORIG.take(top_idx, axis=0)
        fs_TOP = fx2_to_dist.T[0].take(top_idx)
        #match_weights = np.ones(len(fm_TOP))
        #match_weights = (np.exp(fs_TOP) / np.sqrt(np.pi * 2))
        match_weights = 1 - fs_TOP
        svtup = sver.spatially_verify_kpts(kpts1, kpts2, fm_TOP, sver_xy_thresh,
                                           dlen_sqrd2, match_weights=match_weights,
                                           refine_method=refine_method)
        if svtup is not None:
            (homog_inliers, homog_errors, H_TOP) = svtup[0:3]
        else:
            H_TOP = np.eye(3)
            homog_inliers = []
        fm_TOP_SV = fm_TOP.take(homog_inliers, axis=0)
        fs_TOP_SV = fs_TOP.take(homog_inliers, axis=0)

        matches = {
            'ORIG'   : MatchTup2(fm_ORIG, fs_ORIG),
            'RAT'    : MatchTup3(fm_RAT, fs_RAT, fm_norm_RAT),
            'RAT+SV' : MatchTup3(fm_RAT_SV, fs_RAT_SV, fm_norm_RAT_SV),
            'TOP'    : MatchTup2(fm_TOP, fs_TOP),
            'TOP+SV' : MatchTup2(fm_TOP_SV, fs_TOP_SV),
        }
        output_metadata = {
            'H_RAT': H_RAT,
            'H_TOP': H_TOP,
        }

    except MatchingError:
        fm_ERR = np.empty((0, 2), dtype=np.int32)
        fs_ERR = np.empty((0, 1), dtype=np.float32)
        H_ERR = np.eye(3)
        matches = {
            'ORIG'   : MatchTup2(fm_ERR, fs_ERR),
            'RAT'    : MatchTup3(fm_ERR, fs_ERR, fm_ERR),
            'RAT+SV' : MatchTup3(fm_ERR, fs_ERR, fm_ERR),
            'TOP'    : MatchTup2(fm_ERR, fs_ERR),
            'TOP+SV' : MatchTup2(fm_ERR, fs_ERR),
        }
        output_metadata = {
            'H_RAT': H_ERR,
            'H_TOP': H_ERR,
        }

    return matches, output_metadata
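
A hedged sketch of how this function might be called. detect_feats is a hypothetical placeholder for whatever keypoint/descriptor extractor produces kpts and vecs (it is not part of vtool), and the chip paths and size are made up; only the cfgdict keys and the structure of the returned matches dictionary come from the code above.

from vtool.matching import vsone_feature_matching

# Hypothetical feature extraction; replace with real detector output (e.g. pyhesaff)
kpts1, vecs1 = detect_feats('chip1.png')   # placeholder helper, not part of vtool
kpts2, vecs2 = detect_feats('chip2.png')   # placeholder helper, not part of vtool
w2, h2 = 300, 400                          # size of the second chip (placeholder)
dlen_sqrd2 = w2 ** 2 + h2 ** 2             # squared diagonal length of chip 2

cfgdict = {
    'ratio_thresh': 0.625,     # ratio-test threshold
    'sver_xy_thresh': 0.01,    # spatial-verification xy threshold
    'refine_method': 'homog',
    'symmetric': False,
    'K': 1,
    'Knorm': 1,
}
matches, output_metadata = vsone_feature_matching(
    kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, cfgdict=cfgdict)

# 'matches' maps pipeline stages ('ORIG', 'RAT', 'RAT+SV', 'TOP', 'TOP+SV')
# to (fm, fs[, fm_norm]) tuples of feature-match indices and scores
fm_sv, fs_sv = matches['RAT+SV'][0:2]
print('spatially verified ratio-test matches: %d' % len(fm_sv))
print('H_RAT:\n%r' % (output_metadata['H_RAT'],))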
Example #4
import numpy as np
import utool as ut


def vsone_feature_matching(kpts1,
                           vecs1,
                           kpts2,
                           vecs2,
                           dlen_sqrd2,
                           cfgdict={},
                           flann1=None,
                           flann2=None,
                           verbose=None):
    r"""
    Core logic for one-vs-one (vsone) feature matching between two sets of
    keypoints and descriptors.

    Args:
        vecs1 (ndarray[uint8_t, ndim=2]): SIFT descriptors
        vecs2 (ndarray[uint8_t, ndim=2]): SIFT descriptors
        kpts1 (ndarray[float32_t, ndim=2]):  keypoints
        kpts2 (ndarray[float32_t, ndim=2]):  keypoints

    Ignore:
        >>> from vtool.matching import *  # NOQA
        %pylab qt4
        import plottool as pt
        pt.imshow(rchip1)
        pt.draw_kpts2(kpts1)

        pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm=fm, fs=fs)
        pt.show_chipmatch2(rchip1, rchip2, kpts1, kpts2, fm=fm, fs=fs)
    """
    import vtool as vt
    import pyflann
    from vtool import spatial_verification as sver
    sver_xy_thresh = cfgdict.get('sver_xy_thresh', .01)
    ratio_thresh = cfgdict.get('ratio_thresh', .625)
    refine_method = cfgdict.get('refine_method', 'homog')
    symmetric = cfgdict.get('symmetric', False)
    K = cfgdict.get('K', 1)
    Knorm = cfgdict.get('Knorm', 1)
    #ratio_thresh =  .99
    # GET NEAREST NEIGHBORS
    checks = 800
    #pseudo_max_dist_sqrd = (np.sqrt(2) * 512) ** 2
    #pseudo_max_dist_sqrd = 2 * (512 ** 2)
    if verbose is None:
        verbose = True

    flann_params = {'algorithm': 'kdtree', 'trees': 8}
    if flann1 is None:
        flann1 = vt.flann_cache(vecs1,
                                flann_params=flann_params,
                                verbose=verbose)

    if symmetric:
        if flann2 is None:
            flann2 = vt.flann_cache(vecs2,
                                    flann_params=flann_params,
                                    verbose=verbose)

    try:
        try:
            num_neighbors = K + Knorm
            fx2_to_fx1, fx2_to_dist = normalized_nearest_neighbors(
                flann1, vecs2, num_neighbors, checks)
            #fx2_to_fx1, _fx2_to_dist = flann1.nn_index(vecs2, num_neighbors=K, checks=checks)
            if symmetric:
                fx1_to_fx2, fx1_to_dist = normalized_nearest_neighbors(
                    flann2, vecs1, K, checks)

        except pyflann.FLANNException:
            print('vecs1.shape = %r' % (vecs1.shape, ))
            print('vecs2.shape = %r' % (vecs2.shape, ))
            print('vecs1.dtype = %r' % (vecs1.dtype, ))
            print('vecs2.dtype = %r' % (vecs2.dtype, ))
            raise
        if symmetric:
            is_symmetric = flag_symmetric_matches(fx2_to_fx1, fx1_to_fx2)
            fx2_to_fx1 = fx2_to_fx1.compress(is_symmetric, axis=0)
            fx2_to_dist = fx2_to_dist.compress(is_symmetric, axis=0)

        assigntup = assign_unconstrained_matches(fx2_to_fx1, fx2_to_dist)

        fx2_match, fx1_match, fx1_norm, match_dist, norm_dist = assigntup
        fm_ORIG = np.vstack((fx1_match, fx2_match)).T
        fs_ORIG = 1 - np.divide(match_dist, norm_dist)
        # APPLY RATIO TEST
        fm_RAT, fs_RAT, fm_norm_RAT = ratio_test(fx2_match, fx1_match,
                                                 fx1_norm, match_dist,
                                                 norm_dist, ratio_thresh)

        # SPATIAL VERIFICATION FILTER
        match_weights = np.ones(len(fm_RAT))
        svtup = sver.spatially_verify_kpts(kpts1,
                                           kpts2,
                                           fm_RAT,
                                           sver_xy_thresh,
                                           dlen_sqrd2,
                                           match_weights=match_weights,
                                           refine_method=refine_method)
        if svtup is not None:
            (homog_inliers, homog_errors, H_RAT) = svtup[0:3]
        else:
            H_RAT = np.eye(3)
            homog_inliers = []
        fm_RAT_SV = fm_RAT.take(homog_inliers, axis=0)
        fs_RAT_SV = fs_RAT.take(homog_inliers, axis=0)
        fm_norm_RAT_SV = fm_norm_RAT[homog_inliers]

        top_percent = .5
        top_idx = ut.take_percentile(fx2_to_dist.T[0].argsort(), top_percent)
        fm_TOP = fm_ORIG.take(top_idx, axis=0)
        fs_TOP = fx2_to_dist.T[0].take(top_idx)
        #match_weights = np.ones(len(fm_TOP))
        #match_weights = (np.exp(fs_TOP) / np.sqrt(np.pi * 2))
        match_weights = 1 - fs_TOP
        svtup = sver.spatially_verify_kpts(kpts1,
                                           kpts2,
                                           fm_TOP,
                                           sver_xy_thresh,
                                           dlen_sqrd2,
                                           match_weights=match_weights,
                                           refine_method=refine_method)
        if svtup is not None:
            (homog_inliers, homog_errors, H_TOP) = svtup[0:3]
        else:
            H_TOP = np.eye(3)
            homog_inliers = []
        fm_TOP_SV = fm_TOP.take(homog_inliers, axis=0)
        fs_TOP_SV = fs_TOP.take(homog_inliers, axis=0)

        matches = {
            'ORIG': MatchTup2(fm_ORIG, fs_ORIG),
            'RAT': MatchTup3(fm_RAT, fs_RAT, fm_norm_RAT),
            'RAT+SV': MatchTup3(fm_RAT_SV, fs_RAT_SV, fm_norm_RAT_SV),
            'TOP': MatchTup2(fm_TOP, fs_TOP),
            'TOP+SV': MatchTup2(fm_TOP_SV, fs_TOP_SV),
        }
        output_metadata = {
            'H_RAT': H_RAT,
            'H_TOP': H_TOP,
        }

    except MatchingError:
        fm_ERR = np.empty((0, 2), dtype=np.int32)
        fs_ERR = np.empty((0, 1), dtype=np.float32)
        H_ERR = np.eye(3)
        matches = {
            'ORIG': MatchTup2(fm_ERR, fs_ERR),
            'RAT': MatchTup3(fm_ERR, fs_ERR, fm_ERR),
            'RAT+SV': MatchTup3(fm_ERR, fs_ERR, fm_ERR),
            'TOP': MatchTup2(fm_ERR, fs_ERR),
            'TOP+SV': MatchTup2(fm_ERR, fs_ERR),
        }
        output_metadata = {
            'H_RAT': H_ERR,
            'H_TOP': H_ERR,
        }

    return matches, output_metadata