Example #1
def get_namescore_nonvoting_feature_flags(fm_list, fs_list, dnid_list, name_groupxs, kpts1=None):
    r"""
    fm_list = [fm[:min(len(fm), 10)] for fm in fm_list]
    fs_list = [fs[:min(len(fs), 10)] for fs in fs_list]
    """
    fx1_list = [fm.T[0] for fm in fm_list]
    # Group annotation matches by name
    name_grouped_fx1_list = vt.apply_grouping_(fx1_list, name_groupxs)
    name_grouped_fs_list  = vt.apply_grouping_(fs_list,  name_groupxs)
    # Stack up all matches to a particular name; keep track of original indices via offsets
    name_invertable_flat_fx1_list = list(map(ut.invertible_flatten2_numpy, name_grouped_fx1_list))
    name_grouped_fx1_flat = ut.get_list_column(name_invertable_flat_fx1_list, 0)
    name_grouped_invertable_cumsum_list = ut.get_list_column(name_invertable_flat_fx1_list, 1)
    name_grouped_fs_flat = list(map(np.hstack, name_grouped_fs_list))
    if kpts1 is not None:
        xys1_ = vt.get_xys(kpts1).T
        kpts_xyid_list = vt.compute_unique_data_ids(xys1_)
        # Make nested group for every name by query feature index (accounting for duplicate orientation)
        name_grouped_xyid_flat = list(kpts_xyid_list.take(fx1) for fx1 in name_grouped_fx1_flat)
        xyid_groupxs_list = list(vt.group_indices(xyid_flat)[1] for xyid_flat in name_grouped_xyid_flat)
        name_group_fx1_groupxs_list = xyid_groupxs_list
    else:
        # Make nested group for every name by query feature index
        fx1_groupxs_list = [vt.group_indices(fx1_flat)[1] for fx1_flat in name_grouped_fx1_flat]
        name_group_fx1_groupxs_list = fx1_groupxs_list
    name_grouped_fid_grouped_fs_list = [
        vt.apply_grouping(fs_flat, fid_groupxs)
        for fs_flat, fid_groupxs in zip(name_grouped_fs_flat, name_group_fx1_groupxs_list)
    ]

    # Flag which features are valid in this grouped space. Only one keypoint should be able to vote
    # for each group
    name_grouped_fid_grouped_isvalid_list = [
        np.array([fs_group.max() == fs_group for fs_group in fid_grouped_fs_list])
        for fid_grouped_fs_list in name_grouped_fid_grouped_fs_list
    ]

    # Go back to being grouped only in name space
    name_grouped_isvalid_flat_list = [
        vt.invert_apply_grouping2(fid_grouped_isvalid_list, fid_groupxs, dtype=bool)
        for fid_grouped_isvalid_list, fid_groupxs in zip(name_grouped_fid_grouped_isvalid_list, name_group_fx1_groupxs_list)
    ]

    name_grouped_isvalid_unflat_list = [
        ut.unflatten2(isvalid_flat, invertable_cumsum_list)
        for isvalid_flat, invertable_cumsum_list in zip(name_grouped_isvalid_flat_list, name_grouped_invertable_cumsum_list)
    ]

    # Reports which features were valid in name scoring for every annotation
    featflag_list = vt.invert_apply_grouping(name_grouped_isvalid_unflat_list, name_groupxs)
    return featflag_list
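
A minimal, self-contained sketch of the "one vote per query feature" rule above, using plain NumPy in place of the vtool/utool grouping helpers (the toy arrays are hypothetical): within each group of matches that share a query feature index, only the matches tying the group's maximum score are flagged valid.

import numpy as np

fx1 = np.array([0, 0, 1, 2, 2, 2])         # query feature index per match
fs = np.array([.3, .9, .5, .2, .8, .8])    # score per match
flags = np.zeros(len(fs), dtype=bool)
for u in np.unique(fx1):
    idxs = np.flatnonzero(fx1 == u)
    # mirror `fs_group.max() == fs_group`: flag matches tying the group max
    flags[idxs] = fs[idxs] == fs[idxs].max()
print(flags)  # [False  True  True False  True  True]
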
Example #2
def sparse_grid_coverage(kpts, chipsize, weights, pxl_per_bin=.3, grid_steps=1, grid_sigma=1.6):
    r"""
    Args:
        kpts (ndarray[float32_t, ndim=2]):  keypoints
        chipsize (tuple): (width, height) of the chip
        weights (ndarray): weight for each keypoint

    CommandLine:
        python -m vtool.coverage_grid --test-sparse_grid_coverage --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from vtool.coverage_grid import *  # NOQA
        >>> kpts, chipsize, weights = coverage_kpts.testdata_coverage()
        >>> chipsize = (chipsize[0] + 50, chipsize[1])
        >>> pxl_per_bin = 3
        >>> grid_steps = 2
        >>> grid_sigma = 1.6
        >>> coverage_gridtup = sparse_grid_coverage(kpts, chipsize, weights, pxl_per_bin, grid_steps, grid_sigma)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> show_coverage_grid(*coverage_gridtup)
        >>> pt.show_if_requested()
    """
    import vtool as vt
    # Compute grid size and stride
    chip_w, chip_h = chipsize
    # find enough rows/columns so each grid bin spans about pxl_per_bin pixels
    num_rows = max(vt.iround(chip_h / pxl_per_bin), 1)
    num_cols = max(vt.iround(chip_w / pxl_per_bin), 1)
    # stride is roughly equal in each direction, depending on rounding errors
    chipstride = np.array((chip_w / num_cols, chip_h / num_rows))
    # Find keypoint subbin locations relative to edge
    xy_arr = vt.get_xys(kpts)
    subbin_xy_arr = np.divide(xy_arr, chipstride[:, None])
    # Find subbin locations relative to center
    frac_subbin_index = np.subtract(subbin_xy_arr, .5)
    neighbor_bin_xy_indices = get_subbin_xy_neighbors(frac_subbin_index, grid_steps, num_cols, num_rows)
    # Find the centers of the neighboring bins
    neighbor_bin_centers = np.add(neighbor_bin_xy_indices, .5)
    # compute squared distance from each subbin to its neighboring bins
    neighbor_subbin_sqrddist_arr = compute_subbin_to_bins_dist(neighbor_bin_centers, subbin_xy_arr)
    # scale weights using gaussian falloff
    neighbor_bin_weights = weighted_gaussian_falloff(neighbor_subbin_sqrddist_arr, weights, grid_sigma)
    # convert to rowcol
    neighbor_bin_indices = neighbor_bin_rc_indices = neighbor_bin_xy_indices[:, :, ::-1]  # NOQA

    coverage_gridtup = (
        num_rows, num_cols, subbin_xy_arr, neighbor_bin_centers,
        neighbor_bin_weights, neighbor_bin_indices)
    return coverage_gridtup
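
The binning math above reduces to a few lines of plain NumPy. Below is a hedged, self-contained illustration for a single keypoint; the exact neighbor pattern produced by get_subbin_xy_neighbors is not shown in this listing, so the four surrounding bins used here are an assumption.

import numpy as np

chip_w, chip_h = 30.0, 20.0
num_cols, num_rows = 10, 7
stride = np.array([chip_w / num_cols, chip_h / num_rows])
xy = np.array([13.0, 9.0])                  # one keypoint, in pixels
subbin_xy = xy / stride                     # fractional grid coordinates
base = np.floor(subbin_xy - .5)             # lower-left neighboring bin (assumed pattern)
offsets = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
neighbor_centers = base + offsets + .5
sqrddist = ((neighbor_centers - subbin_xy) ** 2).sum(axis=1)
grid_sigma = 1.6
weights = np.exp(-sqrddist / (2 * grid_sigma ** 2))  # gaussian falloff
print(weights.round(3))
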
Example #3
def warp_patch_onto_kpts(
        kpts, patch, chipshape,
        weights=None,
        out=None,
        cov_scale_factor=.2,
        cov_agg_mode='max',
        cov_remove_shape=False,
        cov_remove_scale=False,
        cov_size_penalty_on=True,
        cov_size_penalty_power=.5,
        cov_size_penalty_frac=.1):
    r"""
    Overlays the source image onto a destination image in each keypoint location

    Args:
        kpts (ndarray[float32_t, ndim=2]):  keypoints
        patch (ndarray): patch to warp (e.g. a gaussian)
        chipshape (tuple): (h, w[, c]) shape of the destination chip
        weights (ndarray): score for every keypoint

    Kwargs:
        cov_scale_factor (float): scale of the coverage mask relative to the chip

    Returns:
        ndarray: mask

    CommandLine:
        python -m vtool.coverage_kpts --test-warp_patch_onto_kpts
        python -m vtool.coverage_kpts --test-warp_patch_onto_kpts --show
        python -m vtool.coverage_kpts --test-warp_patch_onto_kpts --show --hole
        python -m vtool.coverage_kpts --test-warp_patch_onto_kpts --show --square
        python -m vtool.coverage_kpts --test-warp_patch_onto_kpts --show --square --hole

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool.coverage_kpts import *  # NOQA
        >>> import vtool as vt
        >>> import pyhesaff
        >>> img_fpath    = ut.grab_test_imgpath('carl.jpg')
        >>> (kpts, vecs) = pyhesaff.detect_feats(img_fpath)
        >>> kpts = kpts[::15]
        >>> chip = vt.imread(img_fpath)
        >>> chipshape = chip.shape
        >>> weights = np.ones(len(kpts))
        >>> cov_scale_factor = 1.0
        >>> srcshape = (19, 19)
        >>> radius = srcshape[0] / 2.0
        >>> sigma = 0.4 * radius
        >>> SQUARE = ut.get_argflag('--square')
        >>> HOLE = ut.get_argflag('--hole')
        >>> if SQUARE:
        >>>     patch = np.ones(srcshape)
        >>> else:
        >>>     patch = ptool.gaussian_patch(shape=srcshape, sigma=sigma) #, norm_01=False)
        >>>     patch = patch / patch.max()
        >>> if HOLE:
        >>>     patch[int(patch.shape[0] / 2), int(patch.shape[1] / 2)] = 0
        >>> # execute function
        >>> dstimg = warp_patch_onto_kpts(kpts, patch, chipshape, weights, cov_scale_factor=cov_scale_factor)
        >>> # verify results
        >>> print('dstimg stats %r' % (ut.get_stats_str(dstimg, axis=None)),)
        >>> print('patch stats %r' % (ut.get_stats_str(patch, axis=None)),)
        >>> #print(patch.sum())
        >>> assert np.all(ut.inbounds(dstimg, 0, 1, eq=True))
        >>> # show results
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> mask = dstimg
        >>> show_coverage_map(chip, mask, patch, kpts)
        >>> pt.show_if_requested()
    """
    import vtool as vt
    chip_scale_h = int(np.ceil(chipshape[0] * cov_scale_factor))
    chip_scale_w = int(np.ceil(chipshape[1] * cov_scale_factor))
    if len(kpts) == 0:
        dstimg = np.zeros((chip_scale_h, chip_scale_w))
        return dstimg
    if weights is None:
        weights = np.ones(len(kpts))
    dsize = (chip_scale_w, chip_scale_h)
    # Allocate destination image
    patch_shape = patch.shape
    # Scale keypoints into destination image
    # <HACK>
    if cov_remove_shape:
        # disregard affine shape information in the keypoints
        # (I still don't understand why we are trying this)
        (patch_h, patch_w) = patch_shape
        half_width  = (patch_w / 2.0)  # - .5
        half_height = (patch_h / 2.0)  # - .5
        # Center src image
        T1 = vt.translation_mat3x3(-half_width + .5, -half_height + .5)
        # Scale src to the unit circle
        if not cov_remove_scale:
            S1 = vt.scale_mat3x3(1.0 / half_width, 1.0 / half_height)
        # Transform the source image to the keypoint ellipse
        kpts_T = np.array([vt.translation_mat3x3(x, y) for (x, y) in vt.get_xys(kpts).T])
        if not cov_remove_scale:
            kpts_S = np.array([vt.scale_mat3x3(np.sqrt(scale))
                               for scale in vt.get_scales(kpts).T])
        # Adjust for the requested scale factor
        S2 = vt.scale_mat3x3(cov_scale_factor, cov_scale_factor)
        #perspective_list = [S2.dot(A).dot(S1).dot(T1) for A in invVR_aff2Ds]
        if not cov_remove_scale:
            M_list = reduce(vt.matrix_multiply, (S2, kpts_T, kpts_S, S1, T1))
        else:
            M_list = reduce(vt.matrix_multiply, (S2, kpts_T, T1))
    # </HACK>
    else:
        M_list = ktool.get_transforms_from_patch_image_kpts(kpts, patch_shape,
                                                            cov_scale_factor)
    affmat_list = M_list[:, 0:2, :]
    weight_list = weights
    # For each keypoint warp a gaussian scaled by the feature score into the image
    warped_patch_iter = warped_patch_generator(
        patch, dsize, affmat_list, weight_list,
        cov_size_penalty_on=cov_size_penalty_on,
        cov_size_penalty_power=cov_size_penalty_power,
        cov_size_penalty_frac=cov_size_penalty_frac)
    # Either max or sum
    if cov_agg_mode == 'max':
        dstimg = vt.iter_reduce_ufunc(np.maximum, warped_patch_iter, out=out)
    elif cov_agg_mode == 'sum':
        dstimg = vt.iter_reduce_ufunc(np.add, warped_patch_iter, out=out)
        # HACK FOR SUM: DO NOT DO THIS FOR MAX
        dstimg[dstimg > 1.0] = 1.0
    else:
        raise AssertionError('Unknown cov_agg_mode=%r' % (cov_agg_mode,))
    return dstimg
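
Because warped_patch_generator's internals are not shown in this listing, here is a hedged sketch of the warp-and-aggregate step using OpenCV directly (the matrices and weights are toy values, not the actual generator): each 2x3 affine matrix places a weighted copy of the patch into the destination image, and cov_agg_mode='max' keeps the elementwise maximum.

import cv2
import numpy as np

patch = np.full((19, 19), 0.8, dtype=np.float32)  # toy patch
dsize = (80, 60)                                  # (width, height)
affmat_list = [
    np.array([[1., 0., 10.], [0., 1., 5.]], dtype=np.float32),
    np.array([[1., 0., 40.], [0., 1., 25.]], dtype=np.float32),
]
weight_list = [1.0, 0.5]
dstimg = np.zeros((dsize[1], dsize[0]), dtype=np.float32)
for M, w in zip(affmat_list, weight_list):
    # warp the weighted patch to its keypoint location in the destination
    warped = cv2.warpAffine(patch * w, M, dsize)
    np.maximum(dstimg, warped, out=dstimg)        # cov_agg_mode='max'
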
Example #4
def compute_nsum_score(cm, qreq_=None):
    r"""
    nsum

    Args:
        cm (ibeis.ChipMatch):

    Returns:
        tuple: (unique_nids, nsum_score_list)

    CommandLine:
        python -m ibeis.algo.hots.name_scoring --test-compute_nsum_score
        python -m ibeis.algo.hots.name_scoring --test-compute_nsum_score:0
        python -m ibeis.algo.hots.name_scoring --test-compute_nsum_score:2
        utprof.py -m ibeis.algo.hots.name_scoring --test-compute_nsum_score:2
        utprof.py -m ibeis.algo.hots.pipeline --test-request_ibeis_query_L0:0 --db PZ_Master1 -a timectrl:qindex=0:256

    Example0:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.name_scoring import *  # NOQA
        >>> # build test data
        >>> cm = testdata_chipmatch()
        >>> # execute function
        >>> (unique_nids, nsum_score_list) = compute_nsum_score(cm)
        >>> result = ut.list_str((unique_nids, nsum_score_list), label_list=['unique_nids', 'nsum_score_list'], with_dtype=False)
        >>> print(result)
        unique_nids = np.array([1, 2, 3])
        nsum_score_list = np.array([ 4.,  7.,  5.])

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.name_scoring import *  # NOQA
        >>> #ibs, qreq_, cm_list = plh.testdata_pre_sver('testdb1', qaid_list=[1])
        >>> ibs, qreq_, cm_list = plh.testdata_post_sver('PZ_MTEST', qaid_list=[18])
        >>> cm = cm_list[0]
        >>> cm.evaluate_dnids(qreq_.ibs)
        >>> cm._cast_scores()
        >>> #cm.qnid = 1   # Hack for testdb1 names
        >>> nsum_nid_list, nsum_score_list = compute_nsum_score(cm, qreq_)
        >>> assert np.all(nsum_nid_list == cm.unique_nids), 'nids out of alignment'
        >>> flags = (nsum_nid_list == cm.qnid)
        >>> max_true = nsum_score_list[flags].max()
        >>> max_false = nsum_score_list[~flags].max()
        >>> assert max_true > max_false, 'is this truly a hard case?'
        >>> assert max_true > 1.2, 'score=%r should be higher for aid=18' % (max_true,)
        >>> nsum_nid_list2, nsum_score_list2, _ = compute_nsum_score2(cm, qreq_)
        >>> assert np.allclose(nsum_score_list2, nsum_score_list), 'something is very wrong'
        >>> #assert np.all(nsum_score_list2 == nsum_score_list), 'could be a precision issue'

    Example2:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.name_scoring import *  # NOQA
        >>> #ibs, qreq_, cm_list = plh.testdata_pre_sver('testdb1', qaid_list=[1])
        >>> ibs, qreq_, cm_list = plh.testdata_post_sver('PZ_MTEST', qaid_list=[18], cfgdict=dict(augment_queryside_hack=True))
        >>> cm = cm_list[0]
        >>> cm.score_nsum(qreq_)
        >>> #cm.evaluate_dnids(qreq_.ibs)
        >>> #cm.qnid = 1   # Hack for testdb1 names
        >>> #nsum_nid_list, nsum_score_list = compute_nsum_score(cm, qreq_=qreq_)
        >>> ut.quit_if_noshow()
        >>> cm.show_ranked_matches(qreq_, ori=True)

    Example3:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.name_scoring import *  # NOQA
        >>> #ibs, qreq_, cm_list = plh.testdata_pre_sver('testdb1', qaid_list=[1])
        >>> ibs, qreq_, cm_list = plh.testdata_post_sver('testdb1', qaid_list=[1], cfgdict=dict(augment_queryside_hack=True))
        >>> cm = cm_list[0]
        >>> cm.score_nsum(qreq_)
        >>> #cm.evaluate_dnids(qreq_.ibs)
        >>> #cm.qnid = 1   # Hack for testdb1 names
        >>> #nsum_nid_list, nsum_score_list = compute_nsum_score(cm, qreq_=qreq_)
        >>> ut.quit_if_noshow()
        >>> cm.show_ranked_matches(qreq_, ori=True)

    Example4:
        >>> # ENABLE_DOCTEST
        >>> # FIXME: breaks when fg_on=True
        >>> from ibeis.algo.hots.name_scoring import *  # NOQA
        >>> from ibeis.algo.hots import name_scoring
        >>> from ibeis.algo.hots import scoring
        >>> import ibeis
        >>> # Test to make sure name score and chips score are equal when per_name=1
        >>> qreq_, args = plh.testdata_pre(
        >>>     'spatial_verification', defaultdb='PZ_MTEST',
        >>>     a=['default:dpername=1,qsize=1,dsize=10'],
        >>>     p=['default:K=1,fg_on=True,sqrd_dist_on=True'])
        >>> cm = args.cm_list_FILT[0]
        >>> ibs = qreq_.ibs
        >>> # Ensure there is only one aid per database name
        >>> assert isinstance(ibs, ibeis.control.IBEISControl.IBEISController)
        >>> #stats_dict = ibs.get_annot_stats_dict(qreq_.get_external_daids(), prefix='d')
        >>> #stats = stats_dict['dper_name']
        >>> stats = ibs.get_annot_per_name_stats(qreq_.get_external_daids())
        >>> print('per_name_stats = %s' % (ut.dict_str(stats, nl=False),))
        >>> assert stats['mean'] == 1 and stats['std'] == 0, 'this test requires one annot per name in the database'
        >>> cm.evaluate_dnids(qreq_.ibs)
        >>> cm.assert_self(qreq_)
        >>> cm._cast_scores()
        >>> # cm.fs_list = cm.fs_list.astype(np.float)
        >>> nsum_nid_list, nsum_score_list = name_scoring.compute_nsum_score(cm, qreq_)
        >>> nsum_nid_list2, nsum_score_list2, _ = name_scoring.compute_nsum_score2(cm, qreq_)
        >>> csum_score_list = scoring.compute_csum_score(cm)
        >>> vt.asserteq(nsum_score_list, csum_score_list)
        >>> vt.asserteq(nsum_score_list, csum_score_list, thresh=0, iswarning=True)
        >>> vt.asserteq(nsum_score_list2, csum_score_list, thresh=0, iswarning=True)
        >>> #assert np.allclose(nsum_score_list, csum_score_list), 'should be the same when K=1 and per_name=1'
        >>> #assert all(nsum_score_list  == csum_score_list), 'should be the same when K=1 and per_name=1'
        >>> #assert all(nsum_score_list2 == csum_score_list), 'should be the same when K=1 and per_name=1'
        >>> # Evaluate parts of the sourcecode


    Ignore:
        assert all(nsum_score_list3 == csum_score_list), 'should be the same when K=1 and per_name=1'
        fm_list = fm_list[0:1]
        fs_list = fs_list[0:1]
        featflag_list2 = featflag_list2[0:1]
        dnid_list = dnid_list[0:1]
        name_groupxs2 = name_groupxs2[0:1]
        nsum_nid_list2 = nsum_nid_list2[0:1]

    """
    #assert qreq_ is not None
    try:
        HACK_SINGLE_ORI = qreq_ is not None and (qreq_.qparams.augment_queryside_hack or qreq_.qparams.rotation_invariance)
    except AttributeError:
        HACK_SINGLE_ORI = qreq_ is not None and (qreq_.config.augment_queryside_hack or qreq_.config.feat_cfg.rotation_invariance)
    # The score for each feature match
    fm_list = cm.fm_list
    fs_list = cm.get_fsv_prod_list()
    dnid_list = cm.dnid_list
    # The query feature index for each feature match
    fx1_list = [fm.T[0] for fm in fm_list]
    """
    # Try a rebase?
    fx1_list = list(map(vt.compute_unique_data_ids_, fx1_list))
    """
    # Group annotation matches by name
    nsum_nid_list, name_groupxs = vt.group_indices(dnid_list)
    name_grouped_fx1_list = vt.apply_grouping_(fx1_list, name_groupxs)
    name_grouped_fs_list  = vt.apply_grouping_(fs_list,  name_groupxs)
    # Stack up all matches to a particular name
    name_grouped_fx1_flat = list(map(np.hstack, name_grouped_fx1_list))
    name_grouped_fs_flat  = list(map(np.hstack, name_grouped_fs_list))
    """
    assert np.all(name_grouped_fs_list[0][0] == fs_list[0])
    assert np.all(name_grouped_fs_flat[0] == fs_list[0])
    """
    if HACK_SINGLE_ORI:
        # among keypoints with the same xy location, only one may vote
        kpts1 = qreq_.ibs.get_annot_kpts(cm.qaid, config2_=qreq_.get_external_query_config2())
        xys1_ = vt.get_xys(kpts1).T
        kpts_xyid_list = vt.compute_unique_arr_dataids(xys1_)
        # Make nested group for every name by query feature index (accounting for duplicate orientation)
        name_grouped_xyid_flat = [kpts_xyid_list.take(fx1) for fx1 in name_grouped_fx1_flat]
        feat_groupxs_list = [vt.group_indices(xyid_flat)[1] for xyid_flat in name_grouped_xyid_flat]
    else:
        # make unique indices using feature indexes
        feat_groupxs_list = [vt.group_indices(fx1_flat)[1] for fx1_flat in name_grouped_fx1_flat]
    # Make nested group for every name by unique query feature index
    feat_grouped_fs_list = [[fs_flat.take(xs, axis=0) for xs in feat_groupxs]
                            for fs_flat, feat_groupxs in zip(name_grouped_fs_flat, feat_groupxs_list)]
    """
    np.array(feat_grouped_fs_list)[0].T[0] == fs_list
    """
    if False:
        valid_fs_list = [
            np.array([group.max() for group in grouped_fs])
            #np.array([group[group.argmax()] for group in grouped_fs])
            for grouped_fs in feat_grouped_fs_list
        ]
        nsum_score_list4 = np.array([valid_fs.sum() for valid_fs in valid_fs_list])  # NOQA
    # Prevent a feature from voting twice:
    # take only the max score that a query feature produced
    #name_grouped_valid_fs_list1 =[np.array([fs_group.max() for fs_group in feat_grouped_fs])
    #                            for feat_grouped_fs in feat_grouped_fs_list]
    nsum_score_list = np.array([np.sum([fs_group.max() for fs_group in feat_grouped_fs])
                                for feat_grouped_fs in feat_grouped_fs_list])
    return nsum_nid_list, nsum_score_list
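
Stripped of the vtool grouping machinery, the reduction above is: group matches by database name, let each unique query feature vote only once by keeping its best score, then sum per name. A self-contained sketch with hypothetical toy data:

import numpy as np

dnid_list = np.array([1, 1, 2])                   # database name id per annot
fx1_per_annot = [np.array([0, 1]), np.array([1]), np.array([0, 2])]
fs_per_annot = [np.array([.4, .7]), np.array([.9]), np.array([.5, .6])]
nsum_scores = {}
for nid in np.unique(dnid_list):
    idxs = np.flatnonzero(dnid_list == nid)
    fx1 = np.hstack([fx1_per_annot[i] for i in idxs])
    fs = np.hstack([fs_per_annot[i] for i in idxs])
    # each unique query feature contributes only its single best score
    nsum_scores[nid] = sum(fs[fx1 == u].max() for u in np.unique(fx1))
print(nsum_scores)  # approximately {1: 1.3, 2: 1.1}
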
Example #5
def compute_fmech_score(cm, qreq_=None, hack_single_ori=None):
    r"""
    nsum. This is the fmech scoring mechanism.

    Args:
        cm (ibeis.ChipMatch):

    Returns:
        tuple: (unique_nids, nsum_score_list)

    CommandLine:
        python -m ibeis.algo.hots.name_scoring --test-compute_fmech_score
        python -m ibeis.algo.hots.name_scoring --test-compute_fmech_score:0
        python -m ibeis.algo.hots.name_scoring --test-compute_fmech_score:2
        utprof.py -m ibeis.algo.hots.name_scoring --test-compute_fmech_score:2
        utprof.py -m ibeis.algo.hots.pipeline --test-request_ibeis_query_L0:0 --db PZ_Master1 -a timectrl:qindex=0:256

    Example0:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.name_scoring import *  # NOQA
        >>> cm = testdata_chipmatch()
        >>> nsum_score_list = compute_fmech_score(cm)
        >>> assert np.all(nsum_score_list == [ 4.,  7.,  5.])

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.name_scoring import *  # NOQA
        >>> ibs, qreq_, cm_list = plh.testdata_post_sver('PZ_MTEST', qaid_list=[18])
        >>> cm = cm_list[0]
        >>> cm.evaluate_dnids(qreq_)
        >>> cm._cast_scores()
        >>> #cm.qnid = 1   # Hack for testdb1 names
        >>> nsum_score_list = compute_fmech_score(cm, qreq_)
        >>> #assert np.all(nsum_nid_list == cm.unique_nids), 'nids out of alignment'
        >>> flags = (cm.unique_nids == cm.qnid)
        >>> max_true = nsum_score_list[flags].max()
        >>> max_false = nsum_score_list[~flags].max()
        >>> assert max_true > max_false, 'is this truly a hard case?'
        >>> assert max_true > 1.2, 'score=%r should be higher for aid=18' % (max_true,)

    Example2:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.name_scoring import *  # NOQA
        >>> ibs, qreq_, cm_list = plh.testdata_post_sver('PZ_MTEST', qaid_list=[18], cfgdict=dict(query_rotation_heuristic=True))
        >>> cm = cm_list[0]
        >>> cm.score_name_nsum(qreq_)
        >>> ut.quit_if_noshow()
        >>> cm.show_ranked_matches(qreq_, ori=True)

    Example3:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.name_scoring import *  # NOQA
        >>> #ibs, qreq_, cm_list = plh.testdata_pre_sver('testdb1', qaid_list=[1])
        >>> ibs, qreq_, cm_list = plh.testdata_post_sver('testdb1', qaid_list=[1], cfgdict=dict(query_rotation_heuristic=True))
        >>> cm = cm_list[0]
        >>> cm.score_name_nsum(qreq_)
        >>> ut.quit_if_noshow()
        >>> cm.show_ranked_matches(qreq_, ori=True)
    """
    #assert qreq_ is not None
    if hack_single_ori is None:
        try:
            hack_single_ori = qreq_ is not None and (
                qreq_.qparams.query_rotation_heuristic
                or qreq_.qparams.rotation_invariance)
        except AttributeError:
            hack_single_ori = True
    # The score for each feature match
    fm_list = cm.fm_list
    fs_list = cm.get_fsv_prod_list()
    # The query feature index for each feature match
    fx1_list = [fm.T[0] for fm in fm_list]
    if hack_single_ori:
        # Group keypoints with the same xy-coordinate.
        # Combine these features so each receives only one vote
        kpts1 = qreq_.ibs.get_annot_kpts(cm.qaid,
                                         config2_=qreq_.extern_query_config2)
        xys1_ = vt.get_xys(kpts1).T
        fx1_to_comboid = vt.compute_unique_arr_dataids(xys1_)
        fcombo_ids = [fx1_to_comboid.take(fx1) for fx1 in fx1_list]
    else:
        # use the feature index itself as a combo id
        # so each feature receives only one vote
        fcombo_ids = fx1_list

    if False:
        import ubelt as ub
        for ids in fcombo_ids:
            ub.find_duplicates(ids)

    # Group annotation matches by name
    # nsum_nid_list, name_groupxs = vt.group_indices(cm.dnid_list)
    # nsum_nid_list = cm.unique_nids
    name_groupxs = cm.name_groupxs

    nsum_score_list = []
    # For all indices matched to a particular name
    for name_idxs in name_groupxs:
        # Get feature indices and scores corresponding to the name's annots
        name_combo_ids = ut.take(fcombo_ids, name_idxs)
        name_fss = ut.take(fs_list, name_idxs)
        # Flatten over annots in the name
        fs = np.hstack(name_fss)
        if len(fs) == 0:
            nsum_score_list.append(0)
            continue
        combo_ids = np.hstack(name_combo_ids)
        # Features (with the same id) can't vote for this name twice
        group_idxs = vt.group_indices(combo_ids)[1]
        flagged_idxs = [idxs[fs.take(idxs).argmax()] for idxs in group_idxs]
        # Detail: sorting the idxs preserves summation order;
        # this fixes the numerical issue where nsum and csum were off
        flagged_idxs = np.sort(flagged_idxs)
        name_score = fs.take(flagged_idxs).sum()

        nsum_score_list.append(name_score)
    nsum_score_list = np.array(nsum_score_list)

    return nsum_score_list
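
The deduplicate-then-sort detail in the loop above is worth isolating. A minimal plain-NumPy sketch (the toy data is hypothetical): for each group of matches sharing a combo id, keep the index of the best score, then sort the kept indices so the summation order matches the chip-sum (csum) path and avoids the floating-point discrepancy the comment mentions.

import numpy as np

combo_ids = np.array([7, 3, 7, 3, 5])
fs = np.array([.2, .6, .9, .1, .4])
group_idxs = [np.flatnonzero(combo_ids == u) for u in np.unique(combo_ids)]
# keep the best-scoring index per group, then sort to preserve summation order
flagged_idxs = np.sort([idxs[fs.take(idxs).argmax()] for idxs in group_idxs])
name_score = fs.take(flagged_idxs).sum()  # 0.6 + 0.9 + 0.4 = 1.9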