Example #1
def gridsearch_coverage_grid_mask():
    """
    CommandLine:
        python -m vtool.coverage_grid --test-gridsearch_coverage_grid_mask --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from vtool.coverage_grid import *  # NOQA
        >>> import plottool as pt
        >>> gridsearch_coverage_grid_mask()
        >>> pt.show_if_requested()
    """
    import plottool as pt
    cfgdict_list, cfglbl_list = get_coverage_grid_gridsearch_configs()
    kpts, chipsize, weights = coverage_kpts.testdata_coverage('easy1.png')
    gridmask_list = [
        255 * make_grid_coverage_mask(kpts, chipsize, weights, **cfgdict)
        for cfgdict in ut.ProgressIter(cfgdict_list, lbl='coverage grid')
    ]
    NORMHACK = False
    if NORMHACK:
        gridmask_list = [
            255 * (gridmask / gridmask.max()) for gridmask in gridmask_list
        ]

    fnum = 1
    ut.interact_gridsearch_result_images(
        pt.imshow, cfgdict_list, cfglbl_list,
        gridmask_list, fnum=fnum, figtitle='coverage grid', unpack=False,
        max_plots=25)

    pt.iup()
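The 255 * factor in the list comprehension above rescales a float coverage mask in [0, 1] into 8-bit range for display. A minimal standalone sketch of that step (the random mask is a hypothetical stand-in for the real coverage output):

import numpy as np

mask = np.random.rand(32, 32)        # stand-in coverage mask in [0, 1]
img = (255 * mask).astype(np.uint8)  # now displayable with pt.imshow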
Example #2
def gridsearch_image_function(param_info,
                              test_func,
                              args=tuple(),
                              show_func=None):
    """
    gridsearch for a function that produces a single image
    """
    import plottool as pt
    cfgdict_list, cfglbl_list = param_info.get_gridsearch_input(
        defaultslice=slice(0, 10))
    fnum = pt.ensure_fnum(None)
    if show_func is None:
        show_func = pt.imshow
    lbl = ut.get_funcname(test_func)
    cfgresult_list = [
        test_func(*args, **cfgdict)
        for cfgdict in ut.ProgressIter(cfgdict_list, lbl=lbl)
    ]
    onclick_func = None
    ut.interact_gridsearch_result_images(show_func,
                                         cfgdict_list,
                                         cfglbl_list,
                                         cfgresult_list,
                                         fnum=fnum,
                                         figtitle=lbl,
                                         unpack=False,
                                         max_plots=25,
                                         onclick_func=onclick_func)
    pt.iup()
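A minimal usage sketch for gridsearch_image_function, assuming utool (ut) and OpenCV are importable. The blur_image test function and its parameter grid are hypothetical; the ut.ParamInfoList / ut.ParamInfo pattern follows the chip-extraction example later in this section:

import cv2
import utool as ut

def blur_image(img, ksize=3):
    # hypothetical test function: one image in, one image out
    return cv2.GaussianBlur(img, (ksize, ksize), 0)

img = cv2.imread(ut.grab_test_imgpath('carl.jpg'))
param_info = ut.ParamInfoList('blur_params', [
    ut.ParamInfo('ksize', 3, varyvals=[3, 5, 9, 15]),
])
gridsearch_image_function(param_info, blur_image, args=(img,))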
Example #3
def gridsearch_coverage_grid():
    """
    CommandLine:
        python -m vtool.coverage_grid --test-gridsearch_coverage_grid --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from vtool.coverage_grid import *  # NOQA
        >>> import plottool as pt
        >>> gridsearch_coverage_grid()
        >>> pt.show_if_requested()
    """
    import plottool as pt
    fname = None  # 'easy1.png'
    kpts, chipsize, weights = coverage_kpts.testdata_coverage(fname)
    if len(kpts) > 100:
        kpts = kpts[::100]
        weights = weights[::100]
    cfgdict_list, cfglbl_list = get_coverage_grid_gridsearch_configs()
    coverage_gridtup_list = [
        sparse_grid_coverage(kpts, chipsize, weights, **cfgdict)
        for cfgdict in ut.ProgressIter(cfgdict_list, lbl='coverage grid')
    ]

    fnum = 1
    with ut.Timer('plotting gridsearch'):
        ut.interact_gridsearch_result_images(
            show_coverage_grid, cfgdict_list, cfglbl_list,
            coverage_gridtup_list, fnum=fnum, figtitle='coverage grid', unpack=True,
            max_plots=25)

    pt.iup()
Example #4
def gridsearch_kpts_coverage_mask():
    """
    testing function

    CommandLine:
        python -m vtool.coverage_kpts --test-gridsearch_kpts_coverage_mask --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from vtool.coverage_kpts import *  # NOQA
        >>> import plottool as pt
        >>> gridsearch_kpts_coverage_mask()
        >>> pt.show_if_requested()
    """
    import plottool as pt
    cfgdict_list, cfglbl_list = get_coverage_kpts_gridsearch_configs()
    kpts, chipsize, weights = testdata_coverage('easy1.png')
    imgmask_list = [
        255 * make_kpts_coverage_mask(kpts, chipsize, weights,
                                      return_patch=False, **cfgdict)
        for cfgdict in ut.ProgressIter(cfgdict_list, lbl='coverage grid')
    ]
    #NORMHACK = True
    #if NORMHACK:
    #    imgmask_list = [
    #        255 * (mask / mask.max()) for mask in imgmask_list
    #    ]
    fnum = pt.next_fnum()
    ut.interact_gridsearch_result_images(
        pt.imshow, cfgdict_list, cfglbl_list,
        imgmask_list, fnum=fnum, figtitle='coverage image', unpack=False,
        max_plots=25)
    pt.iup()
Example #5
def visualize_vocab_word(ibs, invassign, wx, fnum=None):
    """

    Example:
        >>> from ibeis.new_annots import *  # NOQA
        >>> import plottool as pt
        >>> pt.qt4ensure()
        >>> ibs, aid_list, vocab = testdata_vocab()
        >>> #aid_list = aid_list[0:1]
        >>> fstack = StackedFeatures(ibs, aid_list)
        >>> nAssign = 2
        >>> invassign = fstack.inverted_assignment(vocab, nAssign)
        >>> sortx = ut.argsort(invassign.num_list)[::-1]
        >>> wx_list = ut.take(invassign.wx_list, sortx)
        >>> wx = wx_list[0]
        >>> visualize_vocab_word(ibs, invassign, wx)
    """
    import plottool as pt
    pt.qt4ensure()
    vecs = invassign.get_vecs(wx)
    word = invassign.vocab.wx2_word[wx]

    word_patches = invassign.get_patches(wx)
    average_patch = np.mean(word_patches, axis=0)

    #average_vec = vecs.mean(axis=0)
    # use the vocab word (cluster center) as the representative descriptor
    average_vec = word

    with_sift = True
    fnum = pt.ensure_fnum(fnum)
    if with_sift:
        patch_img = pt.render_sift_on_patch(average_patch, average_vec)
        #sift_word_patches = [pt.render_sift_on_patch(patch, vec) for patch, vec in ut.ProgIter(list(zip(word_patches, vecs)))]
        #stacked_patches = vt.stack_square_images(word_patches)
        #stacked_patches = vt.stack_square_images(sift_word_patches)
    else:
        patch_img = average_patch
    stacked_patches = vt.stack_square_images(word_patches)
    solidbar = np.zeros((patch_img.shape[0], int(patch_img.shape[1] * .1), 3),
                        dtype=patch_img.dtype)
    border_color = (100, 10, 10)  # bgr, darkblue
    if ut.is_float(solidbar):
        solidbar[:, :, :] = (np.array(border_color) / 255)[None, None]
    else:
        solidbar[:, :, :] = np.array(border_color)[None, None]
    word_img = vt.stack_image_list([patch_img, solidbar, stacked_patches],
                                   vert=False,
                                   modifysize=True)
    pt.imshow(word_img, fnum=fnum)
    #pt.imshow(patch_img, pnum=(1, 2, 1), fnum=fnum)
    #patch_size = 64
    #half_size = patch_size / 2
    #pt.imshow(stacked_patches, pnum=(1, 2, 2), fnum=fnum)
    pt.iup()
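The solid border bar above is just a thin constant-color image stacked between panels. A numpy-only sketch of the same trick for two hypothetical uint8 images of equal height:

import numpy as np

img1 = np.zeros((64, 64, 3), dtype=np.uint8)
img2 = np.full((64, 96, 3), 255, dtype=np.uint8)
bar = np.zeros((64, int(64 * .1), 3), dtype=np.uint8)
bar[:, :, :] = np.array((100, 10, 10))[None, None]  # BGR dark blue, as above
combo = np.hstack([img1, bar, img2])                # side-by-side with divider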
Example #6
def theano_gradient_funtimes():
    import theano
    import numpy as np
    import theano.tensor as T
    import lasagne
    import ibeis_cnn.theano_ext as theano_ext

    TEST = True

    x_data = np.linspace(-10, 10, 100).astype(np.float32)[:, None, None, None]
    y_data = (x_data**2).flatten()[:, None]

    X = T.tensor4('x')
    y = T.matrix('y')

    #x_data_batch =
    #y_data_batch =
    inputs_to_value = {X: x_data[0:16], y: y_data[0:16]}

    l_in = lasagne.layers.InputLayer((16, 1, 1, 1))
    l_out = lasagne.layers.DenseLayer(
        l_in,
        num_units=1,
        nonlinearity=lasagne.nonlinearities.rectify,
        W=lasagne.init.Orthogonal())

    network_output = lasagne.layers.get_output(l_out, X)

    # TEST NETWORK OUTPUT

    if TEST:
        result = theano_ext.eval_symbol(network_output, inputs_to_value)
        print('network_output = %r' % (result, ))

    loss_function = lasagne.objectives.squared_error
    #def loss_function(network_output, labels):
    #    return (network_output - labels) ** 2

    losses = loss_function(network_output, y)
    if TEST:
        result = theano_ext.eval_symbol(losses, inputs_to_value)
        print('losses = %r' % (result, ))

    loss = lasagne.objectives.aggregate(losses, mode='mean')

    if TEST:
        result = theano_ext.eval_symbol(loss, inputs_to_value)
        print('loss = %r' % (result, ))

    L2 = lasagne.regularization.regularize_network_params(
        l_out, lasagne.regularization.l2)
    weight_decay = .0001
    loss_regularized = loss + weight_decay * L2
    loss_regularized.name = 'loss_regularized'

    parameters = lasagne.layers.get_all_params(l_out)

    gradients_regularized = theano.grad(loss_regularized,
                                        parameters,
                                        add_names=True)

    if TEST:
        if False:
            s = T.sum(1 / (1 + T.exp(-X)))
            s.name = 's'
            gs = T.grad(s, X, add_names=True)
            theano.pp(gs)
            inputs_to_value = {X: x_data[0:16], y: y_data[0:16]}
            result = theano_ext.eval_symbol(gs, inputs_to_value)
            print('%s = %r' % (
                gs.name,
                result,
            ))
            inputs_to_value = {X: x_data[16:32], y: y_data[16:32]}
            result = theano_ext.eval_symbol(gs, inputs_to_value)
            print('%s = %r' % (
                gs.name,
                result,
            ))

        for grad in gradients_regularized:
            result = theano_ext.eval_symbol(grad, inputs_to_value)
            print('%s = %r' % (
                grad.name,
                result,
            ))

        # theano.grad needs a scalar cost, so aggregate the losses first
        grad_on_losses = theano.grad(losses.mean(), parameters, add_names=True)

    learning_rate_theano = .0001
    momentum = .9
    updates = lasagne.updates.nesterov_momentum(gradients_regularized,
                                                parameters,
                                                learning_rate_theano, momentum)

    X_batch = T.tensor4('x_batch')
    y_batch = T.matrix('y_batch')  # must match the type of y in givens

    func = theano.function(
        inputs=[theano.Param(X_batch),
                theano.Param(y_batch)],
        outputs=[network_output, losses],
        #updates=updates,
        givens={
            X: X_batch,
            y: y_batch,
        },
    )

    y_predict_batch, loss_batch = func(inputs_to_value[X], inputs_to_value[y])

    if ut.inIPython():
        import IPython
        IPython.get_ipython().magic('pylab qt4')

    import plottool as pt
    # plot the first batch of predictions against the inputs they came from
    pt.plot(x_data[0:16].flatten(), y_predict_batch.flatten())
    pt.iup()
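theano_ext.eval_symbol above appears to compile and evaluate a symbolic expression at concrete inputs; a minimal sketch of the same idea in plain Theano (no ibeis_cnn required):

import numpy as np
import theano
import theano.tensor as T

# build a symbolic expression and evaluate it via a one-off compiled function
X = T.matrix('X')
expr = (X ** 2).sum()
func = theano.function(inputs=[X], outputs=expr)
print(func(np.ones((2, 3), dtype=theano.config.floatX)))  # -> 6.0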
Example #7
def myquery():
    r"""

    BUG::
        THERE IS A BUG SOMEWHERE: HOW IS THIS POSSIBLE?
        If everything is weighted, how did the true positive even get a score
        while the true negative did not?
        qres_copy.filtkey_list = ['ratio', 'fg', 'homogerr', 'distinctiveness']
        CORRECT STATS
        {
            'max'  : [0.832, 0.968, 0.604, 0.000],
            'min'  : [0.376, 0.524, 0.000, 0.000],
            'mean' : [0.561, 0.924, 0.217, 0.000],
            'std'  : [0.114, 0.072, 0.205, 0.000],
            'nMin' : [1, 1, 1, 51],
            'nMax' : [1, 1, 1, 1],
            'shape': (52, 4),
        }
        INCORRECT STATS
        {
            'max'  : [0.759, 0.963, 0.264, 0.000],
            'min'  : [0.379, 0.823, 0.000, 0.000],
            'mean' : [0.506, 0.915, 0.056, 0.000],
            'std'  : [0.125, 0.039, 0.078, 0.000],
            'nMin' : [1, 1, 1, 24],
            'nMax' : [1, 1, 1, 1],
            'shape': (26, 4),
        }
        #   score_diff,  tp_score,  tn_score,       p,   K,  dcvs_clip_max,  fg_power,  homogerr_power
             0.494,     0.494,     0.000,  73.000,   2,          0.500,     0.100,          10.000

    See how separability changes as we vary things.

    CommandLine:
        python -m ibeis.algo.hots.devcases --test-myquery
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 0
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 1
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 2

    References:
        http://en.wikipedia.org/wiki/Pareto_distribution <- look into

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.all_imports import *  # NOQA
        >>> from ibeis.algo.hots.devcases import *  # NOQA
        >>> ut.dev_ipython_copypaster(myquery) if ut.inIPython() else myquery()
        >>> pt.show_if_requested()
    """
    from ibeis.algo.hots import special_query  # NOQA
    from ibeis.algo.hots import distinctiveness_normalizer  # NOQA
    from ibeis import viz  # NOQA
    import plottool as pt
    index = ut.get_argval('--index', int, 0)
    ibs, aid1, aid2, tn_aid = testdata_my_exmaples(index)
    qaids = [aid1]
    daids = [aid2] + [tn_aid]
    qvuuid = ibs.get_annot_visual_uuids(aid1)

    cfgdict_vsone = dict(
        sv_on=True,
        #sv_on=False,
        #codename='vsone_unnorm_dist_ratio_extern_distinctiveness',
        codename='vsone_unnorm_ratio_extern_distinctiveness',
        sver_output_weighting=True,
    )

    use_cache = False
    save_qcache = False

    qres_list, qreq_ = ibs.query_chips(qaids,
                                       daids,
                                       cfgdict=cfgdict_vsone,
                                       return_request=True,
                                       use_cache=use_cache,
                                       save_qcache=save_qcache,
                                       verbose=True)

    qreq_.load_distinctiveness_normalizer()
    qres = qres_list[0]
    top_aids = qres.get_top_aids()  # NOQA
    qres_orig = qres  # NOQA

    def test_config(qreq_, qres_orig, cfgdict):
        """ function to grid search over """
        qres_copy = copy.deepcopy(qres_orig)
        qreq_vsone_ = qreq_
        qres_vsone = qres_copy
        filtkey = hstypes.FiltKeys.DISTINCTIVENESS
        newfsv_list, newscore_aids = special_query.get_extern_distinctiveness(
            qreq_, qres_copy, **cfgdict)
        special_query.apply_new_qres_filter_scores(qreq_vsone_, qres_vsone,
                                                   newfsv_list, newscore_aids,
                                                   filtkey)
        tp_score = qres_copy.aid2_score[aid2]
        tn_score = qres_copy.aid2_score[tn_aid]
        return qres_copy, tp_score, tn_score

    #[.01, .1, .2, .5, .6, .7, .8, .9, 1.0]),
    #FiltKeys = hstypes.FiltKeys
    # FIXME: Use other way of doing gridsearch
    grid_basis = distinctiveness_normalizer.DCVS_DEFAULT.get_grid_basis()
    gridsearch = ut.GridSearch(grid_basis, label='qvuuid=%r' % (qvuuid, ))
    print('Begin Grid Search')
    for cfgdict in ut.ProgressIter(gridsearch, lbl='GridSearch'):
        qres_copy, tp_score, tn_score = test_config(qreq_, qres_orig, cfgdict)
        gridsearch.append_result(tp_score, tn_score)
    print('Finish Grid Search')

    # Get best result
    best_cfgdict = gridsearch.get_rank_cfgdict()
    qres_copy, tp_score, tn_score = test_config(qreq_, qres_orig, best_cfgdict)

    # Examine closely what you can do with scores
    if False:
        qres_copy = copy.deepcopy(qres_orig)
        qreq_vsone_ = qreq_
        filtkey = hstypes.FiltKeys.DISTINCTIVENESS
        newfsv_list, newscore_aids = special_query.get_extern_distinctiveness(
            qreq_, qres_copy, **cfgdict)
        ut.embed()

        def make_cm_very_old_tuple(qres_copy):
            assert ut.listfind(qres_copy.filtkey_list, filtkey) is None
            weight_filters = hstypes.WEIGHT_FILTERS
            weight_filtxs, nonweight_filtxs = special_query.index_partition(
                qres_copy.filtkey_list, weight_filters)

            aid2_fsv = {}
            aid2_fs = {}
            aid2_score = {}

            for new_fsv_vsone, daid in zip(newfsv_list, newscore_aids):
                pass
                break
                #scorex_vsone  = ut.listfind(qres_copy.filtkey_list, filtkey)
                #if scorex_vsone is None:
                # TODO: add spatial verification as a filter score
                # augment the vsone scores
                # TODO: paramaterize
                weighted_ave_score = True
                if weighted_ave_score:
                    # weighted average scoring
                    new_fs_vsone = special_query.weighted_average_scoring(
                        new_fsv_vsone, weight_filtxs, nonweight_filtxs)
                else:
                    # product scoring
                    new_fs_vsone = special_query.product_scoring(new_fsv_vsone)
                new_score_vsone = new_fs_vsone.sum()
                aid2_fsv[daid] = new_fsv_vsone
                aid2_fs[daid] = new_fs_vsone
                aid2_score[daid] = new_score_vsone
            return aid2_fsv, aid2_fs, aid2_score

        # Look at plot of query products
        for new_fsv_vsone, daid in zip(newfsv_list, newscore_aids):
            new_fs_vsone = special_query.product_scoring(new_fsv_vsone)
            scores_list = np.array(new_fs_vsone)[:, None].T
            pt.plot_sorted_scores(scores_list,
                                  logscale=False,
                                  figtitle=str(daid))
        pt.iup()
        special_query.apply_new_qres_filter_scores(qreq_vsone_, qres_copy,
                                                   newfsv_list, newscore_aids,
                                                   filtkey)

    # PRINT INFO
    import functools
    #ut.rrrr()
    get_stats_str = functools.partial(ut.get_stats_str,
                                      axis=0,
                                      newlines=True,
                                      precision=3)
    tp_stats_str = ut.align(get_stats_str(qres_copy.aid2_fsv[aid2]), ':')
    tn_stats_str = ut.align(get_stats_str(qres_copy.aid2_fsv[tn_aid]), ':')
    info_str_list = []
    info_str_list.append('qres_copy.filtkey_list = %r' %
                         (qres_copy.filtkey_list, ))
    info_str_list.append('CORRECT STATS')
    info_str_list.append(tp_stats_str)
    info_str_list.append('INCORRECT STATS')
    info_str_list.append(tn_stats_str)
    info_str = '\n'.join(info_str_list)
    print(info_str)

    # SHOW BEST RESULT
    #qres_copy.ishow_top(ibs, fnum=pt.next_fnum())
    #qres_orig.ishow_top(ibs, fnum=pt.next_fnum())

    # Text Information
    param_lbl = 'dcvs_power'
    param_stats_str = gridsearch.get_dimension_stats_str(param_lbl)
    print(param_stats_str)

    csvtext = gridsearch.get_csv_results(10)
    print(csvtext)

    # Parameter visualization
    fnum = pt.next_fnum()
    # plot parameter influence
    param_label_list = gridsearch.get_param_lbls()
    pnum_ = pt.get_pnum_func(2, len(param_label_list))
    for px, param_label in enumerate(param_label_list):
        gridsearch.plot_dimension(param_label, fnum=fnum, pnum=pnum_(px))
    # plot match figure
    pnum2_ = pt.get_pnum_func(2, 2)
    qres_copy.show_matches(ibs, aid2, fnum=fnum, pnum=pnum2_(2))
    qres_copy.show_matches(ibs, tn_aid, fnum=fnum, pnum=pnum2_(3))
    # Add figure labels
    figtitle = 'Effect of parameters on vsone separation for a single case'
    subtitle = 'qvuuid = %r' % (qvuuid)
    figtitle += '\n' + subtitle
    pt.set_figtitle(figtitle)
    # Save Figure
    #fig_fpath = pt.save_figure(usetitle=True)
    #print(fig_fpath)
    # Write CSV Results
    #csv_fpath = fig_fpath + '.csv.txt'
    #ut.write_to(csv_fpath, csvtext)

    #qres_copy.ishow_top(ibs)
    #from matplotlib import pyplot as plt
    #plt.show()
    #print(ut.list_str()))
    # TODO: plot max variation dims
    #import plottool as pt
    #pt.plot(p_list, diff_list)
    """
Example #8
def flann_index_time_experiment():
    r"""

    Shows a plot of how long it takes to build a FLANN index for a given number of KD-trees.

    CommandLine:
        python -m vtool.nearest_neighbors --test-flann_index_time_experiment

    Example:
        >>> # SLOW_DOCTEST
        >>> from vtool.nearest_neighbors import *  # NOQA
        >>> result = flann_index_time_experiment()
        >>> print(result)
    """
    import vtool as vt
    import pyflann
    import itertools

    class TestDataPool(object):
        """
        Perform only a few allocations of test data
        """
        def __init__(self):
            self.num = 10000
            self.data_pool = None
            self.alloc_pool(1000000)

        def alloc_pool(self, num):
            print('[alloc] num = %r' % (num, ))
            self.num = num
            self.data_pool = vt.tests.dummy.testdata_dummy_sift(num)
            print('[alloc] object size ' +
                  ut.get_object_size_str(self.data_pool, 'data_pool'))

        def get_testdata(self, num):
            if len(self.data_pool) < num:
                self.alloc_pool(2 * self.num)
            return self.data_pool[0:num]

    pool = TestDataPool()

    def get_buildtime_data(**kwargs):
        flann_params = vt.get_flann_params(**kwargs)
        print('flann_params = %r' % (ut.dict_str(flann_params), ))
        data_list = []
        num = 1000
        print('-----')
        for count in ut.ProgressIter(itertools.count(),
                                     nTotal=-1,
                                     freq=1,
                                     autoadjust=False):
            num = int(num * 1.2)
            print('num = %r' % (num, ))
            #if num > 1E6:
            #    break
            data = pool.get_testdata(num)
            print('object size ' + ut.get_object_size_str(data, 'data'))
            flann = pyflann.FLANN(**flann_params)
            with ut.Timer(verbose=False) as t:
                flann.build_index(data)
            print('t.ellapsed = %r' % (t.ellapsed, ))
            if t.ellapsed > 5 or count > 1000:
                break
            data_list.append((count, num, t.ellapsed))
            print('-----')
        return data_list, flann_params

    data_list1, params1 = get_buildtime_data(trees=1)

    data_list2, params2 = get_buildtime_data(trees=2)

    data_list4, params4 = get_buildtime_data(trees=4)

    data_list8, params8 = get_buildtime_data(trees=8)

    data_list16, params16 = get_buildtime_data(trees=16)

    import plottool as pt

    def plotdata(data_list):
        count_arr = ut.get_list_column(data_list, 1)
        time_arr = ut.get_list_column(data_list, 2)
        pt.plot2(count_arr,
                 time_arr,
                 marker='-o',
                 equal_aspect=False,
                 x_label='num_vectors',
                 y_label='FLANN build time')

    plotdata(data_list1)
    plotdata(data_list2)
    plotdata(data_list4)
    plotdata(data_list8)
    plotdata(data_list16)

    pt.iup()
Example #9
def find_location_disparate_splits(ibs):
    """
    CommandLine:
        python -m ibeis.other.ibsfuncs --test-find_location_disparate_splits

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.other.ibsfuncs import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb('NNP_Master3')
        >>> # execute function
        >>> offending_nids = find_location_disparate_splits(ibs)
        >>> # verify results
        >>> print('offending_nids = %r' % (offending_nids,))

    """
    import scipy.spatial.distance as spdist
    import functools
    #aid_list_count = ibs.get_valid_aids()
    aid_list_count = ibs.filter_aids_count()
    nid_list, gps_track_list, aid_track_list = ibs.get_name_gps_tracks(
        aid_list=aid_list_count)

    # Filter to only multitons
    has_multiple_list = [len(gps_track) > 1 for gps_track in gps_track_list]
    gps_track_list_ = ut.list_compress(gps_track_list, has_multiple_list)
    aid_track_list_ = ut.list_compress(aid_track_list, has_multiple_list)
    nid_list_ = ut.list_compress(nid_list, has_multiple_list)

    # Other properties
    unixtime_track_list_ = ibs.unflat_map(
        ibs.get_annot_image_unixtimes_asfloat, aid_track_list_)

    # Move into arrays
    gpsarr_track_list_ = list(map(np.array, gps_track_list_))
    unixtimearr_track_list_ = [
        np.array(unixtimes)[:, None] for unixtimes in unixtime_track_list_
    ]

    def unixtime_hourdiff(x, y):
        return np.abs(np.subtract(x, y)) / (60**2)

    haversin_pdist = functools.partial(spdist.pdist, metric=ut.haversine)
    unixtime_pdist = functools.partial(spdist.pdist, metric=unixtime_hourdiff)
    # Get distances
    gpsdist_vector_list = list(map(haversin_pdist, gpsarr_track_list_))
    hourdist_vector_list = list(map(unixtime_pdist, unixtimearr_track_list_))

    # Get the speed in kilometers per hour for each animal
    speed_vector_list = [
        gpsdist / hourdist
        for gpsdist, hourdist in zip(gpsdist_vector_list, hourdist_vector_list)
    ]

    #maxhourdist_list = np.array([hourdist_vector.max() for hourdist_vector in hourdist_vector_list])
    maxgpsdist_list = np.array(
        [gpsdist_vector.max() for gpsdist_vector in gpsdist_vector_list])
    maxspeed_list = np.array(
        [speed_vector.max() for speed_vector in speed_vector_list])
    sortx = maxspeed_list.argsort()
    sorted_maxspeed_list = maxspeed_list[sortx]
    #sorted_nid_list = np.array(ut.list_take(nid_list_, sortx))

    if False:
        import plottool as pt
        pt.plot(sorted_maxspeed_list)
        allgpsdist_list = np.array(ut.flatten(gpsdist_vector_list))
        alltimedist_list = np.array(ut.flatten(hourdist_vector_list))

        pt.figure(fnum=1, doclf=True, docla=True)
        alltime_sortx = alltimedist_list.argsort()
        pt.plot(allgpsdist_list[alltime_sortx])
        pt.plot(alltimedist_list[alltime_sortx])
        pt.iup()

        pt.figure(fnum=2, doclf=True, docla=True)
        allgps_sortx = allgpsdist_list.argsort()
        pt.plot(allgpsdist_list[allgps_sortx])
        pt.plot(alltimedist_list[allgps_sortx])
        pt.iup()

        #maxgps_sortx = maxgpsdist_list.argsort()
        #pt.plot(maxgpsdist_list[maxgps_sortx])
        pt.iup()

    maxgps_sortx = maxgpsdist_list.argsort()
    gpsdist_thresh = 15
    sorted_maxgps_list = maxgpsdist_list[maxgps_sortx]
    offending_sortx = maxgps_sortx.compress(
        sorted_maxgps_list > gpsdist_thresh)

    speed_thresh_kph = 6  # kilometers per hour
    # NOTE: this speed criterion overwrites the GPS-distance criterion above
    offending_sortx = sortx.compress(sorted_maxspeed_list > speed_thresh_kph)
    #sorted_isoffending = sorted_maxspeed_list > speed_thresh_kph
    #offending_nids = sorted_nid_list.compress(sorted_isoffending)
    offending_nids = ut.list_take(nid_list_, offending_sortx)
    #offending_speeds = ut.list_take(maxspeed_list, offending_sortx)
    print('offending_nids = %r' % (offending_nids, ))

    for index in offending_sortx:
        print('\n\n--- Offender index=%d ---' % (index, ))
        # Inspect a specific index
        aids = aid_track_list_[index]
        nid = nid_list_[index]
        assert np.all(np.array(ibs.get_annot_name_rowids(aids)) == nid)

        aid1_list, aid2_list = zip(*list(ut.product(aids, aids)))
        annotmatch_rowid_list = ibs.get_annotmatch_rowid_from_superkey(
            aid1_list, aid2_list)
        annotmatch_truth_list = ibs.get_annotmatch_truth(annotmatch_rowid_list)
        annotmatch_truth_list = ut.replace_nones(annotmatch_truth_list, -1)
        truth_mat = np.array(annotmatch_truth_list).reshape(
            (len(aids), len(aids)))

        contrib_rowids = ibs.get_image_contributor_rowid(
            ibs.get_annot_gids(aids))
        contrib_tags = ibs.get_contributor_tag(contrib_rowids)

        print('nid = %r' % (nid, ))
        print('maxspeed = %.2f km/h' % (maxspeed_list[index], ))
        print('aids = %r' % (aids, ))
        print('gpss = %s' % (ut.list_str(gps_track_list_[index]), ))
        print('contribs = %s' % (ut.list_str(contrib_tags), ))

        print('speedist_mat = \n' + ut.numpy_str(
            spdist.squareform(speed_vector_list[index]), precision=2))
        truth_mat_str = ut.numpy_str(truth_mat, precision=2)
        truth_mat_str = truth_mat_str.replace('-1', ' _')

        print('truth_mat = \n' + truth_mat_str)
        print('gpsdist_mat  = \n' + ut.numpy_str(
            spdist.squareform(gpsdist_vector_list[index]), precision=2))
        print('hourdist_mat = \n' + ut.numpy_str(
            spdist.squareform(hourdist_vector_list[index]), precision=2))

    return offending_nids
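A toy check of the speed computation above, with made-up GPS points and timestamps; this assumes ut.haversine takes (lat, lon) pairs and returns kilometers, as the code above implies:

import functools
import numpy as np
import scipy.spatial.distance as spdist
import utool as ut

gps_track = np.array([[-1.35, 36.79],   # hypothetical (lat, lon) fixes
                      [-1.37, 36.81]])
unixtimes = np.array([0.0, 3600.0])[:, None]  # observations one hour apart

haversin_pdist = functools.partial(spdist.pdist, metric=ut.haversine)
gpsdist = haversin_pdist(gps_track)  # pairwise distance in km
hourdist = spdist.pdist(unixtimes, lambda x, y: np.abs(x - y) / (60 ** 2))
speed_kph = gpsdist / hourdist
print(speed_kph)  # a name is flagged when this exceeds speed_thresh_kph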
Example #10
File: chip.py Project: Erotemic/vtool
def gridsearch_chipextract():
    r"""
    CommandLine:
        python -m vtool.chip --test-gridsearch_chipextract --show

    Example:
        >>> # GRIDSEARCH
        >>> from vtool.chip import *  # NOQA
        >>> gridsearch_chipextract()
        >>> ut.show_if_requested()
    """
    import cv2
    test_func = extract_chip_from_img
    if False:
        gpath = ut.grab_test_imgpath('carl.jpg')
        bbox = (100, 3, 100, 100)
        theta = 0.0
        new_size = (58, 34)
    else:
        gpath = '/media/raid/work/GZ_Master1/_ibsdb/images/1524525d-2131-8770-d27c-3a5f9922e9e9.jpg'
        bbox = (450, 373, 2062, 1124)
        theta = 0.0
        old_size = bbox[2:4]
        #target_area = 700 ** 2
        target_area = 1200 ** 2
        new_size = get_scaled_sizes_with_area(target_area, [old_size])[0]
        print('old_size = %r' % (old_size,))
        print('new_size = %r' % (new_size,))
        #new_size = (677, 369)
    imgBGR = gtool.imread(gpath)
    args = (imgBGR, bbox, theta, new_size)
    param_info = ut.ParamInfoList('extract_params', [
        ut.ParamInfo('interpolation', cv2.INTER_LANCZOS4,
                     varyvals=[
                         cv2.INTER_LANCZOS4,
                         cv2.INTER_CUBIC,
                         cv2.INTER_LINEAR,
                         cv2.INTER_NEAREST,
                         #cv2.INTER_AREA
                     ],)
    ])
    show_func = None
    # Generalize
    import plottool as pt
    pt.imshow(imgBGR)  # HACK
    cfgdict_list, cfglbl_list = param_info.get_gridsearch_input(defaultslice=slice(0, 10))
    fnum = pt.ensure_fnum(None)
    if show_func is None:
        show_func = pt.imshow
    lbl = ut.get_funcname(test_func)
    cfgresult_list = [
        test_func(*args, **cfgdict)
        for cfgdict in ut.ProgressIter(cfgdict_list, lbl=lbl)
    ]
    onclick_func = None
    ut.interact_gridsearch_result_images(
        show_func, cfgdict_list, cfglbl_list,
        cfgresult_list, fnum=fnum,
        figtitle=lbl, unpack=False,
        max_plots=25, onclick_func=onclick_func)
    pt.iup()
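The parameter varied above is the OpenCV interpolation flag. A tiny standalone sketch of what those same flags do on a plain cv2.resize (random image, no vtool required):

import cv2
import numpy as np

img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
flags = [cv2.INTER_LANCZOS4, cv2.INTER_CUBIC,
         cv2.INTER_LINEAR, cv2.INTER_NEAREST]
resized_list = [cv2.resize(img, (256, 256), interpolation=flag)
                for flag in flags]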