Example #1
0
def show_toy_distributions(toy_params):
    """Plot the toy true/false-match score distributions.

    Args:
        toy_params (dict): maps True/False to keyword arguments for
            ``vt.gauss_func1d`` (presumably ``mu``/``sigma`` — confirm
            against the caller).
    """
    import vtool as vt
    import wbia.plottool as pt

    pt.ensureqt()
    domain = np.linspace(0, 8, 1000)
    # Evaluate the true-positive curve first, then the false-positive one,
    # so the label/color lists below line up.
    curves = [vt.gauss_func1d(domain, **toy_params[flag]) for flag in (True, False)]
    pt.plot_probabilities(
        curves,
        ['TP', 'TF'],
        prob_colors=[pt.TRUE_BLUE, pt.FALSE_RED],
        xdata=domain,
        figtitle='Toy Distributions',
    )
Example #2
0
def show_top_featmatches(qreq_, cm_list):
    """
    Interactively visualize warped patches for the top-scoring feature
    matches pooled across a list of chipmatches.

    Args:
        qreq_ (wbia.QueryRequest):  query request object with hyper-parameters
        cm_list (list): chipmatch objects to pool feature matches from

    SeeAlso:
        python -m wbia --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST -t best:lnbnn_on=True,lnbnn_normalizer=normlnbnn-test -a default --sephack

        python -m wbia --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 -t best:lnbnn_on=True -a timectrl --sephack
        python -m wbia --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST -t best:lnbnn_on=True -a default:size=30 --sephack
        python -m wbia --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST -t best:K=1,Knorm=5,lnbnn_on=True -a default:size=30 --sephack
        python -m wbia --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST -t best:K=1,Knorm=3,lnbnn_on=True -a default --sephack


    CommandLine:
        python -m wbia.viz.viz_nearest_descriptors --exec-show_top_featmatches --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.viz.viz_nearest_descriptors import *  # NOQA
        >>> import wbia
        >>> cm_list, qreq_ = wbia.testdata_cmlist(defaultdb='PZ_MTEST',
        >>>                                        a=['default:has_none=mother,size=30'])
        >>> show_top_featmatches(qreq_, cm_list)
        >>> ut.quit_if_noshow()
        >>> import wbia.plottool as pt
        >>> ut.show_if_requested()
    """
    import numpy as np
    import vtool as vt
    from functools import partial

    # Stack the flat feature-match info from every chipmatch into a single
    # dict of concatenated arrays (keys include 'aid1', 'aid2', 'fm', 'fsv').
    ibs = qreq_.ibs
    infos = [cm.get_flat_fm_info() for cm in cm_list]
    flat_metadata = dict([(k, np.concatenate(v))
                          for k, v in ut.dict_stack2(infos).items()])
    # Collapse per-match score-vector columns into one scalar score.
    fsv_flat = flat_metadata['fsv']
    flat_metadata['fs'] = fsv_flat.prod(axis=1)
    aids1 = flat_metadata['aid1'][:, None]
    aids2 = flat_metadata['aid2'][:, None]
    flat_metadata['aid_pairs'] = np.concatenate([aids1, aids2], axis=1)

    # Take a sample of the metadata: the top-scoring matches, 3 per chipmatch.
    sortx = flat_metadata['fs'].argsort()[::-1]
    num = len(cm_list) * 3
    taker = partial(np.take, indices=sortx[:num], axis=0)
    flat_metadata_top = ut.map_dict_vals(taker, flat_metadata)
    aid1s, aid2s, fms = ut.dict_take(flat_metadata_top, ['aid1', 'aid2', 'fm'])

    # Lazy annotation accessors for every annot involved in a top match.
    # (A dead `annots = {}` assignment that was immediately overwritten by
    # this comprehension has been removed.)
    aids = np.unique(np.hstack((aid1s, aid2s)))
    annots = {
        aid: ibs.get_annot_lazy_dict(aid, config2_=qreq_.qparams)
        for aid in aids
    }

    # Boolean labels: True where the pair is a confirmed positive match.
    label_lists = (ibs.get_match_truths(
        aid1s, aid2s) == ibs.const.EVIDENCE_DECISION.POSITIVE)
    patch_size = 64

    def extract_patches(annots, aid, fxs):
        """ custom_func(lazydict, key, subkeys) for multigroup_lookup """
        annot = annots[aid]
        kpts = annot['kpts']
        rchip = annot['rchip']
        # Warp each selected keypoint region into a fixed-size patch.
        kpts_m = kpts.take(fxs, axis=0)
        warped_patches, warped_subkpts = vt.get_warped_patches(
            rchip, kpts_m, patch_size=patch_size)
        return warped_patches

    data_lists = vt.multigroup_lookup(annots, [aid1s, aid2s], fms.T,
                                      extract_patches)

    import wbia.plottool as pt  # NOQA

    pt.ensureqt()
    import wbia_cnn

    # Launch the interactive patch viewer (note: 'chunck_sizes' is the
    # spelling the wbia_cnn API expects).
    inter = wbia_cnn.draw_results.interact_patches(
        label_lists,
        data_lists,
        flat_metadata_top,
        chunck_sizes=(2, 4),
        ibs=ibs,
        hack_one_per_aid=False,
        sortby='fs',
        qreq_=qreq_,
    )
    inter.show()
Example #3
0
def crftest():
    """
    Scratchpad comparing graphical-model inference backends (pystruct and
    opengm) on a toy annotation-naming problem.  Exploratory code: several
    intermediate results are computed but intentionally unused (# NOQA).

    Install notes:

    pip install pyqpbo
    pip install pystruct

    http://taku910.github.io/crfpp/#install

    cd ~/tmp
    #wget https://drive.google.com/folderview?id=0B4y35FiV1wh7fngteFhHQUN2Y1B5eUJBNHZUemJYQV9VWlBUb3JlX0xBdWVZTWtSbVBneU0&usp=drive_web#list
    7z x CRF++-0.58.tar.gz
    7z x CRF++-0.58.tar
    cd CRF++-0.58
    chmod +x configure
    ./configure
    make

    """
    import pystruct
    import pystruct.models

    inference_method_options = ['lp', 'max-product']
    inference_method = inference_method_options[1]

    # graph = pystruct.models.GraphCRF(
    #    n_states=None,
    #    n_features=None,
    #    inference_method=inference_method,
    #    class_weight=None,
    #    directed=False,
    # )

    num_annots = 5
    num_names = num_annots

    # Fixed seed so the hidden name assignment is reproducible.
    aids = np.arange(5)
    rng = np.random.RandomState(0)
    hidden_nids = rng.randint(0, num_names, num_annots)
    unique_nids, groupxs = ut.group_indices(hidden_nids)

    # Indicator vector indicating the name
    node_features = np.zeros((num_annots, num_names))
    node_features[(aids, hidden_nids)] = 1

    # Toy score distributions: same-name pairs score low (mu=1.0),
    # different-name pairs score high (mu=7.0).
    toy_params = {True: {'mu': 1.0, 'sigma': 2.2}, False: {'mu': 7.0, 'sigma': 0.9}}
    if False:
        # Optional visualization of the two toy distributions (disabled).
        import vtool as vt
        import wbia.plottool as pt

        pt.ensureqt()
        xdata = np.linspace(0, 100, 1000)
        tp_pdf = vt.gauss_func1d(xdata, **toy_params[True])
        fp_pdf = vt.gauss_func1d(xdata, **toy_params[False])
        pt.plot_probabilities([tp_pdf, fp_pdf], ['TP', 'TF'], xdata=xdata)

    def metric(aidx1, aidx2, hidden_nids=hidden_nids, toy_params=toy_params):
        # Deterministic pseudo-random pairwise score: seeded by the pair so
        # metric(a, b) == metric(b, a); 0 on the diagonal, clipped at 0.
        if aidx1 == aidx2:
            return 0
        rng = np.random.RandomState(int(aidx1 + aidx2))
        same = hidden_nids[int(aidx1)] == hidden_nids[int(aidx2)]
        mu, sigma = ut.dict_take(toy_params[same], ['mu', 'sigma'])
        return np.clip(rng.normal(mu, sigma), 0, np.inf)

    # All ordered annot-index pairs, their ground-truth labels, and scores.
    pairwise_aidxs = list(ut.iprod(range(num_annots), range(num_annots)))
    pairwise_labels = np.array(  # NOQA
        [hidden_nids[a1] == hidden_nids[a2] for a1, a2 in pairwise_aidxs]
    )
    pairwise_scores = np.array([metric(*zz) for zz in pairwise_aidxs])
    pairwise_scores_mat = pairwise_scores.reshape(num_annots, num_annots)  # NOQA

    graph = pystruct.models.EdgeFeatureGraphCRF(  # NOQA
        n_states=num_annots,
        n_features=num_names,
        n_edge_features=1,
        inference_method=inference_method,
    )

    import opengm

    # OpenGM experiment 1: unary-only model solved with Multicut.
    # NOTE(review): the workflow string '(IC)(TTC-I,CC-I)' is an opengm
    # Multicut parameter — verify against the opengm docs.
    numVar = 10
    unaries = np.ones([numVar, 3], dtype=opengm.value_type)
    gm = opengm.gm(np.ones(numVar, dtype=opengm.label_type) * 3)
    unary_fids = gm.addFunctions(unaries)
    gm.addFactors(unary_fids, np.arange(numVar))
    infParam = opengm.InfParam(workflow=ut.ensure_ascii('(IC)(TTC-I,CC-I)'))
    inf = opengm.inference.Multicut(gm, parameter=infParam)
    visitor = inf.verboseVisitor(printNth=1, multiline=False)
    inf.infer(visitor)
    arg = inf.arg()

    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)
    # regularizer = opengm.pottsFunction([3, 3], 0.0, beta)
    # gridVariableIndices = opengm.secondOrderGridVis(img.shape[0], img.shape[1])
    # fid = gm.addFunction(regularizer)
    # gm.addFactors(fid, gridVariableIndices)

    # OpenGM experiment 2: 2D grid model with a Potts regularizer solved
    # with GraphCut (uses the global numpy RNG — not seeded).
    unaries = np.random.rand(10, 10, 2)
    potts = opengm.PottsFunction([2, 2], 0.0, 0.4)
    gm = opengm.grid2d2Order(unaries=unaries, regularizer=potts)

    inf = opengm.inference.GraphCut(gm)
    inf.infer()
    arg = inf.arg()  # NOQA
Example #4
0
def learn_prob_score(num_scores=5, pad=55, ret_enc=False, use_cache=None):
    r"""
    Learn a discretized P(same | score) table from synthetic 1-vs-1 data.

    Args:
        num_scores (int): number of discrete score bins (default = 5)
        pad (int): samples trimmed from each end of the learned score domain
        ret_enc (bool): if True, also return the fitted encoder
        use_cache: unused here; kept for interface compatibility

    Returns:
        tuple: (discr_domain, discr_p_same)

    CommandLine:
        python -m wbia.unstable.demobayes --exec-learn_prob_score --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.unstable.demobayes import *  # NOQA
        >>> num_scores = 2
        >>> (discr_domain, discr_p_same, encoder) = learn_prob_score(num_scores, ret_enc=True, use_cache=False)
        >>> print('discr_p_same = %r' % (discr_p_same,))
        >>> ut.quit_if_noshow()
        >>> import wbia.plottool as pt
        >>> encoder.visualize()
        >>> ut.show_if_requested()
    """
    import vtool as vt

    # Synthetic training data: match scores with ground-truth labels for
    # 200 annotations drawn from 5 names.
    toy_data = get_toy_data_1v1(200, 5)
    diag_scores, diag_labels = ut.dict_take(
        toy_data, 'diag_scores, diag_labels'.split(', '))

    # Learn P(S_{ij} | M_{ij}) via a monotonized score normalizer.
    encoder = vt.ScoreNormalizer(reverse=True, monotonize=True, adjust=4)
    encoder.fit(X=diag_scores, y=diag_labels, verbose=True)

    if False:
        # Optional debug visualization of the fitted encoder (disabled).
        import wbia.plottool as pt

        pt.ensureqt()
        encoder.visualize()

    # Subsample the learned curve onto `num_scores` bins, skipping `pad`
    # samples at each boundary, then renormalize to a proper distribution.
    p_same_curve = encoder.p_tp_given_score / encoder.p_tp_given_score.sum()
    usable = len(p_same_curve) - (pad * 2)
    step = int(np.ceil(usable / num_scores))
    sample_idxs = np.arange(0, usable, step) + pad
    discr_p_same = p_same_curve.take(sample_idxs)
    discr_p_same = discr_p_same / discr_p_same.sum()
    discr_domain = encoder.score_domain.take(sample_idxs)

    if ret_enc:
        return discr_domain, discr_p_same, encoder
    return discr_domain, discr_p_same