Пример #1
0
    def show_each_dstncvs_chip(self, dodraw=True):
        """
        Show the query and database chips side by side, each weighted by
        keypoint distinctiveness.

        CommandLine:
            python -m ibeis.viz.interact.interact_matches --test-show_each_dstncvs_chip --show

        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.viz.interact.interact_matches import *  # NOQA
            >>> self = testdata_match_interact(mx=1)
            >>> self.show_each_dstncvs_chip(dodraw=False)
            >>> pt.show_if_requested()
        """
        aid_pair = [self.qaid, self.daid]
        dstncvs1, dstncvs2 = scoring.get_kpts_distinctiveness(self.ibs,
                                                              aid_pair)
        print('dstncvs1_stats = ' + ut.get_stats_str(dstncvs1))
        print('dstncvs2_stats = ' + ut.get_stats_str(dstncvs2))
        showkw = dict(weight_label='dstncvs', ell=False, pts=True)
        # Draw query chip first, then the database chip, each in its own fnum
        for aid, weights in zip(aid_pair, (dstncvs1, dstncvs2)):
            viz_chip.show_chip(self.ibs, aid, weights=weights,
                               fnum=pt.next_fnum(), **showkw)
        if dodraw:
            viz.draw()
Пример #2
0
    def show_each_dstncvs_chip(self, dodraw=True):
        """
        Display the query chip and the data chip with per-keypoint
        distinctiveness weights overlaid.

        CommandLine:
            python -m ibeis.viz.interact.interact_matches --test-show_each_dstncvs_chip --show

        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.viz.interact.interact_matches import *  # NOQA
            >>> self = testdata_match_interact(mx=1)
            >>> self.show_each_dstncvs_chip(dodraw=False)
            >>> pt.show_if_requested()
        """
        dstncvs_pair = scoring.get_kpts_distinctiveness(
            self.ibs, [self.qaid, self.daid])
        dstncvs1, dstncvs2 = dstncvs_pair
        print('dstncvs1_stats = ' + ut.get_stats_str(dstncvs1))
        print('dstncvs2_stats = ' + ut.get_stats_str(dstncvs2))
        common_kw = dict(weight_label='dstncvs', ell=False, pts=True)
        viz_chip.show_chip(self.ibs, self.qaid, weights=dstncvs1,
                           fnum=pt.next_fnum(), **common_kw)
        viz_chip.show_chip(self.ibs, self.daid, weights=dstncvs2,
                           fnum=pt.next_fnum(), **common_kw)
        if dodraw:
            viz.draw()
Пример #3
0
 def jagged_stats_info(arr_, lbl, col_lbls):
     """
     Compute formatted stat strings for a jagged array.

     Returns a (stat_str, sel_stat_str) pair: stats over all columns and
     stats restricted to the automatically-selected "interesting" columns.
     """
     # Treat infinite values as nan before computing stats
     arr = ut.recursive_replace(arr_, np.inf, np.nan)
     stat_dict = ut.get_jagged_stats(arr, use_nan=True, use_sum=True)
     sel_stat_dict, sel_indices = ut.find_interesting_stats(stat_dict, col_lbls)
     sel_col_lbls = ut.take(col_lbls, sel_indices)
     fmtkw = dict(precision=3, newlines=True, lbl=lbl, align=True)
     stat_str = ut.get_stats_str(stat_dict=stat_dict, **fmtkw)
     sel_stat_str = ut.get_stats_str(stat_dict=sel_stat_dict, **fmtkw)
     sel_prefix = 'sel_col_lbls = %s' % (ut.list_str(sel_col_lbls),)
     sel_stat_str = sel_prefix + '\n' + sel_stat_str
     return stat_str, sel_stat_str
Пример #4
0
 def jagged_stats_info(arr_, lbl, col_lbls):
     """
     Build stats strings for a jagged array: one over everything and one
     restricted to the "interesting" columns found by utool.
     """
     # Treat infinite as nan
     arr = ut.recursive_replace(arr_, np.inf, np.nan)
     stat_dict = ut.get_jagged_stats(arr, use_nan=True, use_sum=True)
     sel_stat_dict, sel_indices = ut.find_interesting_stats(stat_dict,
                                                            col_lbls)
     sel_col_lbls = ut.take(col_lbls, sel_indices)
     statstr_kw = dict(precision=3, newlines=True, lbl=lbl, align=True)
     stat_str = ut.get_stats_str(stat_dict=stat_dict, **statstr_kw)
     sel_stat_str = ut.get_stats_str(stat_dict=sel_stat_dict, **statstr_kw)
     header = 'sel_col_lbls = %s' % (ut.list_str(sel_col_lbls),)
     return stat_str, header + '\n' + sel_stat_str
Пример #5
0
def check_daid2_sccw(daid2_sccw, verbose=True):
    """
    Sanity check: assert no database sccw value is nan; optionally print stats.
    """
    sccw_arr = daid2_sccw
    assert not np.any(np.isnan(sccw_arr)), 'sccws are nan'
    if verbose:
        print('database sccws are not nan')
        print('database sccw stats:')
        print(ut.get_stats_str(sccw_arr, newlines=True))
Пример #6
0
def print_annotationmatch_scores_map(orgres2_scores):
    """
    Print chipmatch score statistics for each orgtype in the map.
    """
    lines = ['+-----------------------------', '| CHIPMATCH SCORES:']
    for orgtype, scores in six.iteritems(orgres2_scores):
        lines.append('| orgtype(%r)' % (orgtype,))
        lines.append('|     scores: %s' % (ut.get_stats_str(scores)))
    lines.append('L-----------------------------')
    # One write produces the same output as the original per-line prints
    print('\n'.join(lines))
Пример #7
0
def print_annotationmatch_scores_map(orgres2_scores):
    """
    Pretty-print per-orgtype chipmatch score stats to stdout.
    """
    print('+-----------------------------')
    print('| CHIPMATCH SCORES:')
    for org_key, org_scores in six.iteritems(orgres2_scores):
        print('| orgtype(%r)' % (org_key,))
        print('|     scores: %s' % ut.get_stats_str(org_scores))
    print('L-----------------------------')
Пример #8
0
def check_daid2_sccw(daid2_sccw, verbose=True):
    """
    Assert that the database sccw values contain no nans.
    """
    has_nan = np.any(np.isnan(daid2_sccw))
    assert not has_nan, 'sccws are nan'
    if verbose:
        print('database sccws are not nan')
        print('database sccw stats:')
        print(ut.get_stats_str(daid2_sccw, newlines=True))
Пример #9
0
def display_info(ibs, invindex, annots_df):
    """
    Print database / inverted-index summary info and plot the vocabulary
    centroids.
    """
    from vtool import clustering2 as clustertool
    from ibeis.other import dbinfo
    # Database summary
    print(ibs.get_infostr())
    dbinfo.get_dbinfo(ibs, verbose=True)
    # Inverted index summary: how many vectors each word indexes
    print('Inverted Index Stats: vectors per word')
    print(ut.get_stats_str(map(len, invindex.wx2_idxs.values())))
    centroids = invindex.words
    plot_kw = dict(num_pca_dims=2, whiten=False)

    def _make_plot(fnum, prefix, data, labels='centroids'):
        # Visualize `data` relative to the vocabulary centroids
        return clustertool.plot_centroids(data, centroids, labels=labels,
                                          fnum=fnum, prefix=prefix + '\n',
                                          **plot_kw)

    _make_plot(1, 'centroid vecs', centroids)
Пример #10
0
def print_desc_distances_map(orgres2_distmap):
    """
    Print stats for each disttype within each orgtype's distance map.
    """
    print('+-----------------------------')
    print('| DESCRIPTOR MATCHE DISTANCES:')
    for orgtype, distmap in six.iteritems(orgres2_distmap):
        print('| orgtype(%r)' % (orgtype,))
        for disttype, dists in six.iteritems(distmap):
            stats_str = ut.get_stats_str(dists)
            print('|     disttype(%12r): %s' % (disttype, stats_str))
    print('L-----------------------------')
Пример #11
0
def print_desc_distances_map(orgres2_distmap):
    """
    Dump descriptor match distance statistics, grouped by orgtype and
    distance type.
    """
    lines = ['+-----------------------------',
             '| DESCRIPTOR MATCHE DISTANCES:']
    for orgtype, distmap in six.iteritems(orgres2_distmap):
        lines.append('| orgtype(%r)' % (orgtype,))
        for disttype, dists in six.iteritems(distmap):
            lines.append('|     disttype(%12r): %s' %
                         (disttype, ut.get_stats_str(dists)))
    lines.append('L-----------------------------')
    print('\n'.join(lines))
Пример #12
0
 def parse_img_from_arg(argstr_):
     """
     Read a grayscale image from the path given by a command line argument.

     Returns None when the argument is missing or is the literal string
     'None'.
     """
     fpath = ut.get_argval(argstr_, type_=str, default='None')
     # Guard clause: nothing usable was passed on the command line
     if fpath is None or fpath == 'None':
         print('Did not read %s' % (fpath))
         return None
     img = vt.imread(fpath, grayscale=True)
     print('Reading %s with stats %s' % (fpath, ut.get_stats_str(img, axis=None)))
     return img
Пример #13
0
 def parse_img_from_arg(argstr_):
     """
     Load a grayscale image whose path comes from the given CLI argument,
     or None when no valid path was provided.
     """
     fpath = ut.get_argval(argstr_, type_=str, default='None')
     path_is_valid = fpath is not None and fpath != 'None'
     if path_is_valid:
         img = vt.imread(fpath, grayscale=True)
         msg = 'Reading %s with stats %s' % (fpath,
                                             ut.get_stats_str(img, axis=None))
         print(msg)
     else:
         print('Did not read %s' % (fpath))
         img = None
     return img
Пример #14
0
def learn_k():
    r"""
    Build (nDaids, K) -> error training data and fit model parameters for K
    that minimize that error.

    With ``--dummy`` synthetic training data is generated; otherwise queries
    are evaluated against an ibeis database (``--db``, default 'PZ_Master0').

    Returns:
        tuple: (known_nd_data, known_target_points, given_data_dims,
            opt_model_params)

    CommandLine:
        python -m ibeis.other.optimize_k --test-learn_k
        python -m ibeis.other.optimize_k --test-learn_k --show
        python -m ibeis.other.optimize_k --test-learn_k --show --dummy

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.other.optimize_k import *  # NOQA
        >>> import plottool as pt
        >>> # build test data
        >>> # execute function
        >>> known_nd_data, known_target_points, given_data_dims, opt_model_params = learn_k()
        >>> # verify results
        >>> ut.quit_if_noshow()
        >>> plot_search_surface(known_nd_data, known_target_points, given_data_dims, opt_model_params)
        >>> pt.all_figures_bring_to_front()
        >>> pt.show_if_requested()
    """
    # Compute Training Data
    varydict = {
        #'K': [4, 7, 10, 13, 16, 19, 22, 25][:4],
        #'K': [1, 2, 3, 4, 8, 10, 13, 15],
        'K': [1, 2, 4, 8, 16],
        #'nDaids': [20, 100, 250, 500, 750, 1000],
    }
    # Database sizes at which each K value is evaluated
    nDaids_basis = [20, 30, 50, 75, 100, 200, 250, 300, 325, 350, 400, 500, 600, 750, 800, 900, 1000, 1500]
    DUMMY = ut.get_argflag('--dummy')
    if DUMMY:
        nDaids_list, K_list, nError_list = test_training_data(varydict, nDaids_basis)
        # Normalize errors to [0, 1] by the worst observed error
        nError_list = nError_list.astype(np.float32) / nError_list.max()
    else:
        dbname = ut.get_argval('--db', default='PZ_Master0')
        ibs = ibeis.opendb(dbname)
        verbose = False
        qaids, daids_list = collect_ibeis_training_annotations(ibs, nDaids_basis, verbose=verbose)
        nDaids_list, K_list, nError_list = evaluate_training_data(ibs, qaids, daids_list, varydict, nDaids_basis, verbose=verbose)
        # Normalize by the number of queries -> per-query error rate
        nError_list = nError_list.astype(np.float32) / len(qaids)
        print('\nFinished Get Training Data')
        print('len(qaids) = %r' % (len(qaids)))
        print(ut.get_stats_str(nError_list))

    #unique_nDaids = np.unique(nDaids_list)

    # Alias to general optimization problem
    known_nd_data = np.vstack([nDaids_list, K_list]).T
    known_target_points = nError_list
    # Mark the data we are given vs what we want to learn
    given_data_dims = [0]
    #learn_data_dims = [1]

    # Minimize K params
    opt_model_params = minimize_compute_K_params(known_nd_data, known_target_points, given_data_dims)
    return known_nd_data, known_target_points, given_data_dims, opt_model_params
Пример #15
0
 def plot_weights(self, event=None):
     """
     Plot each scalar sequence from the inference object as a sorted curve,
     one subplot per key, inside an expandable interaction.
     """
     import plottool as pt
     scalars = self.infr.get_scalars()
     inter = pt.ExpandableInteraction(fnum=1)
     for key, vals in scalars.items():
         print(key + ' = ' + ut.get_stats_str(vals, use_nan=True))
         xdata = np.arange(len(vals))
         plot_kw = dict(title=key, y_label=key, marker='-o',
                        equal_aspect=False)
         inter.append_partial(pt.plot2, xdata, sorted(vals), **plot_kw)
     inter.start()
     inter.show_page()
     pt.update()
Пример #16
0
    def get_inspect_str(qres, ibs=None, name_scoring=False):
        """
        Build a multi-line, human-readable report of this query result.

        Stacks the top aids with their scores, raw scores, and ranks (plus
        ground-truth flags / name ids when available) into an aligned table
        and appends feature-match-count statistics.

        Args:
            ibs: controller used to look up ground truth (optional)
            name_scoring (bool): prepend name rowids to the table when True

        Returns:
            str: the formatted inspection string
        """
        qres.assert_self()
        #ut.embed()

        top_lbls = [' top aids', ' scores', ' rawscores', ' ranks']

        top_aids   = np.array(qres.get_top_aids(num=6, name_scoring=name_scoring, ibs=ibs), dtype=np.int32)
        top_scores = np.array(qres.get_aid_scores(top_aids), dtype=np.float64)
        top_rawscores = np.array(qres.get_aid_scores(top_aids, rawscore=True), dtype=np.float64)
        top_ranks  = np.array(qres.get_aid_ranks(top_aids), dtype=np.int32)
        top_list   = [top_aids, top_scores, top_rawscores, top_ranks]

        if ibs is not None:
            # Ground truth row is only available when a controller is given
            top_lbls += [' isgt']
            istrue = qres.get_aid_truth(ibs, top_aids)
            top_list.append(np.array(istrue, dtype=np.int32))
        if name_scoring:
            top_lbls = ['top nid'] + top_lbls
            top_list = [ibs.get_annot_name_rowids(top_aids)] + top_list

        # Stack one row per label; cast everything to a single float dtype
        top_stack = np.vstack(top_list)
        #top_stack = np.array(top_stack, dtype=object)
        top_stack = np.array(top_stack, dtype=np.float32)
        #np.int32)
        top_str = np.array_str(top_stack, precision=3, suppress_small=True, max_line_width=200)

        top_lbl = '\n'.join(top_lbls)
        inspect_list = ['QueryResult',
                        qres.cfgstr,
                        ]
        if ibs is not None:
            gt_ranks  = qres.get_gt_ranks(ibs=ibs)
            gt_scores = qres.get_gt_scores(ibs=ibs)
            inspect_list.append('gt_ranks = %r' % gt_ranks)
            inspect_list.append('gt_scores = %r' % gt_scores)

        nFeatMatch_list = get_num_feats_in_matches(qres)
        nFeatMatch_stats_str = ut.get_stats_str(nFeatMatch_list, newlines=True, exclude_keys=('nMin', 'nMax'))

        # Place the label column horizontally beside the stacked table
        inspect_list.extend([
            'qaid=%r ' % qres.qaid,
            ut.hz_str(top_lbl, ' ', top_str),
            'num feat matches per annotation stats:',
            #ut.indent(ut.dict_str(nFeatMatch_stats)),
            ut.indent(nFeatMatch_stats_str),
        ])

        inspect_str = '\n'.join(inspect_list)

        #inspect_str = ut.indent(inspect_str, '[INSPECT] ')
        return inspect_str
Пример #17
0
    def get_inspect_str(qres, ibs=None, name_scoring=False):
        """
        Return a formatted multi-line summary of this query result.

        The top aids are stacked with their scores, raw scores, and ranks
        (and, when a controller is supplied, ground-truth information) into
        an aligned text table, followed by feature-match-count stats.

        Args:
            ibs: controller for ground-truth lookups (optional)
            name_scoring (bool): also include name rowids when True

        Returns:
            str: the inspection string
        """
        qres.assert_self()
        #ut.embed()

        top_lbls = [' top aids', ' scores', ' rawscores', ' ranks']

        top_aids   = np.array(qres.get_top_aids(num=6, name_scoring=name_scoring, ibs=ibs), dtype=np.int32)
        top_scores = np.array(qres.get_aid_scores(top_aids), dtype=np.float64)
        top_rawscores = np.array(qres.get_aid_scores(top_aids, rawscore=True), dtype=np.float64)
        top_ranks  = np.array(qres.get_aid_ranks(top_aids), dtype=np.int32)
        top_list   = [top_aids, top_scores, top_rawscores, top_ranks]

        if ibs is not None:
            # Ground-truth flags require the controller
            top_lbls += [' isgt']
            istrue = qres.get_aid_truth(ibs, top_aids)
            top_list.append(np.array(istrue, dtype=np.int32))
        if name_scoring:
            top_lbls = ['top nid'] + top_lbls
            top_list = [ibs.get_annot_name_rowids(top_aids)] + top_list

        # One row per label; unify dtype so the rows stack into one array
        top_stack = np.vstack(top_list)
        #top_stack = np.array(top_stack, dtype=object)
        top_stack = np.array(top_stack, dtype=np.float32)
        #np.int32)
        top_str = np.array_str(top_stack, precision=3, suppress_small=True, max_line_width=200)

        top_lbl = '\n'.join(top_lbls)
        inspect_list = ['QueryResult',
                        qres.cfgstr,
                        ]
        if ibs is not None:
            gt_ranks  = qres.get_gt_ranks(ibs=ibs)
            gt_scores = qres.get_gt_scores(ibs=ibs)
            inspect_list.append('gt_ranks = %r' % gt_ranks)
            inspect_list.append('gt_scores = %r' % gt_scores)

        nFeatMatch_list = get_num_feats_in_matches(qres)
        nFeatMatch_stats_str = ut.get_stats_str(nFeatMatch_list, newlines=True, exclude_keys=('nMin', 'nMax'))

        # Join the labels column horizontally with the table body
        inspect_list.extend([
            'qaid=%r ' % qres.qaid,
            ut.hz_str(top_lbl, ' ', top_str),
            'num feat matches per annotation stats:',
            #ut.indent(ut.dict_str(nFeatMatch_stats)),
            ut.indent(nFeatMatch_stats_str),
        ])

        inspect_str = '\n'.join(inspect_list)

        #inspect_str = ut.indent(inspect_str, '[INSPECT] ')
        return inspect_str
Пример #18
0
def display_info(ibs, invindex, annots_df):
    """
    Dump database and inverted-index statistics, then plot the word
    centroids.
    """
    from vtool import clustering2 as clustertool
    from ibeis.other import dbinfo
    # Database summary
    print(ibs.get_infostr())
    dbinfo.get_dbinfo(ibs, verbose=True)
    # Inverted index summary: vectors indexed per word
    print('Inverted Index Stats: vectors per word')
    print(ut.get_stats_str(map(len, invindex.wx2_idxs.values())))
    centroids = invindex.words
    kwd = dict(num_pca_dims=2, whiten=False)

    def makeplot_(fnum, prefix, data, labels='centroids'):
        # Visualize `data` relative to the word centroids
        return clustertool.plot_centroids(data, centroids, labels=labels,
                                          fnum=fnum, prefix=prefix + '\n',
                                          **kwd)

    makeplot_(1, 'centroid vecs', centroids)
Пример #19
0
    def select_ith_match(self, mx):
        """
        Selects the ith match and visualizes and prints information concerning
        features weights, keypoint details, and sift descriptions

        Args:
            mx (int): index of the feature match to select
        """
        import plottool as pt
        from plottool import viz_featrow
        from plottool import interact_helpers as ih

        # <CLOSURE VARS>
        fnum = self.fnum
        same_fig = self.same_fig
        rchip1 = self.rchip1
        rchip2 = self.rchip2
        # </CLOSURE VARS>
        self.mx = mx
        print("+--- SELECT --- ")
        print("... selecting mx-th=%r feature match" % mx)
        fsv = self.fsv
        fs = self.fs
        print("score stats:")
        print(ut.get_stats_str(fsv, axis=0, newlines=True))
        print("fsv[mx] = %r" % (fsv[mx],))
        print("fs[mx] = %r" % (fs[mx],))
        # ----------------------
        # Get info for the select_ith_match plot
        self.mode = 1
        # Get the mx-th feature match
        fx1, fx2 = self.fm[mx]

        # Older info
        fscore2 = self.fs[mx]
        fk2 = None
        kp1, kp2 = self.kpts1[fx1], self.kpts2[fx2]
        vecs1, vecs2 = self.vecs1[fx1], self.vecs2[fx2]
        info1 = "\nquery"
        info2 = "\nk=%r fscore=%r" % (fk2, fscore2)
        # self.last_fx = fx1
        self.last_fx = fx1

        # Extracted keypoints to draw
        extracted_list = [(rchip1, kp1, vecs1, fx1, "aid1", info1), (rchip2, kp2, vecs2, fx2, "aid2", info2)]
        # Normalizng Keypoint
        # if hasattr(cm, 'filt2_meta') and 'lnbnn' in cm.filt2_meta:
        #    qfx2_norm = cm.filt2_meta['lnbnn']
        #    # Normalizing chip and feature
        #    (aid3, fx3, normk) = qfx2_norm[fx1]
        #    rchip3 = ibs.get_annot_chips(aid3)
        #    kp3 = ibs.get_annot_kpts(aid3)[fx3]
        #    sift3 = ibs.get_annot_vecs(aid3)[fx3]
        #    info3 = '\nnorm %s k=%r' % (vh.get_aidstrs(aid3), normk)
        #    extracted_list.append((rchip3, kp3, sift3, fx3, aid3, info3))
        # else:
        #    pass
        #    #print('WARNING: meta doesnt exist')

        # ----------------------
        # Draw the select_ith_match plot
        # An extra row is reserved for the chipmatch view when same_fig is True
        nRows, nCols = len(extracted_list) + same_fig, 3
        # Draw matching chips and features
        sel_fm = np.array([(fx1, fx2)])
        pnum1 = (nRows, 1, 1) if same_fig else (1, 1, 1)
        vert = self.vert if self.vert is not None else False
        self.chipmatch_view(pnum=pnum1, ell_alpha=0.4, ell_linewidth=1.8, colors=pt.BLUE, sel_fm=sel_fm, vert=vert)
        # Draw selected feature matches
        px = nCols * same_fig  # plot offset
        prevsift = None
        if not same_fig:
            # fnum2 = fnum + len(viz.FNUMS)
            fnum2 = self.fnum2
            fig2 = pt.figure(fnum=fnum2, docla=True, doclf=True)
        else:
            fnum2 = fnum

        # One feature row per extracted keypoint; px tracks the plot slot
        for (rchip, kp, sift, fx, aid, info) in extracted_list:
            px = viz_featrow.draw_feat_row(
                rchip, fx, kp, sift, fnum2, nRows, nCols, px, prevsift=prevsift, aid=aid, info=info
            )
            prevsift = sift
        if not same_fig:
            ih.connect_callback(fig2, "button_press_event", self.on_click)
Пример #20
0
def viz_annot_with_metrics(ibs, invindex, aid, metrics,
                           metric_keys=['wx2_nMembers',
                                        ('wx2_pdist_stats', 'mean'),
                                        ('wx2_wdist_stats', 'mean')],
                           show_orig=True,
                           show_idf=True,
                           show_words=False,
                           show_analysis=True,
                           show_aveprecision=True,
                           show_featweights=True,
                           qfx2_closest_k_list=None,
                           show_word_correct_assignments=False,
                           qres_list=None):
    """
    Visualize an annotation's chip overlaid with a series of per-keypoint
    metrics (original chip, idf, words, query-result analysis, word
    assignments, feature weights, and word metric plots), each in its own
    figure number.

    Args:
        ibs (IBEISController):
        invindex (InvertedIndex): object for fast vocab lookup
        aid (int):
        metrics (namedtuple):

    NOTE(review): metric_keys uses a mutable default list; it is only read
    here, never mutated, so this is benign — but callers should not rely on
    modifying it.

    Example:
        >>> from ibeis.algo.hots.smk.smk_plots import *  # NOQA
        >>> from ibeis.algo.hots.smk import smk_debug
        >>> from ibeis.algo.hots.smk import smk_repr
        >>> #tup = smk_debug.testdata_raw_internals0(db='GZ_ALL', nWords=64000)
        >>> #tup = smk_debug.testdata_raw_internals0(db='GZ_ALL', nWords=8000)
        >>> #tup = smk_debug.testdata_raw_internals0(db='PZ_Master0', nWords=64000)
        >>> tup = smk_debug.testdata_raw_internals0(db='PZ_Mothers', nWords=8000)
        >>> ibs, annots_df, daids, qaids, invindex, qreq_ = tup
        >>> smk_repr.compute_data_internals_(invindex, qreq_.qparams, delete_rawvecs=False)
        >>> invindex.idx2_wxs = np.array(invindex.idx2_wxs)
        >>> metric_keys=['wx2_nMembers', ('wx2_pdist_stats', 'mean'), ('wx2_wdist_stats', 'mean')]
        >>> metrics = compute_word_metrics(invindex)
        >>> aid = 1

    """
    #viz_chip.rrr()
    #df2.rrr()
    kpts = ibs.get_annot_kpts(aid)
    if ut.VERBOSE:
        ut.super_print(kpts)

    if show_word_correct_assignments or show_idf:
        # Get only the first assigned word
        # FIXME: need to look at multi-assignment
        _mask = invindex.idx2_daid == aid
        fxs = invindex.idx2_dfx[_mask]
        wxs = invindex.idx2_wxs[_mask].T[0].T

        assert len(fxs) == len(kpts)
        assert len(fxs) == len(wxs)

    fnum = 1

    dbname = ibs.get_dbname()
    def _plot(metric, fnum=1, lbl='', annote=True, darken=.1, colortype='score', **kwargs):
        # Helper: draw one chip-metric figure; returns the next free fnum
        print('ploting fnum=%r' % fnum)
        #lblaug = ' db=%r, nWords = %r' % (dbname, nWords)
        lblaug = ' db=%r' % (dbname)
        figtitle = lbl + lblaug
        lbl = lbl
        plot_chip_metric(ibs, aid, metric=metric, fnum=fnum, lbl=lbl, figtitle=figtitle,
                         annote=annote, darken=darken, colortype=colortype, **kwargs)
        return fnum + 1

    # Original Plot
    if show_orig:
        fnum = _plot(None, fnum=fnum, lbl='Orig Chip', annote=False, darken=None)

    # IDF Plot
    if show_idf:
        idf_list = np.array(list(ut.dict_take_gen(invindex.wx2_idf, wxs)))
        fnum = _plot(idf_list, fnum=fnum, lbl='IDF')
        print('stats(idf_list) = ' + ut.get_stats_str(idf_list))

    # Word Plot
    if show_words:
        fnum = _plot(wxs, fnum=fnum, lbl='Words', colortype='label')

    # LNBNN Result Plots
    if qfx2_closest_k_list is not None:
        for qres, qfx2_closest_k in zip(qres_list, qfx2_closest_k_list):
            print('  --- qres item ---')
            if qres is not None:
                from ibeis.algo.hots.hots_query_result import QueryResult
                assert isinstance(qres, QueryResult)
                if show_analysis:
                    qres.show_analysis(ibs=ibs, fnum=fnum, figtitle=qres.make_smaller_title())
                    fnum += 1
                if show_aveprecision:
                    qres.show_precision_recall_curve(ibs=ibs, fnum=fnum)
                    fnum += 1

            if qfx2_closest_k is not None:
                # Plot ranked positions
                qfx2_closest_k = np.array(qfx2_closest_k)
                qfx2_closest_k_qeq0 = qfx2_closest_k[qfx2_closest_k >= 0]
                qfx2_closest_k_lt0  = qfx2_closest_k[qfx2_closest_k < 0]
                print('stats(qfx2_closest_k_qeq0) = ' + ut.get_stats_str(qfx2_closest_k_qeq0))
                print('stats(qfx2_closest_k_lt0)  = ' + ut.get_stats_str(qfx2_closest_k_lt0))
                fnum = _plot(qfx2_closest_k, fnum=fnum, lbl='Correct Ranks ' + qres.make_smaller_title(), colortype='custom', reverse_cmap=True)

    # Correct word assignment plots
    # NOTE(review): this section reads `qres`, which is only bound inside the
    # loop above; if show_word_correct_assignments is True while
    # qfx2_closest_k_list is None this raises NameError — confirm callers.
    if show_word_correct_assignments:
        unique_wxs, unique_inverse = np.unique(wxs, return_inverse=True)
        # Get the aids that belong to each word
        _idxs_list = ut.dict_take(invindex.wx2_idxs, unique_wxs)
        _aids_list = [invindex.idx2_daid.take(idxs) for idxs in _idxs_list]
        # Check if this word will provide a correct assignment -
        # two ground truth chip exist within the same word
        gt_aids = np.array(ibs.get_annot_groundtruth(aid))
        _hastp_list = np.array([len(np.intersect1d(aids, gt_aids)) > 0 for aids in _aids_list])
        # Map back to the space of features
        # mark each feature match as having a correct word mapping or not
        hascorrectmatch = _hastp_list[unique_inverse]
        hascorrectmatch_ = hascorrectmatch.astype(np.int32) * 3 - 2
        lbl = 'Correct Words ' + qres.make_smaller_title() + '\n Yellow means the word contains a correct match in the word\'s invindex. Blue is the opposite.'
        fnum = _plot(hascorrectmatch_, fnum=fnum, lbl=lbl, colortype='custom', reverse_cmap=False)

    # Feature Weight Plots
    if show_featweights:
        from ibeis.algo.preproc import preproc_featweight
        featweights = preproc_featweight.compute_fgweights(ibs, [aid])[0]
        # plot rf feature weights
        detect_cfgstr = ibs.cfg.detect_cfg.get_cfgstr()
        fnum = _plot(featweights, fnum=fnum, lbl='Feature Weights ' + detect_cfgstr, colortype='score')

    # Word Metric Plots
    for count, metrickey in enumerate(metric_keys):
        if isinstance(metrickey, tuple):
            #lbl = repr(metrickey)
            def fixstr(str_):
                # strip the 'wx2_' prefix and '_stats' suffix for the label
                return str_.replace('wx2_', '').replace('_stats', '')
            lbl = '%s(%s)' % (metrickey[1].upper(), fixstr(metrickey[0]))
        else:
            lbl = str(metrickey)
        metric_list = metric_clamped_stat(metrics, wxs, metrickey)
        fnum = _plot(metric_list, fnum=fnum, lbl=lbl)
Пример #21
0
def learn_k():
    r"""
    Gather (nDaids, K) -> error training data and fit model parameters for K
    that minimize that error.

    With ``--dummy`` the training data is synthetic; otherwise queries are run
    against an ibeis database chosen by ``--db`` (default 'PZ_Master0').

    Returns:
        tuple: (known_nd_data, known_target_points, given_data_dims,
            opt_model_params)

    CommandLine:
        python -m ibeis.other.optimize_k --test-learn_k
        python -m ibeis.other.optimize_k --test-learn_k --show
        python -m ibeis.other.optimize_k --test-learn_k --show --dummy

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.other.optimize_k import *  # NOQA
        >>> import plottool as pt
        >>> # build test data
        >>> # execute function
        >>> known_nd_data, known_target_points, given_data_dims, opt_model_params = learn_k()
        >>> # verify results
        >>> ut.quit_if_noshow()
        >>> plot_search_surface(known_nd_data, known_target_points, given_data_dims, opt_model_params)
        >>> pt.all_figures_bring_to_front()
        >>> pt.show_if_requested()
    """
    # Compute Training Data
    varydict = {
        #'K': [4, 7, 10, 13, 16, 19, 22, 25][:4],
        #'K': [1, 2, 3, 4, 8, 10, 13, 15],
        'K': [1, 2, 4, 8, 16],
        #'nDaids': [20, 100, 250, 500, 750, 1000],
    }
    # Database sizes at which each K value is evaluated
    nDaids_basis = [
        20, 30, 50, 75, 100, 200, 250, 300, 325, 350, 400, 500, 600, 750, 800,
        900, 1000, 1500
    ]
    DUMMY = ut.get_argflag('--dummy')
    if DUMMY:
        nDaids_list, K_list, nError_list = test_training_data(
            varydict, nDaids_basis)
        # Normalize errors to [0, 1] by the worst observed error
        nError_list = nError_list.astype(np.float32) / nError_list.max()
    else:
        dbname = ut.get_argval('--db', default='PZ_Master0')
        ibs = ibeis.opendb(dbname)
        verbose = False
        qaids, daids_list = collect_ibeis_training_annotations(ibs,
                                                               nDaids_basis,
                                                               verbose=verbose)
        nDaids_list, K_list, nError_list = evaluate_training_data(
            ibs, qaids, daids_list, varydict, nDaids_basis, verbose=verbose)
        # Normalize by the number of queries -> per-query error rate
        nError_list = nError_list.astype(np.float32) / len(qaids)
        print('\nFinished Get Training Data')
        print('len(qaids) = %r' % (len(qaids)))
        print(ut.get_stats_str(nError_list))

    #unique_nDaids = np.unique(nDaids_list)

    # Alias to general optimization problem
    known_nd_data = np.vstack([nDaids_list, K_list]).T
    known_target_points = nError_list
    # Mark the data we are given vs what we want to learn
    given_data_dims = [0]
    #learn_data_dims = [1]

    # Minimize K params
    opt_model_params = minimize_compute_K_params(known_nd_data,
                                                 known_target_points,
                                                 given_data_dims)
    return known_nd_data, known_target_points, given_data_dims, opt_model_params
Пример #22
0
    def select_ith_match(self, mx):
        """
        Selects the ith match and visualizes and prints information concerning
        features weights, keypoint details, and sift descriptions

        Args:
            mx (int): the ith match to visualize
            qaid (int): query annotation id
            aid (int): database annotation id

        CommandLine:
            python -m ibeis.viz.interact.interact_matches --test-select_ith_match --show

        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.viz.interact.interact_matches import *  # NOQA
            >>> self = testdata_match_interact(mx=1)
            >>> pt.show_if_requested()
        """
        ibs = self.ibs
        qaid = self.qaid
        aid = self.daid
        fnum = self.fnum
        figtitle = self.figtitle
        rchip1 = self.rchip1
        rchip2 = self.rchip2
        aid = self.daid
        same_fig = self.same_fig
        self.mx = mx
        print('+--- SELECT --- ')
        print('qaid=%r, daid=%r' % (qaid, aid))
        print('... selecting mx-th=%r feature match' % mx)
        # Debug printing of score stats is disabled by default
        if False:
            print('score stats:')
            print(ut.get_stats_str(self.fsv, axis=0, newlines=True))
            print('fsv[mx] = %r' % (self.fsv[mx], ))
            print('fs[mx] = %r' % (self.fs[mx], ))
        """
        # test feature weights of actual chips
        fx1, fx2 = fm[mx]
        daid = aid
        ibs.get_annot_fgweights([daid])[0][fx2]
        ibs.get_annot_fgweights([qaid])[0][fx1]
        """
        #----------------------
        # Get info for the select_ith_match plot
        self.mode = 1
        # Get the mx-th feature match
        fx1, fx2 = self.fm[mx]
        fscore2 = self.fs[mx]
        fk2 = self.fk[mx]
        kpts1 = ibs.get_annot_kpts([self.qaid],
                                   config2_=self.query_config2_)[0]
        kpts2 = ibs.get_annot_kpts([self.daid], config2_=self.data_config2_)[0]
        desc1 = ibs.get_annot_vecs([self.qaid],
                                   config2_=self.query_config2_)[0]
        desc2 = ibs.get_annot_vecs([self.daid], config2_=self.data_config2_)[0]
        kp1, kp2 = kpts1[fx1], kpts2[fx2]
        sift1, sift2 = desc1[fx1], desc2[fx2]
        info1 = '\nquery'
        info2 = '\nk=%r fscore=%r' % (fk2, fscore2)
        #last_state.last_fx = fx1
        self.last_fx = fx1
        # Extracted keypoints to draw
        extracted_list = [(rchip1, kp1, sift1, fx1, self.qaid, info1),
                          (rchip2, kp2, sift2, fx2, self.daid, info2)]
        # Normalizng Keypoint
        #if hasattr(cm, 'filt2_meta') and 'lnbnn' in cm.filt2_meta:
        #    qfx2_norm = cm.filt2_meta['lnbnn']
        #    # Normalizing chip and feature
        #    (aid3, fx3, normk) = qfx2_norm[fx1]
        #    rchip3 = ibs.get_annot_chips(aid3)
        #    kp3 = ibs.get_annot_kpts(aid3)[fx3]
        #    sift3 = ibs.get_annot_vecs(aid3)[fx3]
        #    info3 = '\nnorm %s k=%r' % (vh.get_aidstrs(aid3), normk)
        #    extracted_list.append((rchip3, kp3, sift3, fx3, aid3, info3))
        #else:
        #    pass
        # print('WARNING: meta doesnt exist')

        #----------------------
        # Draw the select_ith_match plot
        # One extra row is reserved for the chipmatch view when same_fig is True
        nRows, nCols = len(extracted_list) + same_fig, 3
        # Draw matching chips and features
        sel_fm = np.array([(fx1, fx2)])
        pnum1 = (nRows, 1, 1) if same_fig else (1, 1, 1)
        vert = self.vert if self.vert is not None else False
        self.chipmatch_view(pnum1,
                            ell_alpha=.4,
                            ell_linewidth=1.8,
                            colors=df2.BLUE,
                            sel_fm=sel_fm,
                            vert=vert)
        # Draw selected feature matches
        px = nCols * same_fig  # plot offset
        prevsift = None
        if not same_fig:
            #fnum2 = fnum + len(viz.FNUMS)
            fnum2 = self.fnum2
            fig2 = df2.figure(fnum=fnum2, docla=True, doclf=True)
        else:
            fnum2 = fnum
        # One feature row per extracted keypoint; px tracks the plot slot
        for (rchip, kp, sift, fx, aid, info) in extracted_list:
            px = viz_featrow.draw_feat_row(rchip,
                                           fx,
                                           kp,
                                           sift,
                                           fnum2,
                                           nRows,
                                           nCols,
                                           px,
                                           prevsift=prevsift,
                                           aid=aid,
                                           info=info)
            prevsift = sift
        if not same_fig:
            ih.connect_callback(fig2, 'button_press_event', self.on_click)
            df2.set_figtitle(figtitle + vh.get_vsstr(qaid, aid))
Пример #23
0
 def print_weights(self, event=None):
     """Print summary statistics for each scalar tracked by the inference object.

     Args:
         event: unused; accepted so this method can be bound directly as a
             GUI / matplotlib event callback.
     """
     scalars = self.infr.get_scalars()
     # The previous version enumerated the items but never used the index.
     for key, vals in scalars.items():
         print(key + ' = ' + ut.get_stats_str(vals, use_nan=True))
Пример #24
0
 def imgstats(img):
     """Print the dtype and overall summary statistics of an image array."""
     lines = [
         'stats:',
         '    dtype = %r ' % (img.dtype,),
         '    ' + ut.get_stats_str(img, axis=None),
     ]
     for line in lines:
         print(line)
Пример #25
0
 def imgstats(img):
     """Print the dtype and overall summary statistics of an image array."""
     print('stats:')
     dtype_line = '    dtype = %r ' % (img.dtype,)
     print(dtype_line)
     stats_line = '    ' + ut.get_stats_str(img, axis=None)
     print(stats_line)
Пример #26
0
    def select_ith_match(self, mx):
        """
        Selects the ith match and visualizes and prints information concerning
        feature weights, keypoint details, and sift descriptions.

        Args:
            mx (int): index into ``self.fm`` of the feature match to select
        """
        import plottool as pt
        from plottool import viz_featrow
        from plottool import interact_helpers as ih
        # <CLOSURE VARS>
        # Copy frequently-used instance attributes into locals.
        fnum       = self.fnum
        same_fig   = self.same_fig
        rchip1     = self.rchip1
        rchip2     = self.rchip2
        # </CLOSURE VARS>
        # Remember which match is currently selected.
        self.mx    = mx
        print('+--- SELECT --- ')
        print('... selecting mx-th=%r feature match' % mx)
        fsv = self.fsv
        fs  = self.fs
        print('score stats:')
        print(ut.get_stats_str(fsv, axis=0, newlines=True))
        print('fsv[mx] = %r' % (fsv[mx],))
        print('fs[mx] = %r' % (fs[mx],))
        #----------------------
        # Get info for the select_ith_match plot
        self.mode = 1
        # Get the mx-th feature match (feature indices into each annot's kpts)
        fx1, fx2 = self.fm[mx]

        # Older info
        # fk2 (feature rank) is not tracked here, so it is reported as None.
        fscore2  = self.fs[mx]
        fk2      = None
        kp1, kp2     = self.kpts1[fx1], self.kpts2[fx2]
        vecs1, vecs2 = self.vecs1[fx1], self.vecs2[fx2]
        info1 = '\nquery'
        info2 = '\nk=%r fscore=%r' % (fk2, fscore2)
        #self.last_fx = fx1
        self.last_fx = fx1

        # Extracted keypoints to draw; one row per chip in the feature plot.
        extracted_list = [(rchip1, kp1, vecs1, fx1, 'aid1', info1),
                          (rchip2, kp2, vecs2, fx2, 'aid2', info2)]
        # Normalizng Keypoint
        #if hasattr(cm, 'filt2_meta') and 'lnbnn' in cm.filt2_meta:
        #    qfx2_norm = cm.filt2_meta['lnbnn']
        #    # Normalizing chip and feature
        #    (aid3, fx3, normk) = qfx2_norm[fx1]
        #    rchip3 = ibs.get_annot_chips(aid3)
        #    kp3 = ibs.get_annot_kpts(aid3)[fx3]
        #    sift3 = ibs.get_annot_vecs(aid3)[fx3]
        #    info3 = '\nnorm %s k=%r' % (vh.get_aidstrs(aid3), normk)
        #    extracted_list.append((rchip3, kp3, sift3, fx3, aid3, info3))
        #else:
        #    pass
        #    #print('WARNING: meta doesnt exist')

        #----------------------
        # Draw the select_ith_match plot
        # bool arithmetic: one extra row is reserved for the chipmatch view
        # when everything shares a single figure (same_fig is True -> +1).
        nRows, nCols = len(extracted_list) + same_fig, 3
        # Draw matching chips and features
        sel_fm = np.array([(fx1, fx2)])
        pnum1 = (nRows, 1, 1) if same_fig else (1, 1, 1)
        vert = self.vert if self.vert is not None else False
        self.chipmatch_view(pnum=pnum1, ell_alpha=.4, ell_linewidth=1.8,
                            colors=pt.BLUE, sel_fm=sel_fm, vert=vert)
        # Draw selected feature matches
        px = nCols * same_fig  # plot offset
        prevsift = None
        if not same_fig:
            # Feature rows go to a separate figure when not sharing one.
            #fnum2 = fnum + len(viz.FNUMS)
            fnum2 = self.fnum2
            fig2 = pt.figure(fnum=fnum2, docla=True, doclf=True)
        else:
            fnum2 = fnum

        # draw_feat_row returns the next plot offset, threading px through.
        for (rchip, kp, sift, fx, aid, info) in extracted_list:
            px = viz_featrow.draw_feat_row(
                rchip, fx, kp, sift, fnum2, nRows, nCols, px,
                prevsift=prevsift, aid=aid, info=info)
            prevsift = sift
        if not same_fig:
            # Reconnect click handling on the secondary figure.
            ih.connect_callback(fig2, 'button_press_event', self.on_click)
def flann_add_time_experiment():
    """
    builds plot of number of annotations vs indexer build time.

    TODO: time experiment

    CommandLine:
        python -m wbia.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --show
        python -m wbia.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_Master0 --show
        utprof.py -m wbia.algo.hots._neighbor_experiment --test-flann_add_time_experiment --show

        valgrind --tool=memcheck --suppressions=valgrind-python.supp python -m wbia.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --no-with-reindex

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots._neighbor_experiment import *  # NOQA
        >>> import wbia
        >>> #ibs = wbia.opendb('PZ_MTEST')
        >>> result = flann_add_time_experiment()
        >>> # verify results
        >>> print(result)
        >>> ut.show_if_requested()

    """
    import wbia
    import utool as ut
    import numpy as np
    import wbia.plottool as pt

    def make_flann_index(vecs, flann_params):
        # Build a fresh FLANN index over the stacked descriptor rows.
        flann = pyflann.FLANN()
        flann.build_index(vecs, **flann_params)
        return flann

    db = ut.get_argval('--db')
    ibs = wbia.opendb(db=db)

    # Input: experiment sizes are tuned per database
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        reindex_stride = 16
        addition_stride = 4
        max_ceiling = 120
    elif ibs.get_dbname() == 'PZ_Master0':
        # ibs = wbia.opendb(db='GZ_ALL')
        initial = 32
        reindex_stride = 32
        addition_stride = 16
        max_ceiling = 300001
    else:
        assert False
    # max_ceiling = 32
    all_daids = ibs.get_valid_aids()
    max_num = min(max_ceiling, len(all_daids))
    flann_params = vt.get_flann_params()

    # Output: (annot count, elapsed seconds) samples for each strategy
    count_list, time_list_reindex = [], []
    count_list2, time_list_addition = [], []

    # Setup
    # all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    all_randomize_daids_ = all_daids
    # ensure all features are computed
    ibs.get_annot_vecs(all_randomize_daids_)

    def reindex_step(count, count_list, time_list_reindex):
        # Time a full rebuild of the index over the first `count` annots.
        daids = all_randomize_daids_[0:count]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann = make_flann_index(vecs, flann_params)  # NOQA
        count_list.append(count)
        time_list_reindex.append(t.ellapsed)

    def addition_step(count, flann, count_list2, time_list_addition):
        # Time an incremental add of a single annot's vectors.
        daids = all_randomize_daids_[count:count + 1]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann.add_points(vecs)
        count_list2.append(count)
        time_list_addition.append(t.ellapsed)

    def make_initial_index(initial):
        # Seed index used as the starting point for the addition experiment.
        daids = all_randomize_daids_[0:initial + 1]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        flann = make_flann_index(vecs, flann_params)
        return flann

    WITH_REINDEX = not ut.get_argflag('--no-with-reindex')
    if WITH_REINDEX:
        # Reindex Part
        reindex_lbl = 'Reindexing'
        _reindex_iter = range(1, max_num, reindex_stride)
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_lbl, freq=1)
        for count in reindex_iter:
            reindex_step(count, count_list, time_list_reindex)

    # Add Part
    flann = make_initial_index(initial)
    addition_lbl = 'Addition'
    _addition_iter = range(initial + 1, max_num, addition_stride)
    addition_iter = ut.ProgressIter(_addition_iter, lbl=addition_lbl)
    for count in addition_iter:
        addition_step(count, flann, count_list2, time_list_addition)

    logger.info('---')
    logger.info('Reindex took time_list_reindex %.2s seconds' %
                sum(time_list_reindex))
    logger.info('Addition took time_list_reindex  %.2s seconds' %
                sum(time_list_addition))
    logger.info('---')
    statskw = dict(precision=2, newlines=True)
    logger.info('Reindex stats ' +
                ut.get_stats_str(time_list_reindex, **statskw))
    logger.info('Addition stats ' +
                ut.get_stats_str(time_list_addition, **statskw))

    logger.info('Plotting')

    # with pt.FigureContext:

    # BUGFIX: ``iter(...).next`` was a Python-2-ism that raises
    # AttributeError on Python 3; use the builtin ``next`` instead.
    _fnum_iter = iter(range(0, 2))

    def next_fnum():
        return next(_fnum_iter)

    pt.figure(fnum=next_fnum())
    if WITH_REINDEX:
        pt.plot2(
            count_list,
            time_list_reindex,
            marker='-o',
            equal_aspect=False,
            x_label='num_annotations',
            label=reindex_lbl + ' Time',
            dark=False,
        )

    # pt.figure(fnum=next_fnum())
    pt.plot2(
        count_list2,
        time_list_addition,
        marker='-o',
        equal_aspect=False,
        x_label='num_annotations',
        label=addition_lbl + ' Time',
    )

    # (removed a stray no-op ``pt`` expression statement here)
    pt.legend()
Пример #28
0
def get_dbinfo(ibs, verbose=True,
               with_imgsize=False,
               with_bytes=False,
               with_contrib=False,
               with_agesex=False,
               with_header=True,
               short=False,
               tag='dbinfo',
               aid_list=None):
    """

    Returns dictionary of digestable database information
    Infostr is a string summary of all the stats. Prints infostr in addition to
    returning locals

    Args:
        ibs (IBEISController):
        verbose (bool):
        with_imgsize (bool):
        with_bytes (bool):

    Returns:
        dict:

    CommandLine:
        python -m ibeis.other.dbinfo --exec-get_dbinfo:0
        python -m ibeis.other.dbinfo --test-get_dbinfo:1
        python -m ibeis.other.dbinfo --test-get_dbinfo:0 --db NNP_Master3
        python -m ibeis.other.dbinfo --test-get_dbinfo:0 --db PZ_Master1
        python -m ibeis.other.dbinfo --test-get_dbinfo:0 --db GZ_ALL
        python -m ibeis.other.dbinfo --exec-get_dbinfo:0 --db PZ_ViewPoints
        python -m ibeis.other.dbinfo --exec-get_dbinfo:0 --db GZ_Master1

        python -m ibeis.other.dbinfo --exec-get_dbinfo:0 -a ctrl
        python -m ibeis.other.dbinfo --exec-get_dbinfo:0 -a default:minqual=ok,require_timestamp=True --dbdir ~/lev/media/danger/LEWA
        python -m ibeis.other.dbinfo --exec-get_dbinfo:0 -a default:minqual=ok,require_timestamp=True --dbdir ~/lev/media/danger/LEWA --loadbackup=0

        python -m ibeis.other.dbinfo --exec-get_dbinfo:0 -a default: --dbdir ~/lev/media/danger/LEWA
        python -m ibeis.other.dbinfo --exec-get_dbinfo:0 -a default: --dbdir ~/lev/media/danger/LEWA --loadbackup=0

    Example1:
        >>> # SCRIPT
        >>> from ibeis.other.dbinfo import *  # NOQA
        >>> import ibeis
        >>> defaultdb = 'testdb1'
        >>> ibs, aid_list = ibeis.testdata_aids(defaultdb, a='default:minqual=ok,view=primary,view_ext1=1')
        >>> kwargs = ut.get_kwdefaults(get_dbinfo)
        >>> kwargs['verbose'] = False
        >>> kwargs['aid_list'] = aid_list
        >>> kwargs = ut.parse_dict_from_argv(kwargs)
        >>> output = get_dbinfo(ibs, **kwargs)
        >>> result = (output['info_str'])
        >>> print(result)
        >>> #ibs = ibeis.opendb(defaultdb='testdb1')
        >>> # <HACK FOR FILTERING>
        >>> #from ibeis.expt import cfghelpers
        >>> #from ibeis.expt import annotation_configs
        >>> #from ibeis.init import filter_annots
        >>> #named_defaults_dict = ut.dict_take(annotation_configs.__dict__,
        >>> #                                   annotation_configs.TEST_NAMES)
        >>> #named_qcfg_defaults = dict(zip(annotation_configs.TEST_NAMES,
        >>> #                               ut.get_list_column(named_defaults_dict, 'qcfg')))
        >>> #acfg = cfghelpers.parse_argv_cfg(('--annot-filter', '-a'), named_defaults_dict=named_qcfg_defaults, default=None)[0]
        >>> #aid_list = ibs.get_valid_aids()
        >>> # </HACK FOR FILTERING>

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.other.dbinfo import *  # NOQA
        >>> import ibeis
        >>> verbose = True
        >>> short = True
        >>> #ibs = ibeis.opendb(db='GZ_ALL')
        >>> #ibs = ibeis.opendb(db='PZ_Master0')
        >>> ibs = ibeis.opendb('testdb1')
        >>> assert ibs.get_dbname() == 'testdb1', 'DO NOT DELETE CONTRIBUTORS OF OTHER DBS'
        >>> ibs.delete_contributors(ibs.get_valid_contrib_rowids())
        >>> ibs.delete_empty_nids()
        >>> #ibs = ibeis.opendb(db='PZ_MTEST')
        >>> output = get_dbinfo(ibs, with_contrib=False, verbose=False, short=True)
        >>> result = (output['info_str'])
        >>> print(result)
        +============================
        DB Info:  testdb1
        DB Notes: None
        DB NumContrib: 0
        ----------
        # Names                      = 7
        # Names (unassociated)       = 0
        # Names (singleton)          = 5
        # Names (multiton)           = 2
        ----------
        # Annots                     = 13
        # Annots (unknown)           = 4
        # Annots (singleton)         = 5
        # Annots (multiton)          = 4
        ----------
        # Img                        = 13
        L============================
    """
    # TODO Database size in bytes
    # TODO: occurrence, contributors, etc...

    # Basic variables
    request_annot_subset = False
    _input_aid_list = aid_list  # NOQA
    if aid_list is None:
        valid_aids = ibs.get_valid_aids()
        valid_nids = ibs.get_valid_nids()
        valid_gids = ibs.get_valid_gids()
    else:
        if isinstance(aid_list, str):
            # Hack to get experiment stats on aids
            acfg_name_list = [aid_list]
            print('Specified custom aids via acfgname %s' % (acfg_name_list,))
            from ibeis.expt import experiment_helpers
            acfg_list, expanded_aids_list = experiment_helpers.get_annotcfg_list(
                ibs, acfg_name_list)
            aid_list = sorted(list(set(ut.flatten(ut.flatten(expanded_aids_list)))))
            #aid_list =
        if verbose:
            print('Specified %d custom aids' % (len(aid_list,)))
        request_annot_subset = True
        valid_aids = aid_list
        valid_nids = list(
            set(ibs.get_annot_nids(aid_list, distinguish_unknowns=False)) -
            {const.UNKNOWN_NAME_ROWID}
        )
        valid_gids = list(set(ibs.get_annot_gids(aid_list)))
    #associated_nids = ibs.get_valid_nids(filter_empty=True)  # nids with at least one annotation
    FILTER_HACK = True
    if FILTER_HACK:
        # HUGE HACK - get only images and names with filtered aids
        valid_aids_ = ibs.filter_aids_custom(valid_aids)
        valid_nids_ = ibs.filter_nids_custom(valid_nids)
        valid_gids_ = ibs.filter_gids_custom(valid_gids)
        if verbose:
            print('Filtered %d names' % (len(valid_nids) - len(valid_nids_)))
            print('Filtered %d images' % (len(valid_gids) - len(valid_gids_)))
            print('Filtered %d annots' % (len(valid_aids) - len(valid_aids_)))
        valid_gids = valid_gids_
        valid_nids = valid_nids_
        valid_aids = valid_aids_
        #associated_nids = ut.compress(associated_nids, map(any,
        #ibs.unflat_map(ibs.get_annot_custom_filterflags,
        #               ibs.get_name_aids(associated_nids))))

    # Image info
    if verbose:
        print('Checking Image Info')
    gx2_aids = ibs.get_image_aids(valid_gids)
    if FILTER_HACK:
        gx2_aids = [ibs.filter_aids_custom(aids) for aids in gx2_aids]  # HACK FOR FILTER
    if request_annot_subset:
        # remove annots not in this subset
        valid_aids_set = set(valid_aids)
        gx2_aids = [list(set(aids).intersection(valid_aids_set)) for aids in gx2_aids]

    gx2_nAnnots = np.array(list(map(len, gx2_aids)))
    image_without_annots = len(np.where(gx2_nAnnots == 0)[0])
    gx2_nAnnots_stats  = ut.get_stats_str(gx2_nAnnots, newlines=True, use_median=True)
    image_reviewed_list = ibs.get_image_reviewed(valid_gids)

    # Name stats
    if verbose:
        print('Checking Name Info')
    nx2_aids = ibs.get_name_aids(valid_nids)
    if FILTER_HACK:
        nx2_aids =  [ibs.filter_aids_custom(aids) for aids in nx2_aids]    # HACK FOR FILTER
    if request_annot_subset:
        # remove annots not in this subset
        valid_aids_set = set(valid_aids)
        nx2_aids = [list(set(aids).intersection(valid_aids_set)) for aids in nx2_aids]
    associated_nids = ut.compress(valid_nids, list(map(len, nx2_aids)))

    ibs.check_name_mapping_consistency(nx2_aids)

    # Occurrence Info
    def compute_annot_occurrence_ids(ibs, aid_list):
        # Group annots into occurrences by clustering their images in time.
        from ibeis.algo.preproc import preproc_occurrence
        gid_list = ibs.get_annot_gids(aid_list)
        gid2_aids = ut.group_items(aid_list, gid_list)
        flat_imgsetids, flat_gids = preproc_occurrence.ibeis_compute_occurrences(ibs, gid_list, seconds_thresh=4 * 60 * 60, verbose=False)
        occurid2_gids = ut.group_items(flat_gids, flat_imgsetids)
        occurid2_aids = {oid: ut.flatten(ut.take(gid2_aids, gids)) for oid, gids in occurid2_gids.items()}
        return occurid2_aids

    import utool
    with utool.embed_on_exception_context:
        occurid2_aids = compute_annot_occurrence_ids(ibs, valid_aids)
        occur_nids = ibs.unflat_map(ibs.get_annot_nids, occurid2_aids.values())
        occur_unique_nids = [ut.unique(nids) for nids in occur_nids]
        nid2_occurxs = ut.ddict(list)
        for occurx, nids in enumerate(occur_unique_nids):
            for nid in nids:
                nid2_occurxs[nid].append(occurx)

    nid2_occurx_single = {nid: occurxs for nid, occurxs in nid2_occurxs.items() if len(occurxs) <= 1}
    nid2_occurx_resight = {nid: occurxs for nid, occurxs in nid2_occurxs.items() if len(occurxs) > 1}
    singlesight_encounters = ibs.get_name_aids(nid2_occurx_single.keys())

    singlesight_annot_stats = ut.get_stats(list(map(len, singlesight_encounters)), use_median=True, use_sum=True)
    resight_name_stats = ut.get_stats(list(map(len, nid2_occurx_resight.values())), use_median=True, use_sum=True)

    try:
        aid_pairs = ibs.filter_aidpairs_by_tags(min_num=0)
        undirected_tags = ibs.get_aidpair_tags(aid_pairs.T[0], aid_pairs.T[1], directed=False)
        tagged_pairs = list(zip(aid_pairs.tolist(), undirected_tags))
        tag_dict = ut.groupby_tags(tagged_pairs, undirected_tags)
        pair_tag_info = ut.map_dict_vals(len, tag_dict)

        num_reviewed_pairs = sum(ibs.get_annot_pair_is_reviewed(aid_pairs.T[0], aid_pairs.T[1]))
        pair_tag_info['num_reviewed'] = num_reviewed_pairs
    except Exception:
        # Pair tagging is optional metadata; fall back to empty info rather
        # than failing the whole report.
        pair_tag_info = {}

    #print(ut.dict_str(pair_tag_info))

    # Annot Stats
    # TODO: number of images where chips cover entire image
    # TODO: total image coverage of annotation
    # TODO: total annotation overlap
    """
    ax2_unknown = ibs.is_aid_unknown(valid_aids)
    ax2_nid = ibs.get_annot_name_rowids(valid_aids)
    assert all([nid < 0 if unknown else nid > 0 for nid, unknown in
                zip(ax2_nid, ax2_unknown)]), 'bad annot nid'
    """
    #
    if verbose:
        print('Checking Annot Species')
    unknown_aids = ut.compress(valid_aids, ibs.is_aid_unknown(valid_aids))
    species_list = ibs.get_annot_species_texts(valid_aids)
    species2_aids = ut.group_items(valid_aids, species_list)
    species2_nAids = {key: len(val) for key, val in species2_aids.items()}

    if verbose:
        print('Checking Multiton/Singleton Species')
    nx2_nAnnots = np.array(list(map(len, nx2_aids)))
    # Seperate singleton / multitons
    multiton_nxs  = np.where(nx2_nAnnots > 1)[0]
    singleton_nxs = np.where(nx2_nAnnots == 1)[0]
    unassociated_nxs = np.where(nx2_nAnnots == 0)[0]
    assert len(np.intersect1d(singleton_nxs, multiton_nxs)) == 0, 'intersecting names'
    valid_nxs      = np.hstack([multiton_nxs, singleton_nxs])
    num_names_with_gt = len(multiton_nxs)

    # Annot Info
    if verbose:
        print('Checking Annot Info')
    multiton_aids_list = ut.take(nx2_aids, multiton_nxs)
    assert len(set(multiton_nxs)) == len(multiton_nxs)
    if len(multiton_aids_list) == 0:
        # BUGFIX: ``np.int`` was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin ``int`` is the documented replacement.
        multiton_aids = np.array([], dtype=int)
    else:
        multiton_aids = np.hstack(multiton_aids_list)
        assert len(set(multiton_aids)) == len(multiton_aids), 'duplicate annot'
    singleton_aids = ut.take(nx2_aids, singleton_nxs)
    multiton_nid2_nannots = list(map(len, multiton_aids_list))

    # Image size stats
    if with_imgsize:
        if verbose:
            print('Checking ImageSize Info')
        gpath_list = ibs.get_image_paths(valid_gids)
        def wh_print_stats(wh_list):
            # Format min/max/mean/std of (width, height) pairs for display.
            if len(wh_list) == 0:
                return '{empty}'
            wh_list = np.asarray(wh_list)
            stat_dict = OrderedDict(
                [( 'max', wh_list.max(0)),
                 ( 'min', wh_list.min(0)),
                 ('mean', wh_list.mean(0)),
                 ( 'std', wh_list.std(0))])
            def arr2str(var):
                return ('[' + (
                    ', '.join(list(map(lambda x: '%.1f' % x, var)))
                ) + ']')
            ret = (',\n    '.join([
                '%s:%s' % (key, arr2str(val))
                for key, val in stat_dict.items()
            ]))
            return '{\n    ' + ret + '\n}'

        print('reading image sizes')
        # Image size stats
        img_size_list  = ibs.get_image_sizes(valid_gids)
        img_size_stats  = wh_print_stats(img_size_list)

        # Chip size stats
        annotation_bbox_list = ibs.get_annot_bboxes(valid_aids)
        annotation_bbox_arr = np.array(annotation_bbox_list)
        if len(annotation_bbox_arr) == 0:
            annotation_size_list = []
        else:
            annotation_size_list = annotation_bbox_arr[:, 2:4]
        chip_size_stats = wh_print_stats(annotation_size_list)
        imgsize_stat_lines = [
            (' # Img in dir                 = %d' % len(gpath_list)),
            (' Image Size Stats  = %s' % (img_size_stats,)),
            (' * Chip Size Stats = %s' % (chip_size_stats,)),
        ]
    else:
        imgsize_stat_lines = []

    if verbose:
        print('Building Stats String')

    multiton_stats = ut.get_stats_str(multiton_nid2_nannots, newlines=True, use_median=True)

    # Time stats
    unixtime_list = ibs.get_image_unixtime(valid_gids)
    unixtime_list = ut.list_replace(unixtime_list, -1, float('nan'))
    #valid_unixtime_list = [time for time in unixtime_list if time != -1]
    #unixtime_statstr = ibs.get_image_time_statstr(valid_gids)
    if ut.get_argflag('--hackshow-unixtime'):
        show_time_distributions(ibs, unixtime_list)
        ut.show_if_requested()
    unixtime_statstr = ut.get_timestats_str(unixtime_list, newlines=True, full=True)

    # GPS stats
    gps_list_ = ibs.get_image_gps(valid_gids)
    gpsvalid_list = [gps != (-1, -1) for gps in gps_list_]
    gps_list  = ut.compress(gps_list_, gpsvalid_list)

    def get_annot_age_stats(aid_list):
        # Bucket annots into coarse age classes from their [min, max] month
        # estimates.
        annot_age_months_est_min = ibs.get_annot_age_months_est_min(aid_list)
        annot_age_months_est_max = ibs.get_annot_age_months_est_max(aid_list)
        age_dict = ut.ddict((lambda : 0))
        for min_age, max_age in zip(annot_age_months_est_min, annot_age_months_est_max):
            # NOTE(review): ``max_age < 12`` raises TypeError on Python 3
            # when max_age is None — confirm max_age is always an int here.
            if (min_age is None or min_age < 12) and max_age < 12:
                age_dict['Infant'] += 1
            elif 12 <= min_age and min_age < 36 and 12 <= max_age and max_age < 36:
                age_dict['Juvenile'] += 1
            # BUGFIX: test ``max_age is None`` first; the old order evaluated
            # ``36 <= None`` which raises TypeError on Python 3.
            elif 36 <= min_age and (max_age is None or 36 <= max_age):
                age_dict['Adult'] += 1
            else:
                print('Found UNKNOWN Age: %r, %r' % (min_age, max_age, ))
                age_dict['UNKNOWN'] += 1
        return age_dict

    def get_annot_sex_stats(aid_list):
        # Count annots per sex label, dropping labels with zero annots.
        annot_sextext_list = ibs.get_annot_sex_texts(aid_list)
        sextext2_aids = ut.group_items(aid_list, annot_sextext_list)
        sex_keys = list(ibs.const.SEX_TEXT_TO_INT.keys())
        assert set(sex_keys) >= set(annot_sextext_list), 'bad keys: ' + str(set(annot_sextext_list) - set(sex_keys))
        sextext2_nAnnots = ut.odict([(key, len(sextext2_aids.get(key, []))) for key in sex_keys])
        # Filter 0's
        sextext2_nAnnots = {key: val for key, val in six.iteritems(sextext2_nAnnots) if val != 0}
        return sextext2_nAnnots

    if verbose:
        print('Checking Other Annot Stats')

    qualtext2_nAnnots = ibs.get_annot_qual_stats(valid_aids)
    yawtext2_nAnnots = ibs.get_annot_yaw_stats(valid_aids)
    agetext2_nAnnots = get_annot_age_stats(valid_aids)
    sextext2_nAnnots = get_annot_sex_stats(valid_aids)

    if verbose:
        print('Checking Contrib Stats')

    # Contributor Statistics
    # hack remove colon for image alignment
    def fix_tag_list(tag_list):
        return [None if tag is None else tag.replace(':', ';') for tag in tag_list]
    image_contrib_tags = fix_tag_list(ibs.get_image_contributor_tag(valid_gids))
    annot_contrib_tags = fix_tag_list(ibs.get_annot_image_contributor_tag(valid_aids))
    contrib_tag_to_gids = ut.group_items(valid_gids, image_contrib_tags)
    contrib_tag_to_aids = ut.group_items(valid_aids, annot_contrib_tags)

    contrib_tag_to_qualstats = {key: ibs.get_annot_qual_stats(aids) for key, aids in six.iteritems(contrib_tag_to_aids)}
    contrib_tag_to_viewstats = {key: ibs.get_annot_yaw_stats(aids) for key, aids in six.iteritems(contrib_tag_to_aids)}

    contrib_tag_to_nImages = {key: len(val) for key, val in six.iteritems(contrib_tag_to_gids)}
    contrib_tag_to_nAnnots = {key: len(val) for key, val in six.iteritems(contrib_tag_to_aids)}

    if verbose:
        print('Summarizing')

    # Summarize stats
    num_names = len(valid_nids)
    num_names_unassociated = len(valid_nids) - len(associated_nids)
    num_names_singleton = len(singleton_nxs)
    num_names_multiton =  len(multiton_nxs)

    num_singleton_annots = len(singleton_aids)
    num_multiton_annots = len(multiton_aids)
    num_unknown_annots = len(unknown_aids)
    num_annots = len(valid_aids)

    if with_bytes:
        if verbose:
            print('Checking Disk Space')
        ibsdir_space   = ut.byte_str2(ut.get_disk_space(ibs.get_ibsdir()))
        dbdir_space    = ut.byte_str2(ut.get_disk_space(ibs.get_dbdir()))
        imgdir_space   = ut.byte_str2(ut.get_disk_space(ibs.get_imgdir()))
        cachedir_space = ut.byte_str2(ut.get_disk_space(ibs.get_cachedir()))

    if True:
        if verbose:
            print('Check asserts')
        try:
            bad_aids = np.intersect1d(multiton_aids, unknown_aids)
            _num_names_total_check = num_names_singleton + num_names_unassociated + num_names_multiton
            _num_annots_total_check = num_unknown_annots + num_singleton_annots + num_multiton_annots
            assert len(bad_aids) == 0, 'intersecting multiton aids and unknown aids'
            assert _num_names_total_check == num_names, 'inconsistent num names'
            #if not request_annot_subset:
            # dont check this if you have an annot subset
            assert _num_annots_total_check == num_annots, 'inconsistent num annots'
        except Exception as ex:
            ut.printex(ex, keys=[
                '_num_names_total_check',
                'num_names',
                '_num_annots_total_check',
                'num_annots',
                'num_names_singleton',
                'num_names_multiton',
                'num_unknown_annots',
                'num_multiton_annots',
                'num_singleton_annots',
            ])
            raise

    # Get contributor statistics
    contrib_rowids = ibs.get_valid_contrib_rowids()
    num_contributors = len(contrib_rowids)

    # print
    num_tabs = 5

    def align2(str_):
        return ut.align(str_, ':', ' :')

    def align_dict2(dict_):
        str_ = ut.dict_str(dict_)
        return align2(str_)

    header_block_lines = (
        [('+============================'), ] + (
            [
                ('+ singleton := single sighting'),
                ('+ multiton  := multiple sightings'),
                ('--' * num_tabs),
            ] if not short and with_header else []
        )
    )

    source_block_lines = [
        ('DB Info:  ' + ibs.get_dbname()),
        ('DB Notes: ' + ibs.get_dbnotes()),
        ('DB NumContrib: %d' % num_contributors),
    ]

    bytes_block_lines = [
        ('--' * num_tabs),
        ('DB Bytes: '),
        ('     +- dbdir nBytes:         ' + dbdir_space),
        ('     |  +- _ibsdb nBytes:     ' + ibsdir_space),
        ('     |  |  +-imgdir nBytes:   ' + imgdir_space),
        ('     |  |  +-cachedir nBytes: ' + cachedir_space),
    ] if with_bytes else []

    name_block_lines = [
        ('--' * num_tabs),
        ('# Names                      = %d' % num_names),
        ('# Names (unassociated)       = %d' % num_names_unassociated),
        ('# Names (singleton)          = %d' % num_names_singleton),
        ('# Names (multiton)           = %d' % num_names_multiton),
    ]

    subset_str = '        ' if not request_annot_subset else '(SUBSET)'

    annot_block_lines = [
        ('--' * num_tabs),
        ('# Annots %s            = %d' % (subset_str, num_annots,)),
        ('# Annots (unknown)           = %d' % num_unknown_annots),
        ('# Annots (singleton)         = %d' % num_singleton_annots),
        ('# Annots (multiton)          = %d' % num_multiton_annots),
    ]

    annot_per_basic_block_lines = [
        ('--' * num_tabs),
        ('# Annots per Name (multiton) = %s' % (align2(multiton_stats),)),
        ('# Annots per Image           = %s' % (align2(gx2_nAnnots_stats),)),
        ('# Annots per Species         = %s' % (align_dict2(species2_nAids),)),
    ] if not short else []

    occurrence_block_lines = [
        ('--' * num_tabs),
        ('# Occurrence Per Name (Resights) = %s' % (align_dict2(resight_name_stats),)),
        ('# Annots per Encounter (Singlesights) = %s' % (align_dict2(singlesight_annot_stats),)),
        ('# Pair Tag Info (annots) = %s' % (align_dict2(pair_tag_info),)),
    ] if not short else []

    annot_per_qualview_block_lines = [
        None if short else '# Annots per Viewpoint = %s' % align_dict2(yawtext2_nAnnots),
        None if short else '# Annots per Quality = %s' % align_dict2(qualtext2_nAnnots),
    ]

    annot_per_agesex_block_lines = [
        '# Annots per Age = %s' % align_dict2(agetext2_nAnnots),
        '# Annots per Sex = %s' % align_dict2(sextext2_nAnnots),
    ] if not short  and with_agesex else []

    contrib_block_lines = [
        '# Images per contributor       = ' + align_dict2(contrib_tag_to_nImages),
        '# Annots per contributor       = ' + align_dict2(contrib_tag_to_nAnnots),
        '# Quality per contributor      = ' + ut.dict_str(contrib_tag_to_qualstats, sorted_=True),
        '# Viewpoint per contributor    = ' + ut.dict_str(contrib_tag_to_viewstats, sorted_=True),
    ] if with_contrib else []

    img_block_lines = [
        ('--' * num_tabs),
        ('# Img                        = %d' % len(valid_gids)),
        None if short else ('# Img reviewed               = %d' % sum(image_reviewed_list)),
        None if short else ('# Img with gps               = %d' % len(gps_list)),
        #('# Img with timestamp         = %d' % len(valid_unixtime_list)),
        None if short else ('Img Time Stats               = %s' % (align2(unixtime_statstr),)),
    ]

    info_str_lines = (
        header_block_lines +
        bytes_block_lines +
        source_block_lines +
        name_block_lines +
        annot_block_lines +
        annot_per_basic_block_lines +
        occurrence_block_lines +
        annot_per_qualview_block_lines +
        annot_per_agesex_block_lines +
        img_block_lines +
        contrib_block_lines +
        imgsize_stat_lines +
        [('L============================'), ]
    )
    info_str = '\n'.join(ut.filter_Nones(info_str_lines))
    info_str2 = ut.indent(info_str, '[{tag}]'.format(tag=tag))
    if verbose:
        print(info_str2)
    locals_ = locals()
    return locals_
Example #29
0
    def select_ith_match(self, mx):
        """
        Select the mx-th feature match and visualize it.

        Prints diagnostic information about match scores, keypoint
        details, and SIFT descriptors, then redraws the chip-match view
        with the selected match highlighted and draws a per-feature
        detail row for each chip involved.

        Args:
            mx (int): index of the feature match to select and visualize

        CommandLine:
            python -m ibeis.viz.interact.interact_matches --test-select_ith_match --show

        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.viz.interact.interact_matches import *  # NOQA
            >>> self = testdata_match_interact(mx=1)
            >>> pt.show_if_requested()
        """
        # Localize frequently accessed interaction state
        ibs        = self.ibs
        qaid       = self.qaid
        aid        = self.daid
        fnum       = self.fnum
        figtitle   = self.figtitle
        rchip1     = self.rchip1
        rchip2     = self.rchip2
        aid        = self.daid
        same_fig   = self.same_fig
        # Remember which match is currently selected
        self.mx    = mx
        print('+--- SELECT --- ')
        print('qaid=%r, daid=%r' % (qaid, aid))
        print('... selecting mx-th=%r feature match' % mx)
        if False:
            # Disabled debug dump of the per-match score vectors
            print('score stats:')
            print(ut.get_stats_str(self.fsv, axis=0, newlines=True))
            print('fsv[mx] = %r' % (self.fsv[mx],))
            print('fs[mx] = %r' % (self.fs[mx],))
        """
        # test feature weights of actual chips
        fx1, fx2 = fm[mx]
        daid = aid
        ibs.get_annot_fgweights([daid])[0][fx2]
        ibs.get_annot_fgweights([qaid])[0][fx1]
        """
        #----------------------
        # Get info for the select_ith_match plot
        self.mode = 1
        # Get the mx-th feature match (feature indices into each chip)
        fx1, fx2 = self.fm[mx]
        fscore2  = self.fs[mx]
        fk2      = self.fk[mx]
        # Keypoints and descriptors for both annots under their
        # respective query/data configurations
        kpts1 = ibs.get_annot_kpts([self.qaid], config2_=self.query_config2_)[0]
        kpts2 = ibs.get_annot_kpts([self.daid], config2_=self.data_config2_)[0]
        desc1 = ibs.get_annot_vecs([self.qaid], config2_=self.query_config2_)[0]
        desc2 = ibs.get_annot_vecs([self.daid], config2_=self.data_config2_)[0]
        kp1, kp2     = kpts1[fx1], kpts2[fx2]
        sift1, sift2 = desc1[fx1], desc2[fx2]
        info1 = '\nquery'
        info2 = '\nk=%r fscore=%r' % (fk2, fscore2)
        #last_state.last_fx = fx1
        self.last_fx = fx1
        # Extracted keypoints to draw (one detail row per chip)
        extracted_list = [(rchip1, kp1, sift1, fx1, self.qaid, info1),
                          (rchip2, kp2, sift2, fx2, self.daid, info2)]
        # Normalizing keypoint (disabled: requires cm.filt2_meta)
        #if hasattr(cm, 'filt2_meta') and 'lnbnn' in cm.filt2_meta:
        #    qfx2_norm = cm.filt2_meta['lnbnn']
        #    # Normalizing chip and feature
        #    (aid3, fx3, normk) = qfx2_norm[fx1]
        #    rchip3 = ibs.get_annot_chips(aid3)
        #    kp3 = ibs.get_annot_kpts(aid3)[fx3]
        #    sift3 = ibs.get_annot_vecs(aid3)[fx3]
        #    info3 = '\nnorm %s k=%r' % (vh.get_aidstrs(aid3), normk)
        #    extracted_list.append((rchip3, kp3, sift3, fx3, aid3, info3))
        #else:
        #    pass
        # print('WARNING: meta doesnt exist')

        #----------------------
        # Draw the select_ith_match plot
        # The extra row holds the chip-match overview when sharing a figure
        nRows, nCols = len(extracted_list) + same_fig, 3
        # Draw matching chips and features
        sel_fm = np.array([(fx1, fx2)])
        pnum1 = (nRows, 1, 1) if same_fig else (1, 1, 1)
        vert = self.vert if self.vert is not None else False
        self.chipmatch_view(pnum1, ell_alpha=.4, ell_linewidth=1.8,
                            colors=df2.BLUE, sel_fm=sel_fm, vert=vert)
        # Draw selected feature matches
        px = nCols * same_fig  # plot offset
        prevsift = None
        if not same_fig:
            #fnum2 = fnum + len(viz.FNUMS)
            fnum2 = self.fnum2
            fig2 = df2.figure(fnum=fnum2, docla=True, doclf=True)
        else:
            fnum2 = fnum
        # One detail row (patch / warped patch / SIFT hist) per keypoint
        for (rchip, kp, sift, fx, aid, info) in extracted_list:
            px = viz_featrow.draw_feat_row(rchip, fx, kp, sift, fnum2, nRows, nCols, px,
                                           prevsift=prevsift, aid=aid, info=info)
            prevsift = sift
        if not same_fig:
            # Re-hook click handling on the secondary figure
            ih.connect_callback(fig2, 'button_press_event', self.on_click)
            df2.set_figtitle(figtitle + vh.get_vsstr(qaid, aid))
Example #30
0
def viz_annot_with_metrics(ibs,
                           invindex,
                           aid,
                           metrics,
                           metric_keys=None,
                           show_orig=True,
                           show_idf=True,
                           show_words=False,
                           show_analysis=True,
                           show_aveprecision=True,
                           qfx2_closest_k_list=None,
                           show_word_correct_assignments=False,
                           qres_list=None):
    """
    Plots an annotation's chip several times, coloring its keypoints by
    different word-level metrics (idf, word label, correct-assignment
    flags, and stats from ``metrics``).

    Args:
        ibs (IBEISController):
        invindex (InvertedIndex): object for fast vocab lookup
        aid (int):
        metrics (namedtuple):
        metric_keys (list or None): metric names, or
            ``(metric_name, stat_key)`` tuples, to plot. Defaults to
            ``['wx2_nMembers', ('wx2_pdist_stats', 'mean'),
            ('wx2_wdist_stats', 'mean')]``.
        qfx2_closest_k_list (list or None): per-qres ranked positions;
            when given, ``qres_list`` is iterated in lockstep with it.

    Example:
        >>> from ibeis.algo.hots.smk.smk_plots import *  # NOQA
        >>> from ibeis.algo.hots.smk import smk_debug
        >>> from ibeis.algo.hots.smk import smk_repr
        >>> #tup = smk_debug.testdata_raw_internals0(db='GZ_ALL', nWords=64000)
        >>> #tup = smk_debug.testdata_raw_internals0(db='GZ_ALL', nWords=8000)
        >>> #tup = smk_debug.testdata_raw_internals0(db='PZ_Master0', nWords=64000)
        >>> tup = smk_debug.testdata_raw_internals0(db='PZ_Mothers', nWords=8000)
        >>> ibs, annots_df, daids, qaids, invindex, qreq_ = tup
        >>> smk_repr.compute_data_internals_(invindex, qreq_.qparams, delete_rawvecs=False)
        >>> invindex.idx2_wxs = np.array(invindex.idx2_wxs)
        >>> metric_keys=['wx2_nMembers', ('wx2_pdist_stats', 'mean'), ('wx2_wdist_stats', 'mean')]
        >>> metrics = compute_word_metrics(invindex)
        >>> aid = 1

    """
    # Build the default metric list per-call; a mutable default argument
    # would be shared (and mutable) across every call of this function.
    if metric_keys is None:
        metric_keys = ['wx2_nMembers', ('wx2_pdist_stats', 'mean'),
                       ('wx2_wdist_stats', 'mean')]

    kpts = ibs.get_annot_kpts(aid)
    if ut.VERBOSE:
        ut.super_print(kpts)

    # Word assignments for this annot are needed by the idf plot, the
    # word plot, the correct-assignment plot AND the trailing metric
    # plots, so compute them unconditionally. (Previously they were only
    # computed when show_idf/show_word_correct_assignments was set,
    # which raised NameError for the other consumers.)
    # Get only the first assigned word
    # FIXME: need to look at multi-assignment
    _mask = invindex.idx2_daid == aid
    fxs = invindex.idx2_dfx[_mask]
    wxs = invindex.idx2_wxs[_mask].T[0].T

    assert len(fxs) == len(kpts)
    assert len(fxs) == len(wxs)

    fnum = 1

    dbname = ibs.get_dbname()

    def _plot(metric,
              fnum=1,
              lbl='',
              annote=True,
              darken=.1,
              colortype='score',
              **kwargs):
        # Draw one chip visualization colored by ``metric`` and return
        # the next free figure number.
        print('ploting fnum=%r' % fnum)
        lblaug = ' db=%r' % (dbname,)
        figtitle = lbl + lblaug
        plot_chip_metric(ibs,
                         aid,
                         metric=metric,
                         fnum=fnum,
                         lbl=lbl,
                         figtitle=figtitle,
                         annote=annote,
                         darken=darken,
                         colortype=colortype,
                         **kwargs)
        return fnum + 1

    # Original Plot (metric=None means no coloring overlay)
    if show_orig:
        fnum = _plot(None,
                     fnum=fnum,
                     lbl='Orig Chip',
                     annote=False,
                     darken=None)

    # IDF Plot: color each keypoint by its word's inverse document freq
    if show_idf:
        idf_list = np.array(list(ut.dict_take_gen(invindex.wx2_idf, wxs)))
        fnum = _plot(idf_list, fnum=fnum, lbl='IDF')
        print('stats(idf_list) = ' + ut.get_stats_str(idf_list))

    # Word Plot: color each keypoint by its word label
    if show_words:
        fnum = _plot(wxs, fnum=fnum, lbl='Words', colortype='label')

    # LNBNN Result Plots
    # NOTE(review): assumes qres_list is given whenever
    # qfx2_closest_k_list is — zip would silently truncate otherwise.
    if qfx2_closest_k_list is not None:
        for qres, qfx2_closest_k in zip(qres_list, qfx2_closest_k_list):
            print('  --- qres item ---')
            if qres is not None:
                from ibeis.algo.hots.hots_query_result import QueryResult
                assert isinstance(qres, QueryResult)
                if show_analysis:
                    qres.show_analysis(ibs=ibs,
                                       fnum=fnum,
                                       figtitle=qres.make_smaller_title())
                    fnum += 1
                if show_aveprecision:
                    qres.show_precision_recall_curve(ibs=ibs, fnum=fnum)
                    fnum += 1

            if qfx2_closest_k is not None:
                # Plot ranked positions; negative ranks mean "not found"
                qfx2_closest_k = np.array(qfx2_closest_k)
                qfx2_closest_k_qeq0 = qfx2_closest_k[qfx2_closest_k >= 0]
                qfx2_closest_k_lt0 = qfx2_closest_k[qfx2_closest_k < 0]
                print('stats(qfx2_closest_k_qeq0) = ' +
                      ut.get_stats_str(qfx2_closest_k_qeq0))
                print('stats(qfx2_closest_k_lt0)  = ' +
                      ut.get_stats_str(qfx2_closest_k_lt0))
                fnum = _plot(qfx2_closest_k,
                             fnum=fnum,
                             lbl='Correct Ranks ' + qres.make_smaller_title(),
                             colortype='custom',
                             reverse_cmap=True)

    # Correct word assignment plots
    if show_word_correct_assignments:
        unique_wxs, unique_inverse = np.unique(wxs, return_inverse=True)
        # Get the aids that belong to each word
        _idxs_list = ut.dict_take(invindex.wx2_idxs, unique_wxs)
        _aids_list = [invindex.idx2_daid.take(idxs) for idxs in _idxs_list]
        # Check if this word will provide a correct assignment -
        # two ground truth chips exist within the same word
        gt_aids = np.array(ibs.get_annot_groundtruth(aid))
        _hastp_list = np.array(
            [len(np.intersect1d(aids, gt_aids)) > 0 for aids in _aids_list])
        # Map back to the space of features: mark each feature match as
        # having a correct word mapping (+1) or not (-2)
        hascorrectmatch = _hastp_list[unique_inverse]
        hascorrectmatch_ = hascorrectmatch.astype(np.int32) * 3 - 2
        # The original referenced the LNBNN loop variable ``qres`` here,
        # which is unbound unless that loop ran; fall back to an empty
        # title augmentation when no query results were supplied.
        last_qres = qres_list[-1] if qres_list else None
        title_aug = last_qres.make_smaller_title() if last_qres is not None else ''
        lbl = ('Correct Words ' + title_aug +
               '\n Yellow means the word contains a correct match in the '
               'word\'s invindex. Blue is the opposite.')
        fnum = _plot(hascorrectmatch_,
                     fnum=fnum,
                     lbl=lbl,
                     colortype='custom',
                     reverse_cmap=False)

    # Word Metric Plots: one chip per requested metric key
    for metrickey in metric_keys:
        if isinstance(metrickey, tuple):
            def fixstr(str_):
                # Strip common affixes for a compact plot label
                return str_.replace('wx2_', '').replace('_stats', '')

            lbl = '%s(%s)' % (metrickey[1].upper(), fixstr(metrickey[0]))
        else:
            lbl = str(metrickey)
        metric_list = metric_clamped_stat(metrics, wxs, metrickey)
        fnum = _plot(metric_list, fnum=fnum, lbl=lbl)
Example #31
0
def flann_add_time_experiment():
    """
    builds plot of number of annotations vs indexer build time.

    TODO: time experiment

    CommandLine:
        python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --show
        python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_Master0 --show
        utprof.py -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --show

        valgrind --tool=memcheck --suppressions=valgrind-python.supp python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --no-with-reindex

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots._neighbor_experiment import *  # NOQA
        >>> import ibeis
        >>> #ibs = ibeis.opendb('PZ_MTEST')
        >>> result = flann_add_time_experiment()
        >>> # verify results
        >>> print(result)
        >>> ut.show_if_requested()

    """
    import ibeis
    import utool as ut
    import numpy as np
    import plottool as pt

    def make_flann_index(vecs, flann_params):
        flann = pyflann.FLANN()
        flann.build_index(vecs, **flann_params)
        return flann

    db = ut.get_argval('--db')
    ibs = ibeis.opendb(db=db)

    # Input
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        reindex_stride = 16
        addition_stride = 4
        max_ceiling = 120
    elif ibs.get_dbname() == 'PZ_Master0':
        #ibs = ibeis.opendb(db='GZ_ALL')
        initial = 32
        reindex_stride = 32
        addition_stride = 16
        max_ceiling = 300001
    else:
        assert False
    #max_ceiling = 32
    all_daids = ibs.get_valid_aids()
    max_num = min(max_ceiling, len(all_daids))
    flann_params = ibs.cfg.query_cfg.flann_cfg.get_flann_params()

    # Output
    count_list,  time_list_reindex  = [], []
    count_list2, time_list_addition = [], []

    # Setup
    #all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    all_randomize_daids_ = all_daids
    # ensure all features are computed
    ibs.get_annot_vecs(all_randomize_daids_)

    def reindex_step(count, count_list, time_list_reindex):
        daids    = all_randomize_daids_[0:count]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann = make_flann_index(vecs, flann_params)  # NOQA
        count_list.append(count)
        time_list_reindex.append(t.ellapsed)

    def addition_step(count, flann, count_list2, time_list_addition):
        daids = all_randomize_daids_[count:count + 1]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann.add_points(vecs)
        count_list2.append(count)
        time_list_addition.append(t.ellapsed)

    def make_initial_index(initial):
        daids = all_randomize_daids_[0:initial + 1]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        flann = make_flann_index(vecs, flann_params)
        return flann

    WITH_REINDEX = not ut.get_argflag('--no-with-reindex')
    if WITH_REINDEX:
        # Reindex Part
        reindex_lbl = 'Reindexing'
        _reindex_iter = range(1, max_num, reindex_stride)
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_lbl, freq=1)
        for count in reindex_iter:
            reindex_step(count, count_list, time_list_reindex)

    # Add Part
    flann = make_initial_index(initial)
    addition_lbl = 'Addition'
    _addition_iter = range(initial + 1, max_num, addition_stride)
    addition_iter = ut.ProgressIter(_addition_iter, lbl=addition_lbl)
    for count in addition_iter:
        addition_step(count, flann, count_list2, time_list_addition)

    print('---')
    print('Reindex took time_list_reindex %.2s seconds' % sum(time_list_reindex))
    print('Addition took time_list_reindex  %.2s seconds' % sum(time_list_addition))
    print('---')
    statskw = dict(precision=2, newlines=True)
    print('Reindex stats ' + ut.get_stats_str(time_list_reindex, **statskw))
    print('Addition stats ' + ut.get_stats_str(time_list_addition, **statskw))

    print('Plotting')

    #with pt.FigureContext:

    next_fnum = iter(range(0, 2)).next  # python3 PY3
    pt.figure(fnum=next_fnum())
    if WITH_REINDEX:
        pt.plot2(count_list, time_list_reindex, marker='-o', equal_aspect=False,
                 x_label='num_annotations', label=reindex_lbl + ' Time', dark=False)

    #pt.figure(fnum=next_fnum())
    pt.plot2(count_list2, time_list_addition, marker='-o', equal_aspect=False,
             x_label='num_annotations', label=addition_lbl + ' Time')

    pt
    pt.legend()