Example #1
    def show_selected(self, event):
        import plottool_ibeis as pt
        from ibeis.viz import viz_chip
        print('show_selected')
        fnum = pt.ensure_fnum(10)
        print('fnum = %r' % (fnum, ))
        pt.figure(fnum=fnum)
        pt.update()
        viz_chip.show_many_chips(self.infr.ibs, self.selected_aids)
        pt.update()
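All of these snippets revolve around the same plottool_ibeis figure management: ensure_fnum picks (or validates) a figure number, figure creates or activates it, and update forces a redraw. A minimal stand-alone sketch of that pattern, assuming only plottool_ibeis and a working matplotlib backend (the ibeis-specific show_many_chips call is omitted):

import plottool_ibeis as pt

fnum = pt.ensure_fnum(None)     # None -> pick the next free figure number
pt.figure(fnum=fnum)            # create or activate figure `fnum`
pt.set_figtitle('fnum demo')    # title helper used throughout these examples
pt.update()                     # force the GUI event loop to redraw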
Example #2
def show_single_coverage_mask(qreq_,
                              cm,
                              weight_mask_m,
                              weight_mask,
                              daids,
                              fnum=None):
    """
    DEPRECATE
    """
    import plottool_ibeis as pt
    import utool as ut    # assumed to be module-level imports in the original file
    import vtool_ibeis as vt
    from ibeis import viz
    fnum = pt.ensure_fnum(fnum)
    idx_list = ut.dict_take(cm.daid2_idx, daids)
    nPlots = len(idx_list) + 1
    nRows, nCols = pt.get_square_row_cols(nPlots)
    pnum_ = pt.make_pnum_nextgen(nRows, nCols)
    pt.figure(fnum=fnum, pnum=(1, 2, 1))
    # Draw coverage masks with bbox
    # <FlipHack>
    #weight_mask_m = np.fliplr(np.flipud(weight_mask_m))
    #weight_mask = np.fliplr(np.flipud(weight_mask))
    # </FlipHack>
    stacked_weights, offset_tup, sf_tup = vt.stack_images(weight_mask_m,
                                                          weight_mask,
                                                          return_sf=True)
    (woff, hoff) = offset_tup[1]
    wh1 = weight_mask_m.shape[0:2][::-1]
    wh2 = weight_mask.shape[0:2][::-1]
    pt.imshow(255 * (stacked_weights),
              fnum=fnum,
              pnum=pnum_(0),
              title='(query image) What did match vs what should match')
    pt.draw_bbox((0, 0) + wh1, bbox_color=(0, 0, 1))
    pt.draw_bbox((woff, hoff) + wh2, bbox_color=(0, 0, 1))
    # Get contributing matches
    qaid = cm.qaid
    daid_list = daids
    fm_list = ut.take(cm.fm_list, idx_list)
    fs_list = ut.take(cm.fs_list, idx_list)
    # Draw matches
    for px, (daid, fm, fs) in enumerate(zip(daid_list, fm_list, fs_list),
                                        start=1):
        viz.viz_matches.show_matches2(qreq_.ibs,
                                      qaid,
                                      daid,
                                      fm,
                                      fs,
                                      draw_pts=False,
                                      draw_lines=True,
                                      draw_ell=False,
                                      fnum=fnum,
                                      pnum=pnum_(px),
                                      darken=.5)
    coverage_score = score_matching_mask(weight_mask_m, weight_mask)
    pt.set_figtitle('score=%.4f' % (coverage_score, ))
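Example #2 lays its subplots out with make_pnum_nextgen, which appears to return a callable yielding the next subplot spec in an nRows x nCols grid (called with no argument in Example #16, or with an explicit index as above). A small sketch of that grid pattern, assuming only plottool_ibeis:

import plottool_ibeis as pt

nPlots = 5
nRows, nCols = pt.get_square_row_cols(nPlots)   # roughly square grid
pnum_ = pt.make_pnum_nextgen(nRows, nCols)      # generator of subplot specs
fnum = pt.ensure_fnum(None)
for count in range(nPlots):
    pt.figure(fnum=fnum, pnum=pnum_())          # advance to the next grid cell
pt.set_figtitle('pnum grid demo')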
Example #3
    def _chip_view(mode=0, pnum=(1, 1, 1), **kwargs):
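        # Closure: fnum, ibs, aid, config2_, mode_ptr, and ischild come from the enclosing interaction.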
        print('... _chip_view mode=%r' % mode_ptr[0])
        kwargs['ell'] = mode_ptr[0] == 1
        kwargs['pts'] = mode_ptr[0] == 2

        if not ischild:
            pt.figure(fnum=fnum, pnum=pnum, docla=True, doclf=True)
        # Toggle no keypoints view
        viz.show_chip(ibs,
                      aid,
                      fnum=fnum,
                      pnum=pnum,
                      config2_=config2_,
                      **kwargs)
        pt.set_figtitle('Chip View')
Example #4
def draw_tree_model(model, **kwargs):
    import plottool_ibeis as pt
    import networkx as netx
    import utool as ut  # assumed to be a module-level import in the original file
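    # --hackjunc suppresses the Markov model figure; --hackmarkov suppresses the junction tree figure.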
    if not ut.get_argval('--hackjunc'):
        fnum = pt.ensure_fnum(None)
        fig = pt.figure(fnum=fnum, doclf=True)  # NOQA
        ax = pt.gca()
        #name_nodes = sorted(ut.list_getattr(model.ttype2_cpds[NAME_TTYPE], 'variable'))
        netx_graph = model.to_markov_model()
        #pos = netx.pygraphviz_layout(netx_graph)
        #pos = netx.graphviz_layout(netx_graph)
        #pos = get_hacked_pos(netx_graph, name_nodes, prog='neato')
        pos = netx.nx_pydot.pydot_layout(netx_graph)
        node_color = [pt.WHITE] * len(pos)
        drawkw = dict(pos=pos, ax=ax, with_labels=True, node_color=node_color,
                      node_size=1100)
        netx.draw(netx_graph, **drawkw)
        if kwargs.get('show_title', True):
            pt.set_figtitle('Markov Model')

    if not ut.get_argval('--hackmarkov'):
        fnum = pt.ensure_fnum(None)
        fig = pt.figure(fnum=fnum, doclf=True)  # NOQA
        ax = pt.gca()
        netx_graph = model.to_junction_tree()
        # prettify nodes
        def fixtupkeys(dict_):
            return {
                ', '.join(k) if isinstance(k, tuple) else k: fixtupkeys(v)
                for k, v in dict_.items()
            }
        # FIXME
        n = fixtupkeys(netx_graph.node)
        e = fixtupkeys(netx_graph.edge)
        a = fixtupkeys(netx_graph.adj)
        netx_graph.nodes.update(n)
        netx_graph.edges.update(e)
        netx_graph.adj.update(a)
        #netx_graph = model.to_markov_model()
        #pos = netx.pygraphviz_layout(netx_graph)
        #pos = netx.graphviz_layout(netx_graph)
        pos = netx.nx_pydot.pydot_layout(netx_graph)
        node_color = [pt.WHITE] * len(pos)
        drawkw = dict(pos=pos, ax=ax, with_labels=True, node_color=node_color,
                      node_size=2000)
        netx.draw(netx_graph, **drawkw)
        if kwargs.get('show_title', True):
            pt.set_figtitle('Junction/Clique Tree / Cluster Graph')
Example #5
def draw_precision_recall_curve_(recall_range_,
                                 p_interp_curve,
                                 title_pref=None,
                                 fnum=1):
    import plottool_ibeis as pt
    import numpy as np  # assumed to be module-level imports in the original file
    import utool as ut
    if recall_range_ is None:
        recall_range_ = np.array([])
        p_interp_curve = np.array([])
    fig = pt.figure(fnum=fnum, docla=True, doclf=True)  # NOQA

    if p_interp_curve.size == 0:
        # no data (recall_range_ was None above)
        ave_p = np.nan
    else:
        ave_p = p_interp_curve.sum() / p_interp_curve.size

    pt.plot2(recall_range_,
             p_interp_curve,
             marker='o--',
             x_label='recall',
             y_label='precision',
             unitbox=True,
             flipx=False,
             color='r',
             title='Interpolated Precision vs Recall\n' + 'avep = %r' % ave_p)
    print('Interpolated Precision')
    print(ut.repr2(list(zip(recall_range_, p_interp_curve))))
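The average precision reported above is just the mean of the interpolated precision curve over the sampled recall range. A quick numpy-only check of that arithmetic (the curve values are made up for illustration):

import numpy as np

recall_range_ = np.linspace(0, 1, 11)
p_interp_curve = np.array([1., 1., 1., .9, .9, .8, .8, .7, .6, .5, .5])
ave_p = p_interp_curve.sum() / p_interp_curve.size
print('avep = %.3f' % ave_p)   # 0.791 for this synthetic curve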
Example #6
    def prepare_page(self):
        figkw = {
            'fnum': self.fnum,
            'doclf': True,
            'docla': True,
        }
        self.fig = pt.figure(**figkw)
        ih.disconnect_callback(self.fig, 'button_press_event')
        ih.connect_callback(self.fig, 'button_press_event', self.figure_clicked)
Example #7
    def sv_view(self, dodraw=True):
        """ spatial verification view

        """
        #fnum = viz.FNUMS['special']
        aid = self.daid
        fnum = pt.next_fnum()
        fig = pt.figure(fnum=fnum, docla=True, doclf=True)
        ih.disconnect_callback(fig, 'button_press_event')
        viz.viz_sver.show_sver(self.ibs, self.qaid, aid2=aid, fnum=fnum)
        if dodraw:
            #self.draw()
            pt.draw()
Example #8
    def prepare_page(self, fulldraw=True):
        figkw = {
            'fnum': self.fnum,
            'doclf': fulldraw,
            'docla': fulldraw,
        }
        if fulldraw:
            self.fig = pt.figure(**figkw)
        ih.disconnect_callback(self.fig, 'button_press_event')
        ih.disconnect_callback(self.fig, 'key_press_event')
        ih.connect_callback(self.fig, 'button_press_event',
                            self.figure_clicked)
        ih.connect_callback(self.fig, 'key_press_event', self.on_key_press)
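Examples #6 and #8 share a prepare_page idiom: (re)create the page figure, then drop and rebind the Matplotlib event callbacks so stale handlers do not stack up. A hedged, stand-alone sketch of that idiom; it assumes ih refers to plottool_ibeis's interact_helpers module, as it appears to in the surrounding snippets:

import plottool_ibeis as pt
from plottool_ibeis import interact_helpers as ih  # assumed module path

def on_click(event):
    print('figure clicked at (%r, %r)' % (event.xdata, event.ydata))

fig = pt.figure(fnum=1, doclf=True, docla=True)
ih.disconnect_callback(fig, 'button_press_event')         # clear any old handler
ih.connect_callback(fig, 'button_press_event', on_click)  # bind the new one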
Example #9
def draw_junction_tree(model, fnum=None, **kwargs):
    import plottool_ibeis as pt
    import networkx as nx  # assumed to be a module-level import in the original file
    fnum = pt.ensure_fnum(fnum)
    pt.figure(fnum=fnum)
    ax = pt.gca()
    from pgmpy.models import JunctionTree
    if not isinstance(model, JunctionTree):
        netx_graph = model.to_junction_tree()
    else:
        netx_graph = model
    # prettify nodes
    def fixtupkeys(dict_):
        return {
            ', '.join(k) if isinstance(k, tuple) else k: fixtupkeys(v)
            for k, v in dict_.items()
        }

    n = fixtupkeys(netx_graph.nodes)
    e = fixtupkeys(netx_graph.edge)
    a = fixtupkeys(netx_graph.adj)
    netx_graph.nodes = n
    netx_graph.edge = e
    netx_graph.adj = a
    #netx_graph = model.to_markov_model()
    #pos = nx.nx_agraph.pygraphviz_layout(netx_graph)
    #pos = nx.nx_agraph.graphviz_layout(netx_graph)
    pos = nx.nx_pydot.pydot_layout(netx_graph)
    node_color = [pt.NEUTRAL] * len(pos)
    drawkw = dict(pos=pos,
                  ax=ax,
                  with_labels=True,
                  node_color=node_color,
                  node_size=2000)
    nx.draw(netx_graph, **drawkw)
    if kwargs.get('show_title', True):
        pt.set_figtitle('Junction / Clique Tree / Cluster Graph')
Example #10
    def examine(self, aid, event=None):
        print(' examining aid %r against the query result' % aid)
        figtitle = 'Examine a specific image against the query'

        #fnum = 510
        fnum = pt.next_fnum()
        fig = pt.figure(fnum=fnum, pnum=(1, 1, 1), doclf=True, docla=True)
        # can cause freezes should be False
        INTERACT_EXAMINE = False
        if INTERACT_EXAMINE:
            # from ibeis.viz.interact import interact_matches
            #fig = interact_matches.ishow_matches(self.ibs, self.cm, aid, figtitle=figtitle, fnum=fnum)
            fig = self.cm.ishow_matches(self.ibs, aid, figtitle=figtitle, fnum=fnum)
            print('Finished interact')
            # this is only relevant to matplotlib.__version__ < 1.4.2
            #raise Exception(
            #    'BLACK MAGIC: error intentionally included as a workaround that seems'
            #    'to fix a gui hang on certain computers.')
        else:
            viz_matches.show_matches(self.ibs, self.cm, aid, figtitle=figtitle)
            fig.show()
Example #11
def draw_markov_model(model, fnum=None, **kwargs):
    import plottool_ibeis as pt
    import networkx as nx  # assumed to be a module-level import in the original file
    fnum = pt.ensure_fnum(fnum)
    pt.figure(fnum=fnum, doclf=True)
    ax = pt.gca()
    from pgmpy.models import MarkovModel
    if isinstance(model, MarkovModel):
        markovmodel = model
    else:
        markovmodel = model.to_markov_model()
    # pos = nx.nx_agraph.pydot_layout(markovmodel)
    pos = nx.nx_agraph.pygraphviz_layout(markovmodel)
    # References:
    # https://groups.google.com/forum/#!topic/networkx-discuss/FwYk0ixLDuY

    # pos = nx.spring_layout(markovmodel)
    # pos = nx.circular_layout(markovmodel)
    # curved-arrow
    # markovmodel.edge_attr['curved-arrow'] = True
    # markovmodel.graph.setdefault('edge', {})['splines'] = 'curved'
    # markovmodel.graph.setdefault('graph', {})['splines'] = 'curved'
    # markovmodel.graph.setdefault('edge', {})['splines'] = 'curved'

    node_color = [pt.NEUTRAL] * len(pos)
    drawkw = dict(
        pos=pos,
        ax=ax,
        with_labels=True,
        node_color=node_color,  # NOQA
        node_size=1100)

    from matplotlib.patches import FancyArrowPatch, Circle
    import numpy as np

    def draw_network(G, pos, ax, sg=None):
        for n in G:
            c = Circle(pos[n], radius=10, alpha=0.5, color=pt.NEUTRAL_BLUE)
            ax.add_patch(c)
            G.nodes[n]['patch'] = c
            x, y = pos[n]
            pt.ax_absolute_text(x, y, n, ha='center', va='center')
        seen = {}
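        # Remember (u, v) pairs already drawn so repeated edges get increasing arc curvature.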
        for (u, v, d) in G.edges(data=True):
            n1 = G.nodes[u]['patch']
            n2 = G.nodes[v]['patch']
            rad = 0.1
            if (u, v) in seen:
                rad = seen.get((u, v))
                rad = (rad + np.sign(rad) * 0.1) * -1
            alpha = 0.5
            color = 'k'

            e = FancyArrowPatch(
                n1.center,
                n2.center,
                patchA=n1,
                patchB=n2,
                # arrowstyle='-|>',
                arrowstyle='-',
                connectionstyle='arc3,rad=%s' % rad,
                mutation_scale=10.0,
                lw=2,
                alpha=alpha,
                color=color)
            seen[(u, v)] = rad
            ax.add_patch(e)
        return e

    # nx.draw(markovmodel, **drawkw)
    draw_network(markovmodel, pos, ax)
    ax.autoscale()
    pt.plt.axis('equal')
    pt.plt.axis('off')

    if kwargs.get('show_title', True):
        pt.set_figtitle('Markov Model')
Example #12
def draw_bayesian_model(model,
                        evidence={},
                        soft_evidence={},
                        fnum=None,
                        pnum=None,
                        **kwargs):

    from pgmpy.models import BayesianModel
    if not isinstance(model, BayesianModel):
        model = model.to_bayesian_model()

    import plottool_ibeis as pt
    import networkx as nx
    import numpy as np  # assumed to be module-level imports in the original file
    import utool as ut
    kwargs = kwargs.copy()
    factor_list = kwargs.pop('factor_list', [])

    ttype_colors, ttype_scalars = make_colorcodes(model)

    textprops = {
        'horizontalalignment': 'left',
        'family': 'monospace',
        'size': 8,
    }

    # build graph attrs
    tup = get_node_viz_attrs(model, evidence, soft_evidence, factor_list,
                             ttype_colors, **kwargs)
    node_color, pos_list, pos_dict, takws = tup

    # draw graph
    has_inferred = evidence or 'factor_list' in kwargs

    if False:
        fig = pt.figure(fnum=fnum, pnum=pnum, doclf=True)  # NOQA
        ax = pt.gca()
        drawkw = dict(pos=pos_dict,
                      ax=ax,
                      with_labels=True,
                      node_size=1100,
                      node_color=node_color)
        nx.draw(model, **drawkw)
    else:
        # BE VERY CAREFUL
        if 1:
            graph = model.copy()
            graph.__class__ = nx.DiGraph
            graph.graph['groupattrs'] = ut.ddict(dict)
            #graph = model.
            if getattr(graph, 'ttype2_cpds', None) is not None:
                # Add invis edges and ttype groups
                for ttype in model.ttype2_cpds.keys():
                    ttype_cpds = model.ttype2_cpds[ttype]
                    # use defined ordering
                    ttype_nodes = ut.list_getattr(ttype_cpds, 'variable')
                    # ttype_nodes = sorted(ttype_nodes)
                    invis_edges = list(ut.itertwo(ttype_nodes))
                    graph.add_edges_from(invis_edges)
                    nx.set_edge_attributes(
                        graph,
                        name='style',
                        values={edge: 'invis'
                                for edge in invis_edges})
                    nx.set_node_attributes(
                        graph,
                        name='groupid',
                        values={node: ttype
                                for node in ttype_nodes})
                    graph.graph['groupattrs'][ttype]['rank'] = 'same'
                    graph.graph['groupattrs'][ttype]['cluster'] = False
        else:
            graph = model
        pt.show_nx(graph,
                   layout_kw={'prog': 'dot'},
                   fnum=fnum,
                   pnum=pnum,
                   verbose=0)
        pt.zoom_factory()
        fig = pt.gcf()
        ax = pt.gca()
        pass
    hacks = [
        pt.draw_text_annotations(textprops=textprops, **takw) for takw in takws
        if takw
    ]

    xmin, ymin = np.array(pos_list).min(axis=0)
    xmax, ymax = np.array(pos_list).max(axis=0)
    if 'name' in model.ttype2_template:
        num_names = len(model.ttype2_template['name'].basis)
        num_annots = len(model.ttype2_cpds['name'])
        if num_annots > 4:
            ax.set_xlim((xmin - 40, xmax + 40))
            ax.set_ylim((ymin - 50, ymax + 50))
            fig.set_size_inches(30, 7)
        else:
            ax.set_xlim((xmin - 42, xmax + 42))
            ax.set_ylim((ymin - 50, ymax + 50))
            fig.set_size_inches(23, 7)
        title = 'num_names=%r, num_annots=%r' % (
            num_names,
            num_annots,
        )
    else:
        title = ''
    map_assign = kwargs.get('map_assign', None)

    def word_insert(text):
        return '' if len(text) == 0 else text + ' '

    top_assignments = kwargs.get('top_assignments', None)
    if top_assignments is not None:
        map_assign, map_prob = top_assignments[0]
        if map_assign is not None:
            title += '\n%sMAP: ' % (word_insert(kwargs.get('method', '')))
            title += map_assign + ' @' + '%.2f%%' % (100 * map_prob, )
    if kwargs.get('show_title', True):
        pt.set_figtitle(title, size=14)

    for hack in hacks:
        hack()

    if has_inferred:
        # Hack in colorbars
        # if ut.list_type(basis) is int:
        #     pt.colorbar(scalars, colors, lbl='score', ticklabels=np.array(basis) + 1)
        # else:
        #     pt.colorbar(scalars, colors, lbl='score', ticklabels=basis)
        keys = ['name', 'score']
        locs = ['left', 'right']
        for key, loc in zip(keys, locs):
            if key in ttype_colors:
                basis = model.ttype2_template[key].basis
                # scalars =
                colors = ttype_colors[key]
                scalars = ttype_scalars[key]
                pt.colorbar(scalars,
                            colors,
                            lbl=key,
                            ticklabels=basis,
                            ticklocation=loc)
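Example #12 takes the pt.show_nx path rather than calling nx.draw directly. A hedged sketch of that path on a plain DiGraph; it assumes a Graphviz 'dot' executable is on the PATH for the layout and uses only calls that appear in the example above:

import networkx as nx
import plottool_ibeis as pt

graph = nx.DiGraph()
graph.add_edges_from([('name1', 'score12'), ('name2', 'score12')])
pt.show_nx(graph, layout_kw={'prog': 'dot'}, fnum=1, verbose=0)
pt.zoom_factory()                # scroll-wheel zoom on the drawn axes
pt.set_figtitle('show_nx demo')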
Example #13
def augment_nnindexer_experiment():
    """

    References:
        http://answers.opencv.org/question/44592/flann-index-training-fails-with-segfault/

    CommandLine:
        utprof.py -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment

        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --show
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --show


        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6 --nosave-flann --no-api-cache --nocache-uuids

        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_MTEST --show
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show

        # RUNS THE SEGFAULTING CASE
        python -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        # Debug it
        gdb python
        run -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --show
        gdb python
        run -m ibeis.algo.hots._neighbor_experiment --test-augment_nnindexer_experiment --db PZ_Master0 --diskshow --adjust=.1 --save "augment_experiment_{db}.png" --dpath='.' --dpi=180 --figsize=9,6


    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots._neighbor_experiment import *  # NOQA
        >>> # execute function
        >>> augment_nnindexer_experiment()
        >>> # verify results
        >>> ut.show_if_requested()

    """
    import ibeis
    import utool as ut  # assumed to be module-level imports in the original file
    from ibeis.algo.hots import neighbor_index_cache
    # build test data
    #ibs = ibeis.opendb('PZ_MTEST')
    ibs = ibeis.opendb(defaultdb='PZ_Master0')
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        addition_stride = 4
        max_ceiling = 100
    elif ibs.get_dbname() == 'PZ_Master0':
        initial = 128
        #addition_stride = 64
        #addition_stride = 128
        addition_stride = 256
        max_ceiling = 10000
        #max_ceiling = 4000
        #max_ceiling = 2000
        #max_ceiling = 600
    else:
        assert False
    all_daids = ibs.get_valid_aids(species='zebra_plains')
    qreq_ = ibs.new_query_request(all_daids, all_daids)
    max_num = min(max_ceiling, len(all_daids))

    # Clear Caches
    ibs.delete_flann_cachedir()
    neighbor_index_cache.clear_memcache()
    neighbor_index_cache.clear_uuid_cache(qreq_)

    # Setup
    all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    # ensure all features are computed

    nnindexer_list = []
    addition_lbl = 'Addition'
    _addition_iter = list(range(initial + 1, max_num, addition_stride))
    addition_iter = iter(ut.ProgressIter(_addition_iter, lbl=addition_lbl,
                                         freq=1, autoadjust=False))
    time_list_addition = []
    #time_list_reindex = []
    addition_count_list = []
    tmp_cfgstr_list = []

    #for _ in range(80):
    #    next(addition_iter)
    try:
        memtrack = ut.MemoryTracker(disable=False)
        for count in addition_iter:
            aid_list_ = all_randomize_daids_[0:count]
            # Request an indexer which could be an augmented version of an existing indexer.
            with ut.Timer(verbose=False) as t:
                memtrack.report('BEFORE AUGMENT')
                nnindexer_ = neighbor_index_cache.request_augmented_ibeis_nnindexer(qreq_, aid_list_)
                memtrack.report('AFTER AUGMENT')
            nnindexer_list.append(nnindexer_)
            addition_count_list.append(count)
            time_list_addition.append(t.ellapsed)
            tmp_cfgstr_list.append(nnindexer_.cfgstr)
            print('===============\n\n')
        print(ut.repr2(time_list_addition))
        print(ut.repr2(list(map(id, nnindexer_list))))
        print(ut.repr2(tmp_cfgstr_list))
        print(ut.repr2(list([nnindxer.cfgstr for nnindxer in nnindexer_list])))

        IS_SMALL = False

        if IS_SMALL:
            nnindexer_list = []
        reindex_label = 'Reindex'
        # go backwards for reindex
        _reindex_iter = list(range(initial + 1, max_num, addition_stride))[::-1]
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_label)
        time_list_reindex = []
        #time_list_reindex = []
        reindex_count_list = []

        for count in reindex_iter:
            print('\n+===PREDONE====================\n')
            # check only a single size for memory leaks
            #count = max_num // 16 + ((x % 6) * 1)
            #x += 1

            aid_list_ = all_randomize_daids_[0:count]
            # Call the same code, but force rebuilds
            memtrack.report('BEFORE REINDEX')
            with ut.Timer(verbose=False) as t:
                nnindexer_ = neighbor_index_cache.request_augmented_ibeis_nnindexer(
                    qreq_, aid_list_, force_rebuild=True, memtrack=memtrack)
            memtrack.report('AFTER REINDEX')
            ibs.print_cachestats_str()
            print('[nnindex.MEMCACHE] size(NEIGHBOR_CACHE) = %s' % (
                ut.get_object_size_str(neighbor_index_cache.NEIGHBOR_CACHE.items()),))
            print('[nnindex.MEMCACHE] len(NEIGHBOR_CACHE) = %s' % (
                len(neighbor_index_cache.NEIGHBOR_CACHE.items()),))
            print('[nnindex.MEMCACHE] size(UUID_MAP_CACHE) = %s' % (
                ut.get_object_size_str(neighbor_index_cache.UUID_MAP_CACHE),))
            print('totalsize(nnindexer) = ' + ut.get_object_size_str(nnindexer_))
            memtrack.report_type(neighbor_index_cache.NeighborIndex)
            ut.print_object_size_tree(nnindexer_, lbl='nnindexer_')
            if IS_SMALL:
                nnindexer_list.append(nnindexer_)
            reindex_count_list.append(count)
            time_list_reindex.append(t.ellapsed)
            #import cv2
            #import matplotlib as mpl
            #print(mem_top.mem_top(limit=30, width=120,
            #                      #exclude_refs=[cv2.__dict__, mpl.__dict__]
            #     ))
            print('L___________________\n\n\n')
        print(ut.repr2(time_list_reindex))
        if IS_SMALL:
            print(ut.repr2(list(map(id, nnindexer_list))))
            print(ut.repr2(list([nnindxer.cfgstr for nnindxer in nnindexer_list])))
    except KeyboardInterrupt:
        print('\n[train] Caught CTRL+C')
        resolution = ''
        from six.moves import input
        while not resolution.isdigit():
            print('\n[train] What do you want to do?')
            print('[train]     0 - Continue')
            print('[train]     1 - Embed')
            print('[train]  ELSE - Stop network training')
            resolution = input('[train] Resolution: ')
        resolution = int(resolution)
        # We have a resolution
        if resolution == 0:
            print('resuming training...')
        elif resolution == 1:
            ut.embed()

    import plottool_ibeis as pt

    next_fnum = iter(range(0, 1)).__next__  # Python 3 (the original used .next)
    pt.figure(fnum=next_fnum())
    if len(addition_count_list) > 0:
        pt.plot2(addition_count_list, time_list_addition, marker='-o', equal_aspect=False,
                 x_label='num_annotations', label=addition_lbl + ' Time')

    if len(reindex_count_list) > 0:
        pt.plot2(reindex_count_list, time_list_reindex, marker='-o', equal_aspect=False,
                 x_label='num_annotations', label=reindex_label + ' Time')

    pt.set_figtitle('Augmented indexer experiment')

    pt.legend()
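Both timing experiments end with the same pt.plot2 / pt.legend pattern for the timing curves. A small sketch of just that plotting step with fabricated timings, using only calls that appear in the examples above:

import plottool_ibeis as pt

counts = [1, 5, 9, 13]
times = [0.02, 0.11, 0.19, 0.31]          # fabricated timings for illustration
pt.figure(fnum=pt.ensure_fnum(None))
pt.plot2(counts, times, marker='-o', equal_aspect=False,
         x_label='num_annotations', label='Addition Time')
pt.set_figtitle('timing demo')
pt.legend()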
Example #14
def hackshow_names(ibs, aid_list, fnum=None):
    r"""
    Args:
        ibs (IBEISController):  ibeis controller object
        aid_list (list):

    CommandLine:
        python -m ibeis.other.dbinfo --exec-hackshow_names --show
        python -m ibeis.other.dbinfo --exec-hackshow_names --show --db PZ_Master1

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.other.dbinfo import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
        >>> aid_list = ibs.get_valid_aids()
        >>> result = hackshow_names(ibs, aid_list)
        >>> print(result)
        >>> ut.show_if_requested()
    """
    import plottool_ibeis as pt
    import vtool_ibeis as vt
    import numpy as np  # assumed to be module-level imports in the original file
    import utool as ut
    grouped_aids, nid_list = ibs.group_annots_by_name(aid_list)
    grouped_aids = [aids for aids in grouped_aids if len(aids) > 1]
    unixtimes_list = ibs.unflat_map(ibs.get_annot_image_unixtimes_asfloat,
                                    grouped_aids)
    yaws_list = ibs.unflat_map(ibs.get_annot_yaws, grouped_aids)
    #markers_list = [[(1, 2, yaw * 360 / (np.pi * 2)) for yaw in yaws] for yaws in yaws_list]

    unixtime_list = ut.flatten(unixtimes_list)
    timemax = np.nanmax(unixtime_list)
    timemin = np.nanmin(unixtime_list)
    timerange = timemax - timemin
    unixtimes_list = [((unixtimes[:] - timemin) / timerange)
                      for unixtimes in unixtimes_list]
    for unixtimes in unixtimes_list:
        num_nan = sum(np.isnan(unixtimes))
        unixtimes[np.isnan(unixtimes)] = np.linspace(-1, -.5, num_nan)
    #ydata_list = [np.arange(len(aids)) for aids in grouped_aids]
    sortx_list = vt.argsort_groups(unixtimes_list, reverse=False)
    #markers_list = ut.list_ziptake(markers_list, sortx_list)
    yaws_list = ut.list_ziptake(yaws_list, sortx_list)
    ydatas_list = vt.ziptake(unixtimes_list, sortx_list)
    #ydatas_list = sortx_list
    #ydatas_list = vt.argsort_groups(unixtimes_list, reverse=False)

    # Sort by num members
    #ydatas_list = ut.take(ydatas_list, np.argsort(list(map(len, ydatas_list))))
    xdatas_list = [
        np.zeros(len(ydatas)) + count
        for count, ydatas in enumerate(ydatas_list)
    ]
    #markers = ut.flatten(markers_list)
    #yaws = np.array(ut.flatten(yaws_list))
    y_data = np.array(ut.flatten(ydatas_list))
    x_data = np.array(ut.flatten(xdatas_list))
    fnum = pt.ensure_fnum(fnum)
    pt.figure(fnum=fnum)
    ax = pt.gca()

    #unique_yaws, groupxs = vt.group_indices(yaws)

    ax.scatter(x_data, y_data, color=[1, 0, 0], s=1, marker='.')
    #pt.draw_stems(x_data, y_data, marker=markers, setlims=True, linestyle='')
    pt.dark_background()
    ax = pt.gca()
    ax.set_xlim(min(x_data) - .1, max(x_data) + .1)
    ax.set_ylim(min(y_data) - .1, max(y_data) + .1)
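The y-values above are unixtimes rescaled to [0, 1] per name, with missing times parked just below zero so they still plot. A numpy-only sketch of that normalization and NaN handling (values fabricated):

import numpy as np

unixtimes = np.array([100., 250., np.nan, 400.])
timemin, timemax = np.nanmin(unixtimes), np.nanmax(unixtimes)
norm = (unixtimes - timemin) / (timemax - timemin)
num_nan = int(np.isnan(norm).sum())
norm[np.isnan(norm)] = np.linspace(-1, -.5, num_nan)  # park missing times below 0
print(norm)   # [0., 0.5, -1., 1.]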
Example #15
def flann_add_time_experiment():
    """
    builds plot of number of annotations vs indexer build time.

    TODO: time experiment

    CommandLine:
        python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --show
        python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_Master0 --show
        utprof.py -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --show

        valgrind --tool=memcheck --suppressions=valgrind-python.supp python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --no-with-reindex

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots._neighbor_experiment import *  # NOQA
        >>> import ibeis
        >>> #ibs = ibeis.opendb('PZ_MTEST')
        >>> result = flann_add_time_experiment()
        >>> # verify results
        >>> print(result)
        >>> ut.show_if_requested()

    """
    import ibeis
    import utool as ut
    import numpy as np
    import plottool_ibeis as pt
    import vtool_ibeis as vt  # assumed to be module-level imports in the original file
    import pyflann

    def make_flann_index(vecs, flann_params):
        flann = pyflann.FLANN()
        flann.build_index(vecs, **flann_params)
        return flann

    db = ut.get_argval('--db')
    ibs = ibeis.opendb(db=db)

    # Input
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        reindex_stride = 16
        addition_stride = 4
        max_ceiling = 120
    elif ibs.get_dbname() == 'PZ_Master0':
        #ibs = ibeis.opendb(db='GZ_ALL')
        initial = 32
        reindex_stride = 32
        addition_stride = 16
        max_ceiling = 300001
    else:
        assert False
    #max_ceiling = 32
    all_daids = ibs.get_valid_aids()
    max_num = min(max_ceiling, len(all_daids))
    flann_params = vt.get_flann_params()

    # Output
    count_list,  time_list_reindex  = [], []
    count_list2, time_list_addition = [], []

    # Setup
    #all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    all_randomize_daids_ = all_daids
    # ensure all features are computed
    ibs.get_annot_vecs(all_randomize_daids_)

    def reindex_step(count, count_list, time_list_reindex):
        daids    = all_randomize_daids_[0:count]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann = make_flann_index(vecs, flann_params)  # NOQA
        count_list.append(count)
        time_list_reindex.append(t.ellapsed)

    def addition_step(count, flann, count_list2, time_list_addition):
        daids = all_randomize_daids_[count:count + 1]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann.add_points(vecs)
        count_list2.append(count)
        time_list_addition.append(t.ellapsed)

    def make_initial_index(initial):
        daids = all_randomize_daids_[0:initial + 1]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        flann = make_flann_index(vecs, flann_params)
        return flann

    WITH_REINDEX = not ut.get_argflag('--no-with-reindex')
    if WITH_REINDEX:
        # Reindex Part
        reindex_lbl = 'Reindexing'
        _reindex_iter = range(1, max_num, reindex_stride)
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_lbl, freq=1)
        for count in reindex_iter:
            reindex_step(count, count_list, time_list_reindex)

    # Add Part
    flann = make_initial_index(initial)
    addition_lbl = 'Addition'
    _addition_iter = range(initial + 1, max_num, addition_stride)
    addition_iter = ut.ProgressIter(_addition_iter, lbl=addition_lbl)
    for count in addition_iter:
        addition_step(count, flann, count_list2, time_list_addition)

    print('---')
    print('Reindex took %.2f seconds in total' % sum(time_list_reindex))
    print('Addition took %.2f seconds in total' % sum(time_list_addition))
    print('---')
    statskw = dict(precision=2, newlines=True)
    print('Reindex stats ' + ut.get_stats_str(time_list_reindex, **statskw))
    print('Addition stats ' + ut.get_stats_str(time_list_addition, **statskw))

    print('Plotting')

    #with pt.FigureContext:

    next_fnum = iter(range(0, 2)).__next__  # Python 3 (the original used .next)
    pt.figure(fnum=next_fnum())
    if WITH_REINDEX:
        pt.plot2(count_list, time_list_reindex, marker='-o', equal_aspect=False,
                 x_label='num_annotations', label=reindex_lbl + ' Time', dark=False)

    #pt.figure(fnum=next_fnum())
    pt.plot2(count_list2, time_list_addition, marker='-o', equal_aspect=False,
             x_label='num_annotations', label=addition_lbl + ' Time')

    pt.legend()
Example #16
    def demo_classes(pblm):
        r"""
        CommandLine:
            python -m ibeis.algo.verif.vsone demo_classes --saveparts --save=classes.png --clipwhite

            python -m ibeis.algo.verif.vsone demo_classes --saveparts --save=figures/classes.png --clipwhite --dpath=~/latex/crall-iccv-2017

        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.algo.verif.vsone import *  # NOQA
            >>> pblm = OneVsOneProblem.from_empty(defaultdb='PZ_PB_RF_TRAIN')
            >>> pblm.load_features()
            >>> pblm.load_samples()
            >>> pblm.build_feature_subsets()
            >>> pblm.demo_classes()
            >>> ut.show_if_requested()
        """
        import numpy as np  # assumed to be module-level imports in the original file
        import vtool_ibeis as vt
        task_key = 'match_state'
        labels = pblm.samples.subtasks[task_key]
        pb_labels = pblm.samples.subtasks['photobomb_state']
        classname_offset = {
            POSTV: 0,
            NEGTV: 0,
            INCMP: 0,
        }
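        # classname_offset selects which matching candidate (by index) is shown for each class.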
        class_name = POSTV
        class_name = NEGTV
        class_name = INCMP

        feats = pblm.samples.X_dict['learn(sum,glob)']

        offset = 0
        class_to_edge = {}
        for class_name in labels.class_names:
            print('Find example of %r' % (class_name, ))
            # Find an example of each class (that is not a photobomb)
            pbflags = pb_labels.indicator_df['notpb']
            flags = labels.indicator_df[class_name]
            assert np.all(pbflags.index == flags.index)
            flags = flags & pbflags
            ratio = feats['sum(ratio)']
            if class_name == INCMP:
                # flags &= feats['global(delta_yaw)'] > 3
                flags &= feats['global(delta_view)'] > 2
                # flags &= feats['sum(ratio)'] > 0
            if class_name == NEGTV:
                low = ratio[flags].max()
                flags &= feats['sum(ratio)'] >= low
            if class_name == POSTV:
                low = ratio[flags].median() / 2
                high = ratio[flags].median()
                flags &= feats['sum(ratio)'] < high
                flags &= feats['sum(ratio)'] > low
            # flags &= pblm.samples.simple_scores[flags]['score_lnbnn_1vM'] > 0
            idxs = np.where(flags)[0]
            print('Found %d candidates' % (len(idxs)))
            offset = classname_offset[class_name]
            idx = idxs[offset]
            series = labels.indicator_df.iloc[idx]
            assert series[class_name]
            edge = series.name
            class_to_edge[class_name] = edge

        import plottool_ibeis as pt
        import guitool_ibeis as gt
        gt.ensure_qapp()
        pt.qtensure()

        fnum = 1
        pt.figure(fnum=fnum, pnum=(1, 3, 1))
        pnum_ = pt.make_pnum_nextgen(1, 3)

        # classname_alias = {
        #     POSTV: 'positive',
        #     NEGTV: 'negative',
        #     INCMP: 'incomparable',
        # }

        ibs = pblm.infr.ibs
        for class_name in class_to_edge.keys():
            edge = class_to_edge[class_name]
            aid1, aid2 = edge
            # alias = classname_alias[class_name]
            print('class_name = %r' % (class_name, ))
            annot1 = ibs.annots([aid1])[0]._make_lazy_dict()
            annot2 = ibs.annots([aid2])[0]._make_lazy_dict()
            vt.matching.ensure_metadata_normxy(annot1)
            vt.matching.ensure_metadata_normxy(annot2)
            match = vt.PairwiseMatch(annot1, annot2)
            cfgdict = pblm.hyper_params.vsone_match.asdict()
            match.apply_all(cfgdict)
            pt.figure(fnum=fnum, pnum=pnum_())
            match.show(show_ell=False, show_ori=False)
Example #17
def show_model(model, evidence={}, soft_evidence={}, **kwargs):
    """
    References:
        http://stackoverflow.com/questions/22207802/pygraphviz-networkx-set-node-level-or-layer

    Ignore:
        pkg-config --libs-only-L libcgraph
        sudo apt-get  install libgraphviz-dev -y
        sudo apt-get  install libgraphviz4 -y

        # sudo apt-get install pkg-config
        sudo apt-get install libgraphviz-dev
        # pip install git+git://github.com/pygraphviz/pygraphviz.git
        pip install pygraphviz
        python -c "import pygraphviz; print(pygraphviz.__file__)"

        sudo pip3 install pygraphviz --install-option="--include-path=/usr/include/graphviz" --install-option="--library-path=/usr/lib/graphviz/"
        python3 -c "import pygraphviz; print(pygraphviz.__file__)"

    CommandLine:
        python -m ibeis.algo.hots.bayes --exec-show_model --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.bayes import *  # NOQA
        >>> model = '?'
        >>> evidence = {}
        >>> soft_evidence = {}
        >>> result = show_model(model, evidence, soft_evidence)
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool_ibeis as pt
        >>> ut.show_if_requested()
    """
    if ut.get_argval('--hackmarkov') or ut.get_argval('--hackjunc'):
        draw_tree_model(model, **kwargs)
        return

    import plottool_ibeis as pt
    import networkx as netx
    import numpy as np  # assumed to be a module-level import in the original file
    fnum = pt.ensure_fnum(None)
    netx_graph = (model)
    #netx_graph.graph.setdefault('graph', {})['size'] = '"10,5"'
    #netx_graph.graph.setdefault('graph', {})['rankdir'] = 'LR'

    pos_dict = get_hacked_pos(netx_graph)
    #pos_dict = netx.nx_agraph.pygraphviz_layout(netx_graph)
    #pos = netx.nx_agraph.nx_pydot.pydot_layout(netx_graph, prog='dot')
    #pos_dict = netx.nx_agraph.graphviz_layout(netx_graph)

    textprops = {
        'family': 'monospace',
        'horizontalalignment': 'left',
        #'horizontalalignment': 'center',
        #'size': 12,
        'size': 8,
    }

    netx_nodes = model.nodes(data=True)
    node_key_list = ut.get_list_column(netx_nodes, 0)
    pos_list = ut.dict_take(pos_dict, node_key_list)

    var2_post = {f.variables[0]: f for f in kwargs.get('factor_list', [])}

    prior_text = None
    post_text = None
    evidence_tas = []
    post_tas = []
    prior_tas = []
    node_color = []

    has_inferred = evidence or var2_post
    if has_inferred:
        ignore_prior_with_ttype = [SCORE_TTYPE, MATCH_TTYPE]
        show_prior = False
    else:
        ignore_prior_with_ttype = []
        #show_prior = True
        show_prior = False

    dpy = 5
    dbx, dby = (20, 20)
    takw1 = {'bbox_align': (.5, 0), 'pos_offset': [0, dpy], 'bbox_offset': [dbx, dby]}
    takw2 = {'bbox_align': (.5, 1), 'pos_offset': [0, -dpy], 'bbox_offset': [-dbx, -dby]}
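    # takw1 anchors posterior/evidence text above a node; takw2 anchors prior text below it.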

    name_colors = pt.distinct_colors(max(model.num_names, 10))
    name_colors = name_colors[:model.num_names]

    #cmap_ = 'hot' #mx = 0.65 #mn = 0.15
    cmap_, mn, mx = 'plasma', 0.15, 1.0
    _cmap = pt.plt.get_cmap(cmap_)
    def cmap(x):
        return _cmap((x * mx) + mn)

    for node, pos in zip(netx_nodes, pos_list):
        variable = node[0]
        cpd = model.var2_cpd[variable]
        prior_marg = (cpd if cpd.evidence is None else
                      cpd.marginalize(cpd.evidence, inplace=False))

        show_evidence = variable in evidence
        show_prior = cpd.ttype not in ignore_prior_with_ttype
        show_post = variable in var2_post
        show_prior |= cpd.ttype not in ignore_prior_with_ttype

        post_marg = None

        if show_post:
            post_marg = var2_post[variable]

        def get_name_color(phi):
            order = phi.values.argsort()[::-1]
            if len(order) < 2:
                dist_next = phi.values[order[0]]
            else:
                dist_next = phi.values[order[0]] - phi.values[order[1]]
            dist_total = (phi.values[order[0]])
            confidence = (dist_total * dist_next) ** (2.5 / 4)
            #print('confidence = %r' % (confidence,))
            color = name_colors[order[0]]
            color = pt.color_funcs.desaturate_rgb(color, 1 - confidence)
            color = np.array(color)
            return color

        if variable in evidence:
            if cpd.ttype == SCORE_TTYPE:
                cmap_index = evidence[variable] / (cpd.variable_card - 1)
                color = cmap(cmap_index)
                color = pt.lighten_rgb(color, .4)
                color = np.array(color)
                node_color.append(color)
            elif cpd.ttype == NAME_TTYPE:
                color = name_colors[evidence[variable]]
                color = np.array(color)
                node_color.append(color)
            else:
                color = pt.FALSE_RED
                node_color.append(color)
        #elif variable in soft_evidence:
        #    color = pt.LIGHT_PINK
        #    show_prior = True
        #    color = get_name_color(prior_marg)
        #    node_color.append(color)
        else:
            if cpd.ttype == NAME_TTYPE and post_marg is not None:
                color = get_name_color(post_marg)
                node_color.append(color)
            elif cpd.ttype == MATCH_TTYPE and post_marg is not None:
                color = cmap(post_marg.values[1])
                color = pt.lighten_rgb(color, .4)
                color = np.array(color)
                node_color.append(color)
            else:
                #color = pt.WHITE
                color = pt.NEUTRAL
                node_color.append(color)

        if show_prior:
            if variable in soft_evidence:
                prior_color = pt.LIGHT_PINK
            else:
                prior_color = None
            prior_text = pgm_ext.make_factor_text(prior_marg, 'prior')
            prior_tas.append(dict(text=prior_text, pos=pos, color=prior_color, **takw2))
        if show_evidence:
            _takw1 = takw1
            if cpd.ttype == SCORE_TTYPE:
                _takw1 = takw2
            evidence_text = cpd.variable_statenames[evidence[variable]]
            if isinstance(evidence_text, int):
                evidence_text = '%d/%d' % (evidence_text + 1, cpd.variable_card)
            evidence_tas.append(dict(text=evidence_text, pos=pos, color=color, **_takw1))
        if show_post:
            _takw1 = takw1
            if cpd.ttype == MATCH_TTYPE:
                _takw1 = takw2
            post_text = pgm_ext.make_factor_text(post_marg, 'post')
            post_tas.append(dict(text=post_text, pos=pos, color=None, **_takw1))

    def trnps_(dict_list):
        """ tranpose dict list """
        list_dict = ut.ddict(list)
        for dict_ in dict_list:
            for key, val in dict_.items():
                list_dict[key + '_list'].append(val)
        return list_dict

    takw1_ = trnps_(post_tas + evidence_tas)
    takw2_ = trnps_(prior_tas)

    # Draw graph
    if has_inferred:
        pnum1 = (3, 1, (slice(0, 2), 0))
    else:
        pnum1 = None

    fig = pt.figure(fnum=fnum, pnum=pnum1, doclf=True)  # NOQA
    ax = pt.gca()
    #print('node_color = %s' % (ut.repr3(node_color),))
    drawkw = dict(pos=pos_dict, ax=ax, with_labels=True, node_size=1500,
                  node_color=node_color)
    netx.draw(netx_graph, **drawkw)

    hacks = []
    if len(post_tas + evidence_tas):
        hacks.append(pt.draw_text_annotations(textprops=textprops, **takw1_))
    if prior_tas:
        hacks.append(pt.draw_text_annotations(textprops=textprops, **takw2_))

    xmin, ymin = np.array(pos_list).min(axis=0)
    xmax, ymax = np.array(pos_list).max(axis=0)
    num_annots = len(model.ttype2_cpds[NAME_TTYPE])
    if num_annots > 4:
        ax.set_xlim((xmin - 40, xmax + 40))
        ax.set_ylim((ymin - 50, ymax + 50))
        fig.set_size_inches(30, 7)
    else:
        ax.set_xlim((xmin - 42, xmax + 42))
        ax.set_ylim((ymin - 50, ymax + 50))
        fig.set_size_inches(23, 7)
    fig = pt.gcf()

    title = 'num_names=%r, num_annots=%r' % (model.num_names, num_annots,)
    map_assign = kwargs.get('map_assign', None)

    top_assignments = kwargs.get('top_assignments', None)
    if top_assignments is not None:
        map_assign, map_prob = top_assignments[0]
        if map_assign is not None:
            def word_insert(text):
                return '' if len(text) == 0 else text + ' '
            title += '\n%sMAP: ' % (word_insert(kwargs.get('method', '')))
            title += map_assign + ' @' + '%.2f%%' % (100 * map_prob,)
    if kwargs.get('show_title', True):
        pt.set_figtitle(title, size=14)

    for hack in hacks:
        hack()

    # Hack in colorbars
    if has_inferred:
        pt.colorbar(np.linspace(0, 1, len(name_colors)), name_colors, lbl=NAME_TTYPE,
                    ticklabels=model.ttype2_template[NAME_TTYPE].basis, ticklocation='left')

        basis = model.ttype2_template[SCORE_TTYPE].basis
        scalars = np.linspace(0, 1, len(basis))
        scalars = np.linspace(0, 1, 100)
        colors = pt.scores_to_color(scalars, cmap_=cmap_, reverse_cmap=False,
                                    cmap_range=(mn, mx))
        colors = [pt.lighten_rgb(c, .4) for c in colors]

        if ut.list_type(basis) is int:
            pt.colorbar(scalars, colors, lbl=SCORE_TTYPE, ticklabels=np.array(basis) + 1)
        else:
            pt.colorbar(scalars, colors, lbl=SCORE_TTYPE, ticklabels=basis)
            #print('basis = %r' % (basis,))

    # Draw probability hist
    if has_inferred and top_assignments is not None:
        bin_labels = ut.get_list_column(top_assignments, 0)
        bin_vals =  ut.get_list_column(top_assignments, 1)

        # bin_labels = ['\n'.join(ut.textwrap.wrap(_lbl, width=30)) for _lbl in bin_labels]

        pt.draw_histogram(bin_labels, bin_vals, fnum=fnum, pnum=(3, 8, (2, slice(4, None))),
                          transpose=True,
                          use_darkbackground=False,
                          #xtick_rotation=-10,
                          ylabel='Prob', xlabel='assignment')
        pt.set_title('Assignment probabilities')
Example #18
def show_model(model, evidence={}, soft_evidence={}, **kwargs):
    """
    References:
        http://stackoverflow.com/questions/22207802/pygraphviz-networkx-set-node-level-or-layer

    Ignore:
        pkg-config --libs-only-L libcgraph
        sudo apt-get  install libgraphviz-dev -y
        sudo apt-get  install libgraphviz4 -y

        # sudo apt-get install pkg-config
        sudo apt-get install libgraphviz-dev
        # pip install git+git://github.com/pygraphviz/pygraphviz.git
        pip install pygraphviz
        python -c "import pygraphviz; print(pygraphviz.__file__)"

        sudo pip3 install pygraphviz --install-option="--include-path=/usr/include/graphviz" --install-option="--library-path=/usr/lib/graphviz/"
        python3 -c "import pygraphviz; print(pygraphviz.__file__)"
    """
    if ut.get_argval('--hackmarkov') or ut.get_argval('--hackjunc'):
        draw_tree_model(model, **kwargs)
        return

    import plottool_ibeis as pt
    import networkx as netx
    import matplotlib as mpl
    import numpy as np  # assumed to be a module-level import in the original file
    fnum = pt.ensure_fnum(None)
    fig = pt.figure(fnum=fnum, pnum=(3, 1, (slice(0, 2), 0)), doclf=True)  # NOQA
    #fig = pt.figure(fnum=fnum, pnum=(3, 2, (1, slice(1, 2))), doclf=True)  # NOQA
    ax = pt.gca()
    var2_post = {f.variables[0]: f for f in kwargs.get('factor_list', [])}

    netx_graph = (model)
    #netx_graph.graph.setdefault('graph', {})['size'] = '"10,5"'
    #netx_graph.graph.setdefault('graph', {})['rankdir'] = 'LR'

    pos = get_hacked_pos(netx_graph)
    #netx.nx_agraph.pygraphviz_layout(netx_graph)
    #pos = netx.nx_agraph.pydot_layout(netx_graph, prog='dot')
    #pos = netx.nx_agraph.graphviz_layout(netx_graph)

    drawkw = dict(pos=pos, ax=ax, with_labels=True, node_size=1500)
    if evidence is not None:
        node_colors = [
            # (pt.TRUE_BLUE
            (pt.WHITE
             if node not in soft_evidence else
             pt.LIGHT_PINK)
            if node not in evidence
            else pt.FALSE_RED
            for node in netx_graph.nodes()]

        for node in netx_graph.nodes():
            cpd = model.var2_cpd[node]
            if cpd.ttype == 'score':
                pass
        drawkw['node_color'] = node_colors

    netx.draw(netx_graph, **drawkw)

    show_probs = True
    if show_probs:
        textprops = {
            'family': 'monospace',
            'horizontalalignment': 'left',
            #'horizontalalignment': 'center',
            #'size': 12,
            'size': 8,
        }

        textkw = dict(
            xycoords='data', boxcoords='offset points', pad=0.25,
            framewidth=True, arrowprops=dict(arrowstyle='->'),
            #bboxprops=dict(fc=node_attr['fillcolor']),
        )

        netx_nodes = model.nodes(data=True)
        node_key_list = ut.get_list_column(netx_nodes, 0)
        pos_list = ut.dict_take(pos, node_key_list)

        artist_list = []
        offset_box_list = []
        for pos_, node in zip(pos_list, netx_nodes):
            x, y = pos_
            variable = node[0]

            cpd = model.var2_cpd[variable]

            prior_marg = (cpd if cpd.evidence is None else
                          cpd.marginalize(cpd.evidence, inplace=False))

            prior_text = None

            text = None
            if variable in evidence:
                text = cpd.variable_statenames[evidence[variable]]
            elif variable in var2_post:
                post_marg = var2_post[variable]
                text = pgm_ext.make_factor_text(post_marg, 'post')
                prior_text = pgm_ext.make_factor_text(prior_marg, 'prior')
            else:
                if len(evidence) == 0 and len(soft_evidence) == 0:
                    prior_text = pgm_ext.make_factor_text(prior_marg, 'prior')

            show_post = kwargs.get('show_post', False)
            show_prior = kwargs.get('show_prior', False)
            show_prior = True
            show_post = True
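            # NOTE: the kwarg defaults read above are overridden here, so priors and posteriors are always drawn.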

            show_ev = (evidence is not None and variable in evidence)
            if (show_post or show_ev) and text is not None:
                offset_box = mpl.offsetbox.TextArea(text, textprops)
                artist = mpl.offsetbox.AnnotationBbox(
                    # offset_box, (x + 5, y), xybox=(20., 5.),
                    offset_box, (x, y + 5), xybox=(4., 20.),
                    #box_alignment=(0, 0),
                    box_alignment=(.5, 0),
                    **textkw)
                offset_box_list.append(offset_box)
                artist_list.append(artist)

            if show_prior and prior_text is not None:
                offset_box2 = mpl.offsetbox.TextArea(prior_text, textprops)
                artist2 = mpl.offsetbox.AnnotationBbox(
                    # offset_box2, (x - 5, y), xybox=(-20., -15.),
                    # offset_box2, (x, y - 5), xybox=(-15., -20.),
                    offset_box2, (x, y - 5), xybox=(-4, -20.),
                    #box_alignment=(1, 1),
                    box_alignment=(.5, 1),
                    **textkw)
                offset_box_list.append(offset_box2)
                artist_list.append(artist2)

        for artist in artist_list:
            ax.add_artist(artist)

        xmin, ymin = np.array(pos_list).min(axis=0)
        xmax, ymax = np.array(pos_list).max(axis=0)
        num_annots = len(model.ttype2_cpds['name'])
        if num_annots > 4:
            ax.set_xlim((xmin - 40, xmax + 40))
            ax.set_ylim((ymin - 50, ymax + 50))
            fig.set_size_inches(30, 7)
        else:
            ax.set_xlim((xmin - 42, xmax + 42))
            ax.set_ylim((ymin - 50, ymax + 50))
            fig.set_size_inches(23, 7)
        fig = pt.gcf()

        title = 'num_names=%r, num_annots=%r' % (model.num_names, num_annots,)
        map_assign = kwargs.get('map_assign', None)
        #max_marginal_list = []
        #for name, marginal in marginalized_joints.items():
        #    states = list(ut.iprod(*marginal.statenames))
        #    vals = marginal.values.ravel()
        #    x = vals.argmax()
        #    max_marginal_list += ['P(' + ', '.join(states[x]) + ') = ' + str(vals[x])]
        # title += str(marginal)
        top_assignments = kwargs.get('top_assignments', None)
        if top_assignments is not None:
            map_assign, map_prob = top_assignments[0]
            if map_assign is not None:
                # title += '\nMAP=' + ut.repr2(map_assign, strvals=True)
                title += '\nMAP: ' + map_assign + ' @' + '%.2f%%' % (100 * map_prob,)
        if kwargs.get('show_title', True):
            pt.set_figtitle(title, size=14)
        #pt.set_xlabel()

        def hack_fix_centeralign():
            if textprops['horizontalalignment'] == 'center':
                print('Fixing centeralign')
                fig = pt.gcf()
                fig.canvas.draw()

                # Superhack for centered text. Fix bug in
                # /usr/local/lib/python2.7/dist-packages/matplotlib/offsetbox.py
                # /usr/local/lib/python2.7/dist-packages/matplotlib/text.py
                for offset_box in offset_box_list:
                    offset_box.set_offset
                    z = offset_box._text.get_window_extent()
                    (z.x1 - z.x0) / 2
                    offset_box._text
                    T = offset_box._text.get_transform()
                    A = mpl.transforms.Affine2D()
                    A.clear()
                    A.translate((z.x1 - z.x0) / 2, 0)
                    offset_box._text.set_transform(T + A)
        hack_fix_centeralign()
    top_assignments = kwargs.get('top_assignments', None)
    if top_assignments is not None:
        bin_labels = ut.get_list_column(top_assignments, 0)
        bin_vals =  ut.get_list_column(top_assignments, 1)

        # bin_labels = ['\n'.join(ut.textwrap.wrap(_lbl, width=30)) for _lbl in bin_labels]

        pt.draw_histogram(bin_labels, bin_vals, fnum=fnum, pnum=(3, 8, (2, slice(4, None))),
                          transpose=True,
                          use_darkbackground=False,
                          #xtick_rotation=-10,
                          ylabel='Prob', xlabel='assignment')
        pt.set_title('Assignment probabilities')
Example #19
def _dev_iters_until_threshold():
    """
    INTERACTIVE DEVELOPMENT FUNCTION

    How many iterations of EWMA until you hit the Poisson / binomial threshold?

    This establishes a principled way to choose the threshold for the refresh
    criterion in my thesis. There are parameters --- moving parts --- that we
    need to work with: `a` the patience, `s` the span, and `mu` our EWMA.

    `s` is a span parameter indicating how far we look back.

    `mu` is the average number of label-changing reviews in roughly the last
    `s` manual decisions.

    These numbers are used to estimate the probability that any of the next `a`
    manual decisions will be label-changing. When that probability falls below
    a threshold we terminate. The goal is to choose `a`, `s`, and the threshold
    `t`, such that the probability will fall below the threshold after at most
    `a` consecutive non-label-changing reviews. I.e. we want to tie the patience
    parameter (how far we look ahead) to how far we are actually willing to go.
    """
    import numpy as np
    import utool as ut
    import sympy as sym
    i = sym.symbols('i', integer=True, nonnegative=True, finite=True)
    # mu_i = sym.symbols('mu_i', integer=True, nonnegative=True, finite=True)
    s = sym.symbols('s', integer=True, nonnegative=True, finite=True)  # NOQA
    thresh = sym.symbols('tau', real=True, nonnegative=True, finite=True)  # NOQA
    alpha = sym.symbols('alpha', real=True, nonnegative=True, finite=True)  # NOQA
    c_alpha = sym.symbols('c_alpha', real=True, nonnegative=True, finite=True)
    # patience
    a = sym.symbols('a', real=True, nonnegative=True, finite=True)

    available_subs = {
        a: 20,
        s: a,
        alpha: 2 / (s + 1),
        c_alpha: (1 - alpha),
    }

    def subs(expr, d=available_subs):
        """ recursive expression substitution """
        expr1 = expr.subs(d)
        if expr == expr1:
            return expr1
        else:
            return subs(expr1, d=d)
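    # Illustration of the recursion above (added note): with the substitutions in
    # `available_subs`, subs(alpha) first becomes 2 / (s + 1), then s -> a -> 20
    # on later passes, so it finally evaluates to 2 / 21; the recursion stops
    # once a pass leaves the expression unchanged.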

    # mu is either the support for the poisson distribution
    # or it is the p in the binomial distribution.
    # It is updated at timestep i based on ewma, assuming each incoming response is 0
    mu_0 = 1.0
    mu_i = c_alpha ** i

    # Estimate probability that any event will happen in the next `a` reviews
    # at time `i`.
    poisson_i = 1 - sym.exp(-mu_i * a)
    binom_i = 1 - (1 - mu_i) ** a
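
    # Added sketch (numeric sanity check, not part of the original derivation):
    # evaluate the same quantities with plain floats for s = a = 20 to see how
    # quickly the EWMA and the two probabilities decay.
    _alpha = 2.0 / 21.0                           # alpha = 2 / (s + 1) with s = 20
    _mu_vals = (1.0 - _alpha) ** np.arange(5)     # EWMA after i all-negative reviews
    _binom_vals = 1.0 - (1.0 - _mu_vals) ** 20    # P(any event in the next a=20 reviews)
    _poisson_vals = 1.0 - np.exp(-_mu_vals * 20)
    # _mu_vals decays by about 9.5% per step; both probabilities stay near 1 for
    # small i and only drop below exp(-2) after roughly 50 iterations.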

    # Expand probabilities to be a function of i, s, and a
    part = ut.delete_dict_keys(available_subs.copy(), [a, s])
    mu_i = subs(mu_i, d=part)
    poisson_i = subs(poisson_i, d=part)
    binom_i = subs(binom_i, d=part)

    if True:
        # ewma of mu at time i if review is always not label-changing (meaningful)
        mu_1 = c_alpha * mu_0  # NOQA
        mu_2 = c_alpha * mu_1  # NOQA

    if True:
        i_vals = np.arange(0, 100)
        mu_vals = np.array([subs(mu_i).subs({i: i_}).evalf() for i_ in i_vals])  # NOQA
        binom_vals = np.array([subs(binom_i).subs({i: i_}).evalf() for i_ in i_vals])  # NOQA
        poisson_vals = np.array([subs(poisson_i).subs({i: i_}).evalf() for i_ in i_vals])  # NOQA

        # Find how many iters it actually takes my expt to terminate
        thesis_draft_thresh = np.exp(-2)
        np.where(mu_vals < thesis_draft_thresh)[0]
        np.where(binom_vals < thesis_draft_thresh)[0]
        np.where(poisson_vals < thesis_draft_thresh)[0]

    sym.pprint(sym.simplify(mu_i))
    sym.pprint(sym.simplify(binom_i))
    sym.pprint(sym.simplify(poisson_i))

    # Find the thresholds that force termination after `a` reviews have passed
    # do this by setting i=a
    poisson_thresh = poisson_i.subs({i: a})
    binom_thresh = binom_i.subs({i: a})
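    # Written out (added note), these thresholds are
    #   poisson:  tau = 1 - exp(-a * (1 - 2 / (s + 1)) ** a)
    #   binomial: tau = 1 - (1 - (1 - 2 / (s + 1)) ** a) ** a
    # since mu_a = (1 - alpha) ** a with alpha = 2 / (s + 1).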

    print('Poisson thresh')
    print(sym.latex(sym.Eq(thresh, poisson_thresh)))
    print(sym.latex(sym.Eq(thresh, sym.simplify(poisson_thresh))))

    poisson_thresh.subs({a: 115, s: 30}).evalf()

    sym.pprint(sym.Eq(thresh, poisson_thresh))
    sym.pprint(sym.Eq(thresh, sym.simplify(poisson_thresh)))

    print('Binomial thresh')
    sym.pprint(sym.simplify(binom_thresh))

    sym.pprint(sym.simplify(poisson_thresh.subs({s: a})))

    def taud(coeff):
        # interpret coeff as a fraction of a full turn (tau) and convert it to degrees
        return coeff * 360

    if 'poisson_cache' not in vars():
        poisson_cache = {}
        binom_cache = {}

    S, A = np.meshgrid(np.arange(1, 150, 1), np.arange(0, 150, 1))

    import plottool_ibeis as pt
    SA_coords = list(zip(S.ravel(), A.ravel()))
    for sval, aval in ut.ProgIter(SA_coords):
        if (sval, aval) not in poisson_cache:
            poisson_cache[(sval, aval)] = float(poisson_thresh.subs({a: aval, s: sval}).evalf())
    poisson_zdata = np.array(
        [poisson_cache[(sval, aval)] for sval, aval in SA_coords]).reshape(A.shape)
    fig = pt.figure(fnum=1, doclf=True)
    pt.gca().set_axis_off()
    pt.plot_surface3d(S, A, poisson_zdata, xlabel='s', ylabel='a',
                      rstride=3, cstride=3,
                      zlabel='poisson', mode='wire', contour=True,
                      title='poisson3d')
    pt.gca().set_zlim(0, 1)
    pt.gca().view_init(elev=taud(1 / 16), azim=taud(5 / 8))
    fig.set_size_inches(10, 6)
    fig.savefig('a-s-t-poisson3d.png', dpi=300, bbox_inches=pt.extract_axes_extents(fig, combine=True))

    for sval, aval in ut.ProgIter(SA_coords):
        if (sval, aval) not in binom_cache:
            binom_cache[(sval, aval)] = float(binom_thresh.subs({a: aval, s: sval}).evalf())
    binom_zdata = np.array(
        [binom_cache[(sval, aval)] for sval, aval in SA_coords]).reshape(A.shape)
    fig = pt.figure(fnum=2, doclf=True)
    pt.gca().set_axis_off()
    pt.plot_surface3d(S, A, binom_zdata, xlabel='s', ylabel='a',
                      rstride=3, cstride=3,
                      zlabel='binom', mode='wire', contour=True,
                      title='binom3d')
    pt.gca().set_zlim(0, 1)
    pt.gca().view_init(elev=taud(1 / 16), azim=taud(5 / 8))
    fig.set_size_inches(10, 6)
    fig.savefig('a-s-t-binom3d.png', dpi=300, bbox_inches=pt.extract_axes_extents(fig, combine=True))

    # Find point on the surface that achieves a reasonable threshold

    # Sympy can't solve this
    # sym.solve(sym.Eq(binom_thresh.subs({s: 50}), .05))
    # sym.solve(sym.Eq(poisson_thresh.subs({s: 50}), .05))
    # Find a numerical solution
    def solve_numeric(expr, target, want, fixed, method=None, bounds=None):
        """
        Args:
            expr (Expr): symbolic expression
            target (float): numeric value
            fixed (dict): fixed values of the symbol

        expr = poisson_thresh
        expr.free_symbols
        fixed = {s: 10}

        solve_numeric(poisson_thresh, .05, a, {s: 30}, method=None)
        solve_numeric(poisson_thresh, .05, a, {s: 30}, method='Nelder-Mead')
        solve_numeric(poisson_thresh, .05, a, {s: 30}, method='BFGS')
        """
        import scipy.optimize
        # Find the symbol you want to solve for
        want_symbols = expr.free_symbols - set(fixed.keys())
        # TODO: can probably extend this to multiple params
        assert len(want_symbols) == 1, 'specify all but one var'
        assert want == list(want_symbols)[0]
        fixed_expr = expr.subs(fixed)
        def func(a1):
            expr_value = float(fixed_expr.subs({want: a1}).evalf())
            return (expr_value - target) ** 2
        # if method is None:
        #     method = 'Nelder-Mead'
        #     method = 'Newton-CG'
        #     method = 'BFGS'
        # Use one of the fixed params' values as the starting point
        a1 = list(fixed.values())[0]
        result = scipy.optimize.minimize(func, x0=a1, method=method, bounds=bounds)
        if not result.success:
            print('\n')
            print(result)
            print('\n')
        return result
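
    # Added usage sketch for solve_numeric: solve for the patience `a` at which
    # the Poisson threshold equals 0.05 when the span is fixed at s = 30 (this
    # mirrors the sweeps below, just for a single point).
    _res = solve_numeric(poisson_thresh, .05, a, {s: 30}, method='Nelder-Mead')
    # _res.x[0] is that patience; plugging it back into poisson_thresh should
    # give a value close to the 0.05 target.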

    # Numeric measurements of this line

    thresh_vals = [.001, .01, .05, .1, .135]
    svals = np.arange(1, 100)

    target_poisson_plots = {}
    for target in ut.ProgIter(thresh_vals, bs=False, freq=1):
        poisson_avals = []
        for sval in ut.ProgIter(svals, 'poisson', freq=1):
            expr = poisson_thresh
            fixed = {s: sval}
            want = a
            aval = solve_numeric(expr, target, want, fixed,
                                 method='Nelder-Mead').x[0]
            poisson_avals.append(aval)
        target_poisson_plots[target] = (svals, poisson_avals)

    fig = pt.figure(fnum=3)
    for target, dat in target_poisson_plots.items():
        pt.plt.plot(*dat, label='prob={}'.format(target))
    pt.gca().set_xlabel('s')
    pt.gca().set_ylabel('a')
    pt.legend()
    pt.gca().set_title('poisson')
    fig.set_size_inches(5, 3)
    fig.savefig('a-vs-s-poisson.png', dpi=300, bbox_inches=pt.extract_axes_extents(fig, combine=True))

    target_binom_plots = {}
    for target in ut.ProgIter(thresh_vals, bs=False, freq=1):
        binom_avals = []
        for sval in ut.ProgIter(svals, 'binom', freq=1):
            aval = solve_numeric(binom_thresh, target, a, {s: sval}, method='Nelder-Mead').x[0]
            binom_avals.append(aval)
        target_binom_plots[target] = (svals, binom_avals)

    fig = pt.figure(fnum=4)
    for target, dat in target_binom_plots.items():
        pt.plt.plot(*dat, label='prob={}'.format(target))
    pt.gca().set_xlabel('s')
    pt.gca().set_ylabel('a')
    pt.legend()
    pt.gca().set_title('binom')
    fig.set_size_inches(5, 3)
    fig.savefig('a-vs-s-binom.png', dpi=300, bbox_inches=pt.extract_axes_extents(fig, combine=True))

    # ----
    if True:

        fig = pt.figure(fnum=5, doclf=True)
        s_vals = [1, 2, 3, 10, 20, 30, 40, 50]
        for sval in s_vals:
            pp = poisson_thresh.subs({s: sval})

            a_vals = np.arange(0, 200)
            pp_vals = np.array([float(pp.subs({a: aval}).evalf()) for aval in a_vals])  # NOQA

            pt.plot(a_vals, pp_vals, label='s=%r' % (sval,))
        pt.legend()
        pt.gca().set_xlabel('a')
        pt.gca().set_ylabel('poisson prob after a reviews')
        fig.set_size_inches(5, 3)
        fig.savefig('a-vs-thresh-poisson.png', dpi=300, bbox_inches=pt.extract_axes_extents(fig, combine=True))

        fig = pt.figure(fnum=6, doclf=True)
        s_vals = [1, 2, 3, 10, 20, 30, 40, 50]
        for sval in s_vals:
            pp = binom_thresh.subs({s: sval})
            a_vals = np.arange(0, 200)
            pp_vals = np.array([float(pp.subs({a: aval}).evalf()) for aval in a_vals])  # NOQA
            pt.plot(a_vals, pp_vals, label='s=%r' % (sval,))
        pt.legend()
        pt.gca().set_xlabel('a')
        pt.gca().set_ylabel('binom prob after a reviews')
        fig.set_size_inches(5, 3)
        fig.savefig('a-vs-thresh-binom.png', dpi=300, bbox_inches=pt.extract_axes_extents(fig, combine=True))

        # -------

        fig = pt.figure(fnum=5, doclf=True)
        a_vals = [1, 2, 3, 10, 20, 30, 40, 50]
        for aval in a_vals:
            pp = poisson_thresh.subs({a: aval})
            s_vals = np.arange(1, 200)
            pp_vals = np.array([float(pp.subs({s: sval}).evalf()) for sval in s_vals])  # NOQA
            pt.plot(s_vals, pp_vals, label='a=%r' % (aval,))
        pt.legend()
        pt.gca().set_xlabel('s')
        pt.gca().set_ylabel('poisson prob')
        fig.set_size_inches(5, 3)
        fig.savefig('s-vs-thresh-poisson.png', dpi=300, bbox_inches=pt.extract_axes_extents(fig, combine=True))

        fig = pt.figure(fnum=5, doclf=True)
        a_vals = [1, 2, 3, 10, 20, 30, 40, 50]
        for aval in a_vals:
            pp = binom_thresh.subs({a: aval})
            s_vals = np.arange(1, 200)
            pp_vals = np.array([float(pp.subs({s: sval}).evalf()) for sval in s_vals])  # NOQA
            pt.plot(s_vals, pp_vals, label='a=%r' % (aval,))
        pt.legend()
        pt.gca().set_xlabel('s')
        pt.gca().set_ylabel('binom prob')
        fig.set_size_inches(5, 3)
        fig.savefig('s-vs-thresh-binom.png', dpi=300, bbox_inches=pt.extract_axes_extents(fig, combine=True))

    #---------------------
    # Plot out a table

    mu_i.subs({s: 75, a: 75}).evalf()
    poisson_thresh.subs({s: 75, a: 75}).evalf()

    sval = 50
    for target, dat in target_poisson_plots.items():
        slope = np.median(np.diff(dat[1]))
        aval = int(np.ceil(sval * slope))
        thresh = float(poisson_thresh.subs({s: sval, a: aval}).evalf())
        print('aval={}, sval={}, thresh={}, target={}'.format(aval, sval, thresh, target))

    for target, dat in target_binom_plots.items():
        slope = np.median(np.diff(dat[1]))
        aval = int(np.ceil(sval * slope))
        thresh = float(binom_thresh.subs({s: sval, a: aval}).evalf())
        print('aval={}, sval={}, thresh={}, target={}'.format(aval, sval, thresh, target))
Beispiel #20
0
def show_multi_images(ibs, gid_list, fnum=None, **kwargs):
    r"""
    Args:
        ibs (IBEISController):  ibeis controller object
        gid_list (list):
        fnum (int):  figure number(default = None)

    CommandLine:
        python -m ibeis.viz.viz_image --test-show_multi_images --db NNP_Master3 --gids=7409,7448,4670,7497,7496,7464,7446,7442 --show
        python -m ibeis.viz.viz_image --test-show_multi_images --db NNP_Master3 --gids=1,2,3 --show

    Ignore:
        >>> # print the top 8 gids sorted by num aids
        >>> import ibeis
        >>> ibs = ibeis.opendb('NNP_Master3')
        >>> gid_list = ibs.get_valid_gids()
        >>> aids_list = ibs.get_image_aids(gid_list)
        >>> index_list = ut.list_argsort(list(map(len, aids_list)))[::-1]
        >>> gid_list = ut.take(gid_list, index_list[0:8])
        >>> print(','.join(map(str, gid_list)))

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.viz.viz_image import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> gid_list = ut.get_argval('--gids', list, default=[1, 2])
        >>> fnum = None
        >>> result = show_multi_images(ibs, gid_list, fnum, draw_lbls=False, notitle=True, sel_aids='all')
        >>> print(result)
        >>> ut.show_if_requested()
    """
    fnum = pt.ensure_fnum(fnum)
    nGids = len(gid_list)
    if nGids == 0:
        fig = pt.figure(fnum=fnum, pnum=(1, 1, 1), **kwargs)
        pt.imshow_null(fnum=fnum, **kwargs)
        return fig
    # Trigger computation of all chips in parallel
    #ibsfuncs.ensure_annotation_data(ibs, aid_list, chips=(not in_image or annote), feats=annote)

    rc = ut.get_argval('--rc', type_=list, default=None)
    if rc is None:
        nRows, nCols = ph.get_square_row_cols(nGids)
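        # e.g. 8 images end up on a near-square grid (the exact row/col split is
        # whatever ph.get_square_row_cols picks, roughly 3x3 here)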
    else:
        nRows, nCols = rc
    #notitle = ut.get_argflag('--notitle')
    #draw_lbls = not ut.get_argflag('--no-draw_lbls')
    #show_chip_kw = dict(annote=annote, in_image=in_image, notitle=notitle, draw_lbls=draw_lbls)
    #print('[viz_name] * r=%r, c=%r' % (nRows, nCols))
    #gs2 = gridspec.GridSpec(nRows, nCols)
    pnum_ = pt.get_pnum_func(nRows, nCols)
    fig = pt.figure(fnum=fnum, pnum=pnum_(0), **kwargs)
    fig.clf()
    for px, gid in enumerate(gid_list):
        print(pnum_(px))
        _fig, _ax1 = show_image(ibs, gid, fnum=fnum, pnum=pnum_(px), **kwargs)
        #ax = pt.gca()
        #if aid in sel_aids:
        #    pt.draw_border(ax, pt.GREEN, 4)
        #if ut.get_argflag('--numlbl') and not DOBOTH:
        #    ax.set_xlabel('(' + str(px + 1) + ')')
        #plot_aid3(ibs, aid)
    pass
Beispiel #21
0
def get_injured_sharks():
    """
    >>> from ibeis.scripts.getshark import *  # NOQA
    """
    import requests
    url = 'http://www.whaleshark.org/getKeywordImages.jsp'
    resp = requests.get(url)
    assert resp.status_code == 200
    keywords = resp.json()['keywords']
    key_list = ut.take_column(keywords, 'indexName')
    key_to_nice  = {k['indexName']: k['readableName'] for k in keywords}

    injury_patterns = [
        'injury', 'net', 'hook', 'trunc', 'damage', 'scar', 'nicks', 'bite',
    ]

    injury_keys = [key for key in key_list if any([pat in key for pat in injury_patterns])]
    noninjury_keys = ut.setdiff(key_list, injury_keys)
    injury_nice = ut.lmap(lambda k: key_to_nice[k], injury_keys)  # NOQA
    noninjury_nice = ut.lmap(lambda k: key_to_nice[k], noninjury_keys)  # NOQA
    key_list = injury_keys

    keyed_images = {}
    for key in ut.ProgIter(key_list, lbl='reading index', bs=True):
        key_url = url + '?indexName={indexName}'.format(indexName=key)
        key_resp = requests.get(key_url)
        assert key_resp.status_code == 200
        key_imgs = key_resp.json()['images']
        keyed_images[key] = key_imgs

    key_hist = {key: len(imgs) for key, imgs in keyed_images.items()}
    key_hist = ut.sort_dict(key_hist, 'vals')
    print(ut.repr3(key_hist))
    nice_key_hist = ut.map_dict_keys(lambda k: key_to_nice[k], key_hist)
    nice_key_hist = ut.sort_dict(nice_key_hist, 'vals')
    print(ut.repr3(nice_key_hist))

    key_to_urls = {key: ut.take_column(vals, 'url') for key, vals in keyed_images.items()}
    overlaps = {}
    import itertools
    overlap_img_list = []
    for k1, k2 in itertools.combinations(key_to_urls.keys(), 2):
        overlap_imgs = ut.isect(key_to_urls[k1], key_to_urls[k2])
        num_overlap = len(overlap_imgs)
        overlaps[(k1, k2)] = num_overlap
        overlaps[(k1, k1)] = len(key_to_urls[k1])
        if num_overlap > 0:
            #print('[%s][%s], overlap=%r' % (k1, k2, num_overlap))
            overlap_img_list.extend(overlap_imgs)

    all_img_urls = list(set(ut.flatten(key_to_urls.values())))
    num_all = len(all_img_urls)  # NOQA
    print('num_all = %r' % (num_all,))

    # Determine super-categories
    categories = ['nicks', 'scar', 'trunc']

    # Force these keys into these categories
    key_to_cat = {'scarbite': 'other_injury'}

    cat_to_keys = ut.ddict(list)

    for key in key_to_urls.keys():
        flag = 1
        if key in key_to_cat:
            cat = key_to_cat[key]
            cat_to_keys[cat].append(key)
            continue
        for cat in categories:
            if cat in key:
                cat_to_keys[cat].append(key)
                flag = 0
        if flag:
            cat = 'other_injury'
            cat_to_keys[cat].append(key)
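    # e.g. a hypothetical key like 'nicksleft' would land in the 'nicks' category,
    # 'scarbite' is forced into 'other_injury' via key_to_cat, and any key that
    # matches none of the categories also falls back to 'other_injury'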

    cat_urls = ut.ddict(list)
    for cat, keys in cat_to_keys.items():
        for key in keys:
            cat_urls[cat].extend(key_to_urls[key])

    cat_hist = {}
    for cat in list(cat_urls.keys()):
        cat_urls[cat] = list(set(cat_urls[cat]))
        cat_hist[cat] = len(cat_urls[cat])

    print(ut.repr3(cat_to_keys))
    print(ut.repr3(cat_hist))

    key_to_cat = dict([(val, key) for key, vals in cat_to_keys.items() for val in vals])

    #ingestset = {
    #    '__class__': 'ImageSet',
    #    'images': ut.ddict(dict)
    #}
    #for key, key_imgs in keyed_images.items():
    #    for imgdict in key_imgs:
    #        url = imgdict['url']
    #        encid = imgdict['correspondingEncounterNumber']
    #        # Make structure
    #        encdict = encounters[encid]
    #        encdict['__class__'] = 'Encounter'
    #        imgdict = ut.delete_keys(imgdict.copy(), ['correspondingEncounterNumber'])
    #        imgdict['__class__'] = 'Image'
    #        cat = key_to_cat[key]
    #        annotdict = {'relative_bbox': [.01, .01, .98, .98], 'tags': [cat, key]}
    #        annotdict['__class__'] = 'Annotation'

    #        # Ensure structures exist
    #        encdict['images'] = encdict.get('images', [])
    #        imgdict['annots'] = imgdict.get('annots', [])

    #        # Add an image to this encounter
    #        encdict['images'].append(imgdict)
    #        # Add an annotation to this image
    #        imgdict['annots'].append(annotdict)

    ##http://springbreak.wildbook.org/rest/org.ecocean.Encounter/1111
    #get_enc_url = 'http://www.whaleshark.org/rest/org.ecocean.Encounter/%s' % (encid,)
    #resp = requests.get(get_enc_url)
    #print(ut.repr3(encdict))
    #print(ut.repr3(encounters))

    # Download the files to the local disk
    #fpath_list =

    all_urls = ut.unique(ut.take_column(
        ut.flatten(
            ut.dict_subset(keyed_images, ut.flatten(cat_to_keys.values())).values()
        ), 'url'))

    dldir = ut.truepath('~/tmpsharks')
    from os.path import commonprefix, basename  # NOQA
    prefix = commonprefix(all_urls)
    suffix_list = [url_[len(prefix):] for url_ in all_urls]
    fname_list = [suffix.replace('/', '--') for suffix in suffix_list]

    fpath_list = []
    for url, fname in ut.ProgIter(zip(all_urls, fname_list), lbl='downloading imgs', freq=1):
        fpath = ut.grab_file_url(url, download_dir=dldir, fname=fname, verbose=False)
        fpath_list.append(fpath)

    # Make sure we keep orig info
    #url_to_keys = ut.ddict(list)
    url_to_info = ut.ddict(dict)
    for key, imgdict_list in keyed_images.items():
        for imgdict in imgdict_list:
            url = imgdict['url']
            info = url_to_info[url]
            for k, v in imgdict.items():
                info[k] = info.get(k, [])
                info[k].append(v)
            info['keys'] = info.get('keys', [])
            info['keys'].append(key)
            #url_to_keys[url].append(key)

    info_list = ut.take(url_to_info, all_urls)
    for info in info_list:
        if len(set(info['correspondingEncounterNumber'])) > 1:
            assert False, 'url with two different encounter nums'
    # Combine duplicate tags

    hashid_list = [ut.get_file_uuid(fpath_, stride=8) for fpath_ in ut.ProgIter(fpath_list, bs=True)]
    groupxs = ut.group_indices(hashid_list)[1]

    # Group properties by duplicate images
    #groupxs = [g for g in groupxs if len(g) > 1]
    fpath_list_ = ut.take_column(ut.apply_grouping(fpath_list, groupxs), 0)
    url_list_ = ut.take_column(ut.apply_grouping(all_urls, groupxs), 0)
    info_list_ = [ut.map_dict_vals(ut.flatten, ut.dict_accum(*info_))
                  for info_ in ut.apply_grouping(info_list, groupxs)]

    encid_list_ = [ut.unique(info_['correspondingEncounterNumber'])[0]
                   for info_ in info_list_]
    keys_list_ = [ut.unique(info_['keys']) for info_ in info_list_]
    cats_list_ = [ut.unique(ut.take(key_to_cat, keys)) for keys in keys_list_]

    clist = ut.ColumnLists({
        'gpath': fpath_list_,
        'url': url_list_,
        'encid': encid_list_,
        'key': keys_list_,
        'cat': cats_list_,
    })

    #for info_ in ut.apply_grouping(info_list, groupxs):
    #    info = ut.dict_accum(*info_)
    #    info = ut.map_dict_vals(ut.flatten, info)
    #    x = ut.unique(ut.flatten(ut.dict_accum(*info_)['correspondingEncounterNumber']))
    #    if len(x) > 1:
    #        info = info.copy()
    #        del info['keys']
    #        print(ut.repr3(info))

    flags = ut.lmap(ut.fpath_has_imgext, clist['gpath'])
    clist = clist.compress(flags)

    import ibeis
    ibs = ibeis.opendb('WS_Injury', allow_newdir=True)

    gid_list = ibs.add_images(clist['gpath'])
    clist['gid'] = gid_list

    failed_flags = ut.flag_None_items(clist['gid'])
    print('# failed %s' % (sum(failed_flags)),)
    passed_flags = ut.not_list(failed_flags)
    clist = clist.compress(passed_flags)
    ut.assert_all_not_None(clist['gid'])
    #ibs.get_image_uris_original(clist['gid'])
    ibs.set_image_uris_original(clist['gid'], clist['url'], overwrite=True)

    #ut.zipflat(clist['cat'], clist['key'])
    if False:
        # Can run detection instead
        clist['tags'] = ut.zipflat(clist['cat'])
        aid_list = ibs.use_images_as_annotations(clist['gid'], adjust_percent=0.01,
                                                 tags_list=clist['tags'])
        aid_list

    import plottool_ibeis as pt
    from ibeis import core_annots
    pt.qt4ensure()
    #annots = ibs.annots()
    #aids = [1, 2]
    #ibs.depc_annot.get('hog', aids , 'hog')
    #ibs.depc_annot.get('chip', aids, 'img')
    for aid in ut.InteractiveIter(ibs.get_valid_aids()):
        hogs = ibs.depc_annot.d.get_hog_hog([aid])
        chips = ibs.depc_annot.d.get_chips_img([aid])
        chip = chips[0]
        hogimg = core_annots.make_hog_block_image(hogs[0])
        pt.clf()
        pt.imshow(hogimg, pnum=(1, 2, 1))
        pt.imshow(chip, pnum=(1, 2, 2))
        fig = pt.gcf()
        fig.show()
        fig.canvas.draw()

    #print(len(groupxs))

    #if False:
    #groupxs = ut.find_duplicate_items(ut.lmap(basename, suffix_list)).values()
    #print(ut.repr3(ut.apply_grouping(all_urls, groupxs)))
    #    # FIX
    #    for fpath, fname in zip(fpath_list, fname_list):
    #        if ut.checkpath(fpath):
    #            ut.move(fpath, join(dirname(fpath), fname))
    #            print('fpath = %r' % (fpath,))

    #import ibeis
    #from ibeis.dbio import ingest_dataset
    #dbdir = ibeis.sysres.lookup_dbdir('WS_ALL')
    #self = ingest_dataset.Ingestable2(dbdir)

    if False:
        # Show overlap matrix
        import plottool_ibeis as pt
        import pandas as pd
        import numpy as np
        dict_ = overlaps
        s = pd.Series(dict_, index=pd.MultiIndex.from_tuples(overlaps))
        df = s.unstack()
        lhs, rhs = df.align(df.T)
        df = lhs.add(rhs, fill_value=0).fillna(0)
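        # The align/add above mirrors the upper-triangular overlap counts into a
        # full symmetric matrix: both (k1, k2) and (k2, k1) end up filled, the
        # diagonal is doubled in the process, and it is zeroed again before
        # plotting below.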

        label_texts = df.columns.values

        def label_ticks(label_texts):
            import plottool_ibeis as pt
            truncated_labels = [repr(lbl[0:100]) for lbl in label_texts]
            ax = pt.gca()
            ax.set_xticks(list(range(len(label_texts))))
            ax.set_xticklabels(truncated_labels)
            [lbl.set_rotation(-55) for lbl in ax.get_xticklabels()]
            [lbl.set_horizontalalignment('left') for lbl in ax.get_xticklabels()]

            #xgrid, ygrid = np.meshgrid(range(len(label_texts)), range(len(label_texts)))
            #pt.plot_surface3d(xgrid, ygrid, disjoint_mat)
            ax.set_yticks(list(range(len(label_texts))))
            ax.set_yticklabels(truncated_labels)
            [lbl.set_horizontalalignment('right') for lbl in ax.get_yticklabels()]
            [lbl.set_verticalalignment('center') for lbl in ax.get_yticklabels()]
            #[lbl.set_rotation(20) for lbl in ax.get_yticklabels()]

        #df = df.sort(axis=0)
        #df = df.sort(axis=1)

        sortx = np.argsort(df.sum(axis=1).values)[::-1]
        df = df.take(sortx, axis=0)
        df = df.take(sortx, axis=1)

        fig = pt.figure(fnum=1)
        fig.clf()
        mat = df.values.astype(np.int32)
        mat[np.diag_indices(len(mat))] = 0
        vmax = mat[(1 - np.eye(len(mat))).astype(bool)].max()
        import matplotlib.colors
        norm = matplotlib.colors.Normalize(vmin=0, vmax=vmax, clip=True)
        pt.plt.imshow(mat, cmap='hot', norm=norm, interpolation='none')
        pt.plt.colorbar()
        pt.plt.grid('off')
        label_ticks(label_texts)
        fig.tight_layout()

    #overlap_df = pd.DataFrame.from_dict(overlap_img_list)

    class TmpImage(ut.NiceRepr):
        pass

    from skimage.feature import hog
    from skimage import data, color, exposure
    import plottool_ibeis as pt
    image2 = color.rgb2gray(data.astronaut())  # NOQA

    fpath = './GOPR1120.JPG'

    import vtool_ibeis as vt
    for fpath in [fpath]:
        """
        http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
        """

        image = vt.imread(fpath, grayscale=True)
        image = pt.color_funcs.to_base01(image)

        fig = pt.figure(fnum=2)
        fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
                            cells_per_block=(1, 1), visualise=True)

        fig, (ax1, ax2) = pt.plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)

        ax1.axis('off')
        ax1.imshow(image, cmap=pt.plt.cm.gray)
        ax1.set_title('Input image')
        ax1.set_adjustable('box-forced')

        # Rescale histogram for better display
        hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))

        ax2.axis('off')
        ax2.imshow(hog_image_rescaled, cmap=pt.plt.cm.gray)
        ax2.set_title('Histogram of Oriented Gradients')
        ax2.set_adjustable('box-forced')
        pt.plt.show()
Beispiel #22
0
def viz_netx_chipgraph(ibs,
                       graph,
                       fnum=None,
                       use_image=False,
                       layout=None,
                       zoom=None,
                       prog='neato',
                       as_directed=False,
                       augment_graph=True,
                       layoutkw=None,
                       framewidth=True,
                       **kwargs):
    r"""
    DEPRICATE or improve

    Args:
        ibs (IBEISController):  ibeis controller object
        graph (nx.DiGraph):
        fnum (int):  figure number(default = None)
        use_image (bool): (default = False)
        zoom (float): (default = 0.4)

    Returns:
        ?: pos

    CommandLine:
        python -m ibeis --tf viz_netx_chipgraph --show

    Cand:
        ibeis review_tagged_joins --save figures4/mergecase.png --figsize=15,15
            --clipwhite --diskshow
        ibeis compute_occurrence_groups --save figures4/occurgraph.png
            --figsize=40,40 --clipwhite --diskshow
        ~/code/ibeis/ibeis/algo/preproc/preproc_occurrence.py

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.viz.viz_graph import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_MTEST')
        >>> nid_list = ibs.get_valid_nids()[0:10]
        >>> fnum = None
        >>> use_image = True
        >>> zoom = 0.4
        >>> make_name_graph_interaction(ibs, nid_list, prog='neato')
        >>> ut.show_if_requested()
    """
    import plottool_ibeis as pt
    print('[viz_graph] drawing chip graph')
    fnum = pt.ensure_fnum(fnum)
    pt.figure(fnum=fnum, pnum=(1, 1, 1))
    ax = pt.gca()

    if layout is None:
        layout = 'agraph'
    print('layout = %r' % (layout, ))

    if use_image:
        ensure_node_images(ibs, graph)
    nx.set_node_attributes(graph, name='shape', values='rect')

    if layoutkw is None:
        layoutkw = {}
    layoutkw['prog'] = layoutkw.get('prog', prog)
    layoutkw.update(kwargs)

    if prog == 'neato':
        graph = graph.to_undirected()

    plotinfo = pt.show_nx(
        graph,
        ax=ax,
        # img_dict=img_dict,
        layout=layout,
        # hacknonode=bool(use_image),
        layoutkw=layoutkw,
        as_directed=as_directed,
        framewidth=framewidth,
    )
    return plotinfo
Beispiel #23
0
def show_qres(ibs, cm, qreq_=None, **kwargs):
    r"""
    Display Query Result Logic
    Defaults to: query chip, groundtruth matches, and top matches

    Args:
        ibs (ibeis.IBEISController): ibeis controller object
        cm (ibeis.ChipMatch): object of feature correspondences and scores
        qreq_ (ibeis.QueryRequest):  query request object with hyper-parameters(default = None)

    Kwargs:
        annot_mode, figtitle, make_figtitle, aug, top_aids, all_kpts,
        show_query, in_image, sidebyside, name_scoring, max_nCols,
        failed_to_match, fnum
        in_image (bool) show result  in image view if True else chip view
        annot_mode (int):
            if annot_mode == 0, then draw lines and ellipse
            elif annot_mode == 1, then dont draw lines or ellipse
            elif annot_mode == 2, then draw only lines
            elif annot_mode == 3, draw heatmask only
        See: viz_matches.show_name_matches, viz_helpers.get_query_text

    Returns:
        mpl.Figure: fig

    CommandLine:
        ./main.py --query 1 -y --db PZ_MTEST --noshow-qres
        python -m ibeis.viz.viz_qres show_qres --show
        python -m ibeis.viz.viz_qres show_qres --show --top-aids=10 --db=PZ_MTEST \
                --sidebyside --annot_mode=0 --notitle --no-viz_name_score \
                --qaids=5 --max_nCols=2 --adjust=.01,.01,.01

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.viz.viz_qres import *  # NOQA
        >>> import ibeis
        >>> cm, qreq_ = ibeis.testdata_cm()
        >>> kwargs = dict(
        >>>     top_aids=ut.get_argval('--top-aids', type_=int, default=3),
        >>>     sidebyside=not ut.get_argflag('--no-sidebyside'),
        >>>     annot_mode=ut.get_argval('--annot_mode', type_=int, default=1),
        >>>     viz_name_score=not ut.get_argflag('--no-viz_name_score'),
        >>>     simplemode=ut.get_argflag('--simplemode'),
        >>>     max_nCols=ut.get_argval('--max_nCols', type_=int, default=None)
        >>> )
        >>> ibs = qreq_.ibs
        >>> fig = show_qres(ibs, cm, show_query=False, qreq_=qreq_, **kwargs)
        >>> ut.show_if_requested()
    """
    #ut.print_dict(kwargs)
    annot_mode = kwargs.get('annot_mode', 1) % 4  # this is toggled
    figtitle = kwargs.get('figtitle', '')
    aug = kwargs.get('aug', '')
    top_aids = kwargs.get('top_aids', DEFAULT_NTOP)
    gt_aids = kwargs.get('gt_aids', [])
    all_kpts = kwargs.get('all_kpts', False)
    show_query = kwargs.get('show_query', False)
    in_image = kwargs.get('in_image', False)
    sidebyside = kwargs.get('sidebyside', True)
    simplemode = kwargs.get('simplemode', False)
    colorbar_ = kwargs.get('colorbar_', False)
    #name_scoring   = kwargs.get('name_scoring', False)
    viz_name_score = kwargs.get('viz_name_score', qreq_ is not None)
    max_nCols = kwargs.get('max_nCols', None)
    failed_to_match = kwargs.get('failed_to_match', False)

    fnum = pt.ensure_fnum(kwargs.get('fnum', None))

    if ut.VERBOSE and ut.NOT_QUIET:
        print('query_info = ' + ut.repr2(ibs.get_annot_info(cm.qaid,
                                                            default=True,
                                                            gname=False,
                                                            name=False,
                                                            notes=False,
                                                            exemplar=False),
                                         nl=4))
        print('top_aids_info = ' +
              ut.repr2(ibs.get_annot_info(top_aids,
                                          default=True,
                                          gname=False,
                                          name=False,
                                          notes=False,
                                          exemplar=False,
                                          reference_aid=cm.qaid),
                       nl=4))

    fig = pt.figure(fnum=fnum, docla=True, doclf=True)

    if isinstance(top_aids, int):
        top_aids = cm.get_top_aids(top_aids)

    if failed_to_match:
        # HACK to visually indicate failure to match in analysis
        show_query = True
        top_aids = [None] + top_aids

    nTop = len(top_aids)

    if max_nCols is None:
        max_nCols = 5
        if nTop in [6, 7]:
            max_nCols = 3
        if nTop in [8]:
            max_nCols = 4

    try:
        assert len(list(set(top_aids).intersection(
            set(gt_aids)))) == 0, ('gts should be missed.  not in top')
    except AssertionError as ex:
        ut.printex(ex, keys=['top_aids', 'gt_aids'])
        raise

    if ut.DEBUG2:
        print(cm.get_inspect_str())

    #--------------------------------------------------
    # Get grid / cell information to build subplot grid
    #--------------------------------------------------
    # Show query or not
    nQuerySubplts = 1 if show_query else 0
    # The top row is given slots for ground truths and querys
    # all aids in gt_aids should not be in top aids
    nGtSubplts = nQuerySubplts + (0 if gt_aids is None else len(gt_aids))
    # The bottom rows are for the top results
    nTopNSubplts = nTop
    nTopNCols = min(max_nCols, nTopNSubplts)
    nGTCols = min(max_nCols, nGtSubplts)
    nGTCols = max(nGTCols, nTopNCols)
    nTopNCols = nGTCols
    # Get number of rows to show groundtruth
    nGtRows = 0 if nGTCols == 0 else int(np.ceil(nGtSubplts / nGTCols))
    # Get number of rows to show results
    nTopNRows = 0 if nTopNCols == 0 else int(np.ceil(nTopNSubplts / nTopNCols))
    nGtCells = nGtRows * nGTCols
    # Total number of rows
    nRows = nTopNRows + nGtRows
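    # Worked example of the layout math above (added note): with show_query=True,
    # two ground-truth aids, five top aids, and max_nCols=5 this gives
    # nGtSubplts=3, nTopNSubplts=5, nGTCols=nTopNCols=5, nGtRows=nTopNRows=1,
    # nGtCells=5, and nRows=2: the query and ground truths share the first row
    # and the five results fill the second.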

    DEBUG_SHOW_QRES = 0

    if DEBUG_SHOW_QRES:
        allgt_aids = ibs.get_annot_groundtruth(cm.qaid)
        nSelGt = len(gt_aids)
        nAllGt = len(allgt_aids)
        print('[show_qres]========================')
        print('[show_qres]----------------')
        print('[show_qres] * annot_mode=%r' % (annot_mode, ))
        print('[show_qres] #nTop=%r #missed_gts=%r/%r' %
              (nTop, nSelGt, nAllGt))
        print('[show_qres] * -----')
        print('[show_qres] * nRows=%r' % (nRows, ))
        print('[show_qres] * nGtSubplts=%r' % (nGtSubplts, ))
        print('[show_qres] * nTopNSubplts=%r' % (nTopNSubplts, ))
        print('[show_qres] * nQuerySubplts=%r' % (nQuerySubplts, ))
        print('[show_qres] * -----')
        print('[show_qres] * nGTCols=%r' % (nGTCols, ))
        print('[show_qres] * -----')
        print('[show_qres] * fnum=%r' % (fnum, ))
        print('[show_qres] * figtitle=%r' % (figtitle, ))
        print('[show_qres] * max_nCols=%r' % (max_nCols, ))
        print('[show_qres] * show_query=%r' % (show_query, ))
        print('[show_qres] * kwargs=%s' % (ut.repr2(kwargs), ))

    # HACK:
    _color_list = pt.distinct_colors(nTop)
    aid2_color = {aid: _color_list[ox] for ox, aid in enumerate(top_aids)}
    ranked_aids = cm.get_top_aids()

    # Helpers
    def _show_query_fn(plotx_shift, rowcols):
        """ helper for show_qres """
        plotx = plotx_shift + 1
        pnum = (rowcols[0], rowcols[1], plotx)
        #print('[viz] Plotting Query: pnum=%r' % (pnum,))
        _kwshow = dict(draw_kpts=annot_mode)
        _kwshow.update(kwargs)
        _kwshow['prefix'] = 'q'
        _kwshow['pnum'] = pnum
        _kwshow['aid2_color'] = aid2_color
        _kwshow['draw_ell'] = annot_mode >= 1
        viz_chip.show_chip(ibs, cm.qaid, annote=False, qreq_=qreq_, **_kwshow)

    def _plot_matches_aids(aid_list, plotx_shift, rowcols):
        """ helper for show_qres to draw many aids """
        _kwshow = dict(draw_ell=annot_mode,
                       draw_pts=False,
                       draw_lines=annot_mode,
                       ell_alpha=.5,
                       all_kpts=all_kpts)
        _kwshow.update(kwargs)
        _kwshow['fnum'] = fnum
        _kwshow['in_image'] = in_image
        _kwshow['colorbar_'] = colorbar_
        if sidebyside:
            # Draw each match side by side the query
            _kwshow['draw_ell'] = annot_mode in {1}
            _kwshow['draw_lines'] = annot_mode in {1, 2}
            _kwshow['heatmask'] = annot_mode in {3}
        else:
            #print('annot_mode = %r' % (annot_mode,))
            _kwshow['draw_ell'] = annot_mode == 1
            #_kwshow['draw_pts'] = annot_mode >= 1
            #_kwshow['draw_lines'] = False
            _kwshow['show_query'] = False

        def _show_matches_fn(aid, orank, pnum):
            """ Helper function for drawing matches to one aid """
            aug = 'rank=%r\n' % orank
            _kwshow['pnum'] = pnum
            _kwshow['title_aug'] = aug
            #draw_ell = annot_mode == 1
            #draw_lines = annot_mode >= 1
            # If we already are showing the query dont show it here
            if sidebyside:
                # Draw each match side by side the query
                if viz_name_score:
                    cm.show_single_namematch(qreq_, ibs.get_annot_nids(aid),
                                             **_kwshow)
                else:
                    if simplemode:
                        _kwshow['draw_border'] = False
                        _kwshow['draw_lbl'] = False
                        _kwshow['notitle'] = True
                        _kwshow['vert'] = False
                        _kwshow['modifysize'] = True
                    cm.show_single_annotmatch(qreq_, aid, **_kwshow)
                    #viz_matches.show_matches(ibs, cm, aid, qreq_=qreq_, **_kwshow)
            else:
                # Draw each match by themselves
                data_config2_ = None if qreq_ is None else qreq_.extern_data_config2
                #_kwshow['draw_border'] = kwargs.get('draw_border', True)
                #_kwshow['notitle'] = ut.get_argflag(('--no-title', '--notitle'))
                viz_chip.show_chip(ibs,
                                   aid,
                                   annote=False,
                                   notitle=True,
                                   data_config2_=data_config2_,
                                   **_kwshow)

        if DEBUG_SHOW_QRES:
            print('[show_qres()] Plotting Chips %s:' %
                  vh.get_aidstrs(aid_list))
        if aid_list is None:
            return
        # Do lazy load before show
        #data_config2_ = None if qreq_ is None else qreq_.extern_data_config2

        # tblhack = getattr(qreq_, 'tablename', None)
        # HACK FOR HUMPBACKS
        # (Also in viz_matches)
        # if tblhack == 'vsone' or (qreq_ is not None and not qreq_._isnewreq):
        #     # precompute
        #     pass
        #     #ibs.get_annot_chips(aid_list, config2_=data_config2_, ensure=True)
        #     #ibs.get_annot_kpts(aid_list, config2_=data_config2_, ensure=True)

        for ox, aid in enumerate(aid_list):
            plotx = ox + plotx_shift + 1
            pnum = (rowcols[0], rowcols[1], plotx)
            oranks = np.where(ranked_aids == aid)[0]
            # This pair has no matches between them.
            if len(oranks) == 0:
                orank = -1
                if aid is None:
                    pt.imshow_null('Failed to find matches\nfor qaid=%r' %
                                   (cm.qaid),
                                   fnum=fnum,
                                   pnum=pnum,
                                   fontsize=18)
                else:
                    _show_matches_fn(aid, orank, pnum)
                #if DEBUG_SHOW_QRES:
                #    print('skipping pnum=%r' % (pnum,))
                continue
            if DEBUG_SHOW_QRES:
                print('pnum=%r' % (pnum, ))
            orank = oranks[0] + 1
            _show_matches_fn(aid, orank, pnum)

    shift_topN = nGtCells

    if nGtSubplts == 1:
        nGTCols = 1

    if nRows == 0:
        pt.imshow_null('[viz_qres] No matches. nRows=0', fnum=fnum)
    else:
        fig = pt.figure(fnum=fnum,
                        pnum=(nRows, nGTCols, 1),
                        docla=True,
                        doclf=True)
        pt.plt.subplot(nRows, nGTCols, 1)
        # Plot Query
        if show_query:
            _show_query_fn(0, (nRows, nGTCols))
        # Plot Ground Truth (if given)
        _plot_matches_aids(gt_aids, nQuerySubplts, (nRows, nGTCols))
        # Plot Results
        _plot_matches_aids(top_aids, shift_topN, (nRows, nTopNCols))
        figtitle += aug
    if failed_to_match:
        figtitle += '\n No matches found'

    incanvas = kwargs.get('with_figtitle', not vh.NO_LBL_OVERRIDE)
    pt.set_figtitle(figtitle, incanvas=incanvas)

    # Result Interaction
    return fig
Beispiel #24
0
    def chipmatch_view(self,
                       fnum=None,
                       pnum=(1, 1, 1),
                       verbose=None,
                       **kwargs_):
        """
        just visualizes the matches using some type of lines

        CommandLine:
            python -m ibeis.viz.interact.interact_matches --test-chipmatch_view --show

        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.viz.interact.interact_matches import *  # NOQA
            >>> self = testdata_match_interact()
            >>> self.chipmatch_view()
            >>> pt.show_if_requested()
        """
        if fnum is None:
            fnum = self.fnum
        if verbose is None:
            verbose = ut.VERBOSE

        ibs = self.ibs
        aid = self.daid
        qaid = self.qaid
        figtitle = self.figtitle

        # drawing mode draw: with/without lines/feats
        mode = kwargs_.get('mode', self.mode)
        draw_ell = mode >= 1
        draw_lines = mode == 2
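        # i.e. mode 0 draws neither, mode 1 draws only ellipses, mode 2 draws
        # both ellipses and match lines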
        #self.mode = (self.mode + 1) % 3
        pt.figure(fnum=fnum, docla=True, doclf=True)
        show_matches_kw = self.kwargs.copy()
        show_matches_kw.update(
            dict(fnum=fnum,
                 pnum=pnum,
                 draw_lines=draw_lines,
                 draw_ell=draw_ell,
                 colorbar_=True,
                 vert=self.vert))
        show_matches_kw.update(kwargs_)

        if self.warp_homog:
            show_matches_kw['H1'] = self.H1

        #show_matches_kw['score'] = self.score
        show_matches_kw['rawscore'] = self.score
        show_matches_kw['aid2_raw_rank'] = self.rank
        tup = viz.viz_matches.show_matches2(ibs,
                                            self.qaid,
                                            self.daid,
                                            self.fm,
                                            self.fs,
                                            qreq_=self.qreq_,
                                            **show_matches_kw)
        ax, xywh1, xywh2 = tup
        self.xywh2 = xywh2

        pt.set_figtitle(figtitle + ' ' + vh.get_vsstr(qaid, aid))