Example #1
    def execute_test():
        func = find_expt_func(e)
        assert func is not None, 'unknown experiment e=%r' % (e, )

        argspec = ut.get_func_argspec(func)
        if (len(argspec.args) >= 2 and argspec.args[0] == 'ibs'
                and argspec.args[1] == 'testres'):
            # most experiments need a testres
            expts_kw = dict(
                defaultdb=db,
                dbdir=dbdir,
                a=a,
                t=t,
                qaid_override=qaid_override,
                daid_override=daid_override,
                initial_aids=initial_aids,
            )
            testdata_expts_func = functools.partial(
                main_helpers.testdata_expts, **expts_kw)

            ibs, testres = testdata_expts_func()
            # Build the requested drawing function
            draw_func = functools.partial(func, ibs, testres, **kwargs)
            testres.draw_func = draw_func
            ut.inject_func_as_method(testres, draw_cases)
            ut.inject_func_as_method(testres, draw_taghist)
            # testres.draw_cases = draw_cases
            return testres
        else:
            raise AssertionError('Unknown type of function for experiment')
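All of the examples on this page revolve around utool's ut.inject_func_as_method, which attaches a free function to an instance so it can later be called as a method (above, draw_cases and draw_taghist become methods of testres). As a rough illustrative sketch only, not utool's actual implementation, the same binding can be approximated with types.MethodType; the TestResult class and draw_cases function below are hypothetical stand-ins.

import types

class TestResult(object):
    """Hypothetical stand-in for the testres object used above."""

def draw_cases(testres):
    # Once bound, 'testres' plays the role of 'self'.
    print('drawing cases for %r' % (testres,))

testres = TestResult()
# Roughly what ut.inject_func_as_method(testres, draw_cases) accomplishes:
testres.draw_cases = types.MethodType(draw_cases, testres)
testres.draw_cases()  # now callable like an ordinary method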
Example #2
def simple_api_item_widget():
    r"""
    Very simple example of a basic APIItemWidget with a CustomAPI

    CommandLine:
        python -m wbia.guitool.api_item_widget --test-simple_api_item_widget
        python -m wbia.guitool.api_item_widget --test-simple_api_item_widget --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> # xdoctest: +REQUIRES(--gui)
        >>> from wbia.guitool.api_item_widget import *  # NOQA
        >>> import wbia.guitool as gt
        >>> gt.ensure_qapp()  # must be ensured before any embedding
        >>> wgt = simple_api_item_widget()
        >>> # xdoctest: +REQUIRES(--show)
        >>> ut.quit_if_noshow()
        >>> wgt.show()
        >>> gt.qtapp_loop(wgt, frequency=100)
    """
    import wbia.guitool as gt

    gt.ensure_qapp()
    col_getter_dict = {
        'col1': [1, 2, 3],
        'col2': ['a', 'b', 'c'],
        'col3': ['e', 'f', 'g'],
    }
    sortby = 'col1'

    # col_display_role_func_dict = {
    #    'col1': lambda x: 'banana %d' % ((x * 100 % 23)),
    # }

    api = gt.CustomAPI(
        col_getter_dict=col_getter_dict,
        # col_display_role_func_dict=col_display_role_func_dict,
        editable_colnames=['col3'],
        sortby=sortby,
    )
    headers = api.make_headers(tblnice='Simple Example')

    wgt = gt.APIItemWidget()
    wgt.change_headers(headers)
    # gt.qtapp_loop(qwin=wgt, ipy=ipy, frequency=loop_freq)

    # for testing
    wgt.menubar = gt.newMenubar(wgt)
    wgt.menuFile = wgt.menubar.newMenu('Dev')

    def wgt_embed(wgt):
        view = wgt.view  # NOQA
        import utool

        utool.embed()

    ut.inject_func_as_method(wgt, wgt_embed)
    wgt.menuFile.newAction(triggered=wgt.wgt_embed)
    return wgt
Example #3
def make_vsone_tuner(
    ibs, edge=None, qreq_=None, autoupdate=True, info_text=None, cfgdict=None
):
    """
    Makes a Qt widget for inspecting one-vs-one matches

    CommandLine:
        python -m wbia.gui.inspect_gui make_vsone_tuner --show

    Example:
        >>> # xdoctest: +REQUIRES(--gui)
        >>> from wbia.gui.inspect_gui import *  # NOQA
        >>> import wbia
        >>> gt.ensure_qapp()
        >>> ut.qtensure()
        >>> ibs = wbia.opendb(defaultdb='PZ_MTEST')
        >>> edge = ut.get_argval('--aids', default=[1, 2], type_=list)
        >>> self = make_vsone_tuner(ibs, edge, autoupdate=False)
        >>> ut.quit_if_noshow()
        >>> self.show()
        >>> gt.qtapp_loop(qwin=self, freq=10)

    """
    from vtool import inspect_matches
    import vtool as vt

    if cfgdict is not None:
        assert qreq_ is None, 'specify only one cfg or qreq_'
    else:
        cfgdict = {}

    def set_edge(self, edge, info_text=None):
        aid1, aid2 = edge
        if qreq_ is None:
            qreq2_ = ibs.new_query_request([aid1], [aid2], cfgdict=cfgdict, verbose=False)
        else:
            qreq2_ = ibs.new_query_request(
                [aid1], [aid2], cfgdict=qreq_.qparams, verbose=False
            )
        qconfig2_ = qreq2_.extern_query_config2
        dconfig2_ = qreq2_.extern_data_config2
        annot1 = ibs.annots([aid1], config=qconfig2_)[0]._make_lazy_dict()
        annot2 = ibs.annots([aid2], config=dconfig2_)[0]._make_lazy_dict()
        match = vt.PairwiseMatch(annot1, annot2)

        def on_context():
            from wbia.gui import inspect_gui

            return inspect_gui.make_annotpair_context_options(ibs, aid1, aid2, None)

        self.set_match(match, on_context, info_text)

    self = inspect_matches.MatchInspector(autoupdate=autoupdate, cfgdict=cfgdict)
    ut.inject_func_as_method(self, set_edge)
    if edge is not None:
        self.set_edge(edge, info_text)
    return self
Example #4
def unmonkeypatch_encounters(ibs):
    from wbia.other import ibsfuncs

    ut.inject_func_as_method(
        ibs, ibsfuncs.get_annot_encounter_text, 'get_annot_encounter_text', force=True
    )
    ut.inject_func_as_method(
        ibs, ibsfuncs.get_annot_occurrence_text, 'get_annot_occurrence_text', force=True
    )
Example #5
File: __init__.py Project: heroinlin/ibeis
    def execute_test():
        func = find_expt_func(e)
        assert func is not None, 'unknown experiment e=%r' % (e,)

        argspec = ut.get_func_argspec(func)
        if len(argspec.args) >= 2 and argspec.args[0] == 'ibs' and argspec.args[1] == 'testres':
            # most experiments need a testres
            expts_kw = dict(defaultdb=db, a=a, t=t,
                            qaid_override=qaid_override,
                            daid_override=daid_override,
                            initial_aids=initial_aids
                           )
            testdata_expts_func = functools.partial(main_helpers.testdata_expts, **expts_kw)

            ibs, testres = testdata_expts_func()
            # Build the requested drawing function
            draw_func = functools.partial(func, ibs, testres, **kwargs)
            testres.draw_func = draw_func
            ut.inject_func_as_method(testres, draw_cases)
            ut.inject_func_as_method(testres, draw_taghist)
            #testres.draw_cases = draw_cases
            return testres
        else:
            raise AssertionError('Unknown type of function for experiment')
Example #6
def customize_model(model):
    model.var2_cpd = {cpd.variable: cpd for cpd in model.cpds}
    model.ttype2_cpds = ut.groupby_attr(model.cpds, 'ttype')
    model._templates = list(
        set([cpd._template_ for cpd in model.var2_cpd.values()]))
    model.ttype2_template = {t.ttype: t for t in model._templates}

    def pretty_evidence(model, evidence):
        return [
            evar + '=' + str(model.var2_cpd[evar].variable_statenames[val])
            for evar, val in evidence.items()
        ]

    def print_templates(model, ignore_ttypes=[]):
        templates = model._templates
        ut.colorprint('\n --- CPD Templates ---', 'blue')
        for temp_cpd in templates:
            if temp_cpd.ttype not in ignore_ttypes:
                ut.colorprint(temp_cpd._cpdstr('psql'), 'brightcyan')

    def print_priors(model, ignore_ttypes=[], title='Priors', color='blue'):
        ut.colorprint('\n --- %s ---' % (title, ), color=color)
        for ttype, cpds in model.ttype2_cpds.items():
            if ttype not in ignore_ttypes:
                for fs_ in ut.ichunks(cpds, 4):
                    ut.colorprint(ut.hz_str([f._cpdstr('psql') for f in fs_]),
                                  color)

    ut.inject_func_as_method(model, print_priors)
    ut.inject_func_as_method(model, print_templates)
    ut.inject_func_as_method(model, pretty_evidence)
    ut.inject_func_as_method(model, pgm_viz.show_model)
    ut.inject_func_as_method(model, pgm_viz.show_markov_model)
    ut.inject_func_as_method(model, pgm_viz.show_junction_tree)
    return model
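The injections above turn module-level helpers into methods of the model, so after customize_model returns they are called directly on it. A hedged usage sketch only, assuming model is a pgmpy-style model that has already gone through customize_model; the 'score' ttype is a hypothetical example.

model = customize_model(model)
model.print_templates(ignore_ttypes=['score'])  # injected above
model.print_priors(title='Priors')              # injected above
model.show_model()                              # pgm_viz.show_model, injected above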
Example #7
    def __init__(self,
                 ibs,
                 cm,
                 aid2=None,
                 fnum=None,
                 figtitle='Match Interaction',
                 same_fig=True,
                 qreq_=None,
                 **kwargs):
        self.qres = cm

        self.ibs = ibs
        self.cm = cm
        self.qreq_ = qreq_
        self.fnum = pt.ensure_fnum(fnum)
        # Unpack Args
        if aid2 is None:
            index = 0
            # FIXME: no sortself
            cm.sortself()
            self.rank = index
        else:
            index = cm.daid2_idx.get(aid2, None)
            # TODO: rank?
            self.rank = None
        if index is not None:
            self.qaid = self.cm.qaid
            self.daid = self.cm.daid_list[index]
            self.fm = self.cm.fm_list[index]
            self.fk = self.cm.fk_list[index]
            self.fsv = self.cm.fsv_list[index]
            if self.cm.fs_list is None:
                fs_list = self.cm.get_fsv_prod_list()
            else:
                fs_list = self.cm.fs_list
            self.fs = None if fs_list is None else fs_list[index]
            self.score = None if self.cm.score_list is None else self.cm.score_list[
                index]
            self.H1 = None if self.cm.H_list is None else cm.H_list[index]
        else:
            self.qaid = self.cm.qaid
            self.daid = aid2
            self.fm = np.empty((0, 2), dtype=hstypes.FM_DTYPE)
            self.fk = np.empty(0, dtype=hstypes.FK_DTYPE)
            self.fsv = np.empty((0, 2), dtype=hstypes.FS_DTYPE)
            self.fs = np.empty(0, dtype=hstypes.FS_DTYPE)
            self.score = None
            self.H1 = None

        # Read properties
        self.query_config2_ = (None if self.qreq_ is None else
                               self.qreq_.get_external_query_config2())
        self.data_config2_ = (None if self.qreq_ is None else
                              self.qreq_.get_external_data_config2())
        self.rchip1 = vh.get_chips(ibs, [self.qaid],
                                   config2_=self.query_config2_)[0]
        self.rchip2 = vh.get_chips(ibs, [self.daid],
                                   config2_=self.data_config2_)[0]
        # Begin Interaction
        # call doclf docla and make figure
        self.fig = ih.begin_interaction('matches', self.fnum)
        self.xywh2_ptr = [None]
        self.mode = kwargs.pop('mode', 0)
        # New state vars
        self.same_fig = same_fig
        self.use_homog = False
        self.vert = kwargs.pop('vert', None)
        self.mx = kwargs.pop('mx', None)
        self.last_fx = 0
        self.fnum2 = pt.next_fnum()
        self.figtitle = figtitle
        self.kwargs = kwargs

        abstract_interaction.register_interaction(self)
        ut.inject_func_as_method(self,
                                 AbstractInteraction.append_button.im_func)
        ut.inject_func_as_method(self,
                                 AbstractInteraction.show_popup_menu.im_func)
        self.scope = []

        if not kwargs.get('nobegin', False):
            dodraw = kwargs.get('dodraw', True)
            self.begin(dodraw=dodraw)
Example #8
def testdata_tree_view():
    r"""
    CommandLine:
        python -m wbia.guitool.api_tree_view testdata_tree_view
        python -m wbia.guitool.api_tree_view testdata_tree_view --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> # xdoctest: +REQUIRES(--gui)
        >>> import wbia.guitool as gt
        >>> from wbia.guitool.api_tree_view import *  # NOQA
        >>> wgt = testdata_tree_view()
        >>> view = wgt.view
        >>> rows = view.selectedRows()
        >>> print('rows = %r' % (rows,))
        >>> # xdoctest: +REQUIRES(--show)
        >>> ut.quit_if_noshow()
        >>> gt.qtapp_loop(qwin=wgt)
    """
    import wbia.guitool as gt

    gt.ensure_qapp()
    col_name_list = ['name', 'num_annots', 'annots']
    col_getter_dict = {
        'name': ['fred', 'sue', 'tom', 'mary', 'paul'],
        'num_annots': [2, 1, 3, 5, 1],
    }
    # make consistent data
    grouped_data = [[
        col_getter_dict['name'][index] + '-' + str(i) for i in range(num)
    ] for index, num in enumerate(col_getter_dict['num_annots'])]
    flat_data, reverse_list = ut.invertible_flatten1(grouped_data)
    col_getter_dict['annots'] = flat_data

    iders = [list(range(len(col_getter_dict['name']))), reverse_list]

    col_level_dict = {
        'name': 0,
        'num_annots': 0,
        'annots': 1,
    }
    sortby = 'name'

    api = gt.CustomAPI(
        col_name_list=col_name_list,
        col_getter_dict=col_getter_dict,
        sortby=sortby,
        iders=iders,
        col_level_dict=col_level_dict,
    )
    headers = api.make_headers(tblnice='Tree Example')

    wgt = gt.APIItemWidget(view_class=APITreeView)
    wgt.change_headers(headers)

    wgt.menubar = gt.newMenubar(wgt)
    wgt.menuFile = wgt.menubar.newMenu('Dev')

    def wgt_embed(wgt):
        view = wgt.view  # NOQA
        import utool

        utool.embed()

    ut.inject_func_as_method(wgt, wgt_embed)
    wgt.menuFile.newAction(triggered=wgt.wgt_embed)

    return wgt
Example #9
File: specialdraw.py Project: whaozl/ibeis
def general_identify_flow():
    r"""
    CommandLine:
        python -m ibeis.scripts.specialdraw general_identify_flow --show --save pairsim.png --dpi=100 --diskshow --clipwhite

        python -m ibeis.scripts.specialdraw general_identify_flow --dpi=200 --diskshow --clipwhite --dpath ~/latex/cand/ --figsize=20,10  --save figures4/pairprob.png --arrow-width=2.0


    Example:
        >>> # SCRIPT
        >>> from ibeis.scripts.specialdraw import *  # NOQA
        >>> general_identify_flow()
        >>> ut.quit_if_noshow()
        >>> ut.show_if_requested()
    """
    import networkx as nx
    import plottool as pt
    pt.ensure_pylab_qt4()
    # pt.plt.xkcd()

    graph = nx.DiGraph()

    def makecluster(name, num, **attrkw):
        return [ut.nx_makenode(name + str(n), **attrkw) for n in range(num)]

    def add_edge2(u, v, *args, **kwargs):
        v = ut.ensure_iterable(v)
        u = ut.ensure_iterable(u)
        for _u, _v in ut.product(u, v):
            graph.add_edge(_u, _v, *args, **kwargs)

    # *** Primary color:
    p_shade2 = '#41629A'
    # *** Secondary color
    s1_shade2 = '#E88B53'
    # *** Secondary color
    s2_shade2 = '#36977F'
    # *** Complement color
    c_shade2 = '#E8B353'

    ns = 512

    ut.inject_func_as_method(graph, ut.nx_makenode)

    annot1_color = p_shade2
    annot2_color = s1_shade2
    #annot1_color2 = pt.color_funcs.lighten_rgb(colors.hex2color(annot1_color), .01)

    annot1 = graph.nx_makenode('Annotation X',
                               width=ns,
                               height=ns,
                               groupid='annot',
                               color=annot1_color)
    annot2 = graph.nx_makenode('Annotation Y',
                               width=ns,
                               height=ns,
                               groupid='annot',
                               color=annot2_color)

    featX = graph.nx_makenode('Features X',
                              size=(ns / 1.2, ns / 2),
                              groupid='feats',
                              color=lighten_hex(annot1_color, .1))
    featY = graph.nx_makenode('Features Y',
                              size=(ns / 1.2, ns / 2),
                              groupid='feats',
                              color=lighten_hex(annot2_color, .1))
    #'#4771B3')

    global_pairvec = graph.nx_makenode(
        'Global similarity\n(viewpoint, quality, ...)',
        width=ns * ut.PHI * 1.2,
        color=s2_shade2)
    findnn = graph.nx_makenode('Find correspondences\n(nearest neighbors)',
                               shape='ellipse',
                               color=c_shade2)
    local_pairvec = graph.nx_makenode(
        'Local similarities\n(LNBNN, spatial error, ...)',
        size=(ns * 2.2, ns),
        color=lighten_hex(c_shade2, .1))
    agglocal = graph.nx_makenode('Aggregate',
                                 size=(ns / 1.1, ns / 2),
                                 shape='ellipse',
                                 color=lighten_hex(c_shade2, .2))
    catvecs = graph.nx_makenode('Concatenate',
                                size=(ns / 1.1, ns / 2),
                                shape='ellipse',
                                color=lighten_hex(s2_shade2, .1))
    pairvec = graph.nx_makenode('Vector of\npairwise similarities',
                                color=lighten_hex(s2_shade2, .2))
    classifier = graph.nx_makenode('Classifier\n(SVM/RF/DNN)',
                                   color=lighten_hex(s2_shade2, .3))
    prob = graph.nx_makenode(
        'Matching Probability\n(same individual given\nsimilar viewpoint)',
        color=lighten_hex(s2_shade2, .4))

    graph.add_edge(annot1, global_pairvec)
    graph.add_edge(annot2, global_pairvec)

    add_edge2(annot1, featX)
    add_edge2(annot2, featY)

    add_edge2(featX, findnn)
    add_edge2(featY, findnn)

    add_edge2(findnn, local_pairvec)

    graph.add_edge(local_pairvec, agglocal, constraint=True)
    graph.add_edge(agglocal, catvecs, constraint=False)
    graph.add_edge(global_pairvec, catvecs)

    graph.add_edge(catvecs, pairvec)

    # graph.add_edge(annot1, classifier, style='invis')
    # graph.add_edge(pairvec, classifier , constraint=False)
    graph.add_edge(pairvec, classifier)
    graph.add_edge(classifier, prob)

    ut.nx_set_default_node_attributes(graph, 'shape', 'rect')
    #ut.nx_set_default_node_attributes(graph, 'fillcolor', nx.get_node_attributes(graph, 'color'))
    #ut.nx_set_default_node_attributes(graph, 'style',  'rounded')
    ut.nx_set_default_node_attributes(graph, 'style', 'filled,rounded')
    ut.nx_set_default_node_attributes(graph, 'fixedsize', 'true')
    ut.nx_set_default_node_attributes(graph, 'xlabel',
                                      nx.get_node_attributes(graph, 'label'))
    ut.nx_set_default_node_attributes(graph, 'width', ns * ut.PHI)
    ut.nx_set_default_node_attributes(graph, 'height', ns)
    ut.nx_set_default_node_attributes(graph, 'regular', False)

    #font = 'MonoDyslexic'
    #font = 'Mono_Dyslexic'
    font = 'Ubuntu'
    ut.nx_set_default_node_attributes(graph, 'fontsize', 72)
    ut.nx_set_default_node_attributes(graph, 'fontname', font)

    #ut.nx_delete_node_attr(graph, 'width')
    #ut.nx_delete_node_attr(graph, 'height')
    #ut.nx_delete_node_attr(graph, 'fixedsize')
    #ut.nx_delete_node_attr(graph, 'style')
    #ut.nx_delete_node_attr(graph, 'regular')
    #ut.nx_delete_node_attr(graph, 'shape')

    #graph.node[annot1]['label'] = "<f0> left|<f1> mid&#92; dle|<f2> right"
    #graph.node[annot2]['label'] = ut.codeblock(
    #    '''
    #    <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
    #      <TR><TD>left</TD><TD PORT="f1">mid dle</TD><TD PORT="f2">right</TD></TR>
    #    </TABLE>>
    #    ''')
    #graph.node[annot1]['label'] = ut.codeblock(
    #    '''
    #    <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
    #      <TR><TD>left</TD><TD PORT="f1">mid dle</TD><TD PORT="f2">right</TD></TR>
    #    </TABLE>>
    #    ''')

    #graph.node[annot1]['shape'] = 'none'
    #graph.node[annot1]['margin'] = '0'

    layoutkw = {
        'forcelabels': True,
        'prog': 'dot',
        'rankdir': 'LR',
        # 'splines': 'curved',
        'splines': 'line',
        'samplepoints': 20,
        'showboxes': 1,
        # 'splines': 'polyline',
        #'splines': 'spline',
        'sep': 100 / 72,
        'nodesep': 300 / 72,
        'ranksep': 300 / 72,
        #'inputscale': 72,
        # 'inputscale': 1,
        # 'dpi': 72,
        # 'concentrate': 'true', # merges edge lines
        # 'splines': 'ortho',
        # 'aspect': 1,
        # 'ratio': 'compress',
        # 'size': '5,4000',
        # 'rank': 'max',
    }

    #fontkw = dict(fontfamilty='sans-serif', fontweight='normal', fontsize=12)
    #fontkw = dict(fontname='Ubuntu', fontweight='normal', fontsize=12)
    #fontkw = dict(fontname='Ubuntu', fontweight='light', fontsize=20)
    fontkw = dict(fontname=font, fontweight='light', fontsize=12)
    #prop = fm.FontProperties(fname='/usr/share/fonts/truetype/groovygh.ttf')

    pt.show_nx(graph, layout='agraph', layoutkw=layoutkw, **fontkw)
    pt.zoom_factory()
Example #10
def to_string_monkey(df, highlight_cols=[0, 1]):
    """  monkey patch to pandas to highlight the maximum value in specified
    cols of a row """
    kwds = dict(buf=None,
                columns=None,
                col_space=None,
                header=True,
                index=True,
                na_rep='NaN',
                formatters=None,
                float_format=None,
                sparsify=None,
                index_names=True,
                justify=None,
                line_width=None,
                max_rows=None,
                max_cols=None,
                show_dimensions=False)
    self = pd.formats.format.DataFrameFormatter(df, **kwds)
    self.highlight_cols = highlight_cols
    ut.inject_func_as_method(self,
                             monkey_to_str_columns,
                             '_to_str_columns',
                             override=True,
                             force=True)

    def strip_ansi(text):
        import re
        ansi_escape = re.compile(r'\x1b[^m]*m')
        return ansi_escape.sub('', text)

    def justify_ansi(self, texts, max_len, mode='right'):
        if mode == 'left':
            return [
                x.ljust(max_len + (len(x) - len(strip_ansi(x)))) for x in texts
            ]
        elif mode == 'center':
            return [
                x.center(max_len + (len(x) - len(strip_ansi(x))))
                for x in texts
            ]
        else:
            return [
                x.rjust(max_len + (len(x) - len(strip_ansi(x)))) for x in texts
            ]

    def strlen_ansii(self, text):
        return pd.compat.strlen(strip_ansi(text), encoding=self.encoding)

    ut.inject_func_as_method(self.adj,
                             strlen_ansii,
                             'len',
                             override=True,
                             force=True)
    ut.inject_func_as_method(self.adj,
                             justify_ansi,
                             'justify',
                             override=True,
                             force=True)
    # strcols = monkey_to_str_columns(self)
    # texts = strcols[2]
    # str_ = self.adj.adjoin(1, *strcols)
    # print(str_)
    # print(strip_ansi(str_))
    self.to_string()
    result = self.buf.getvalue()
    return result
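The key idea in justify_ansi above deserves a note: ANSI color escapes add characters that occupy no visual width, so the justification target has to be widened by exactly that invisible overhead or the columns drift. A self-contained illustration of the arithmetic (not pandas code):

import re

def strip_ansi(text):
    return re.compile(r'\x1b[^m]*m').sub('', text)

colored = '\x1b[31m3.14\x1b[0m'        # renders as 4 visible characters
overhead = len(colored) - len(strip_ansi(colored))
padded = colored.rjust(10 + overhead)  # same trick as justify_ansi above
assert len(strip_ansi(padded)) == 10   # visually right-justified to width 10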
Example #11
def monkeypatch_encounters(ibs, aids, cache=None, **kwargs):
    """
    Hacks in a temporary custom definition of encounters for this controller

    50 days for PZ_MTEST
    kwargs = dict(days=50)

    if False:
        name_mindeltas = []
        for name in annots.group_items(annots.nids).values():
            times = name.image_unixtimes_asfloat
            deltas = [ut.unixtime_to_timedelta(np.abs(t1 - t2))
                      for t1, t2 in ut.combinations(times, 2)]
            if deltas:
                name_mindeltas.append(min(deltas))
        print(ut.repr3(ut.lmap(ut.get_timedelta_str,
                               sorted(name_mindeltas))))
    """
    from ibeis.algo.preproc.occurrence_blackbox import cluster_timespace_sec
    import numpy as np
    import datetime
    if len(aids) == 0:
        return
    annots = ibs.annots(sorted(set(aids)))
    thresh_sec = datetime.timedelta(**kwargs).total_seconds()
    # thresh_sec = datetime.timedelta(minutes=30).seconds

    if cache is None:
        cache = True
        # cache = len(aids) > 200
    cfgstr = str(ut.combine_uuids(annots.visual_uuids)) + str(thresh_sec)
    cacher = ut.Cacher('occurrence_labels', cfgstr=cfgstr, enabled=cache)
    data = cacher.tryload()
    if data is None:
        print('Computing occurrences for monkey patch for %d aids' %
              (len(aids)))
        posixtimes = annots.image_unixtimes_asfloat
        latlons = annots.gps
        data = cluster_timespace_sec(posixtimes,
                                     latlons,
                                     thresh_sec=thresh_sec,
                                     km_per_sec=.002)
        cacher.save(data)
    occurrence_ids = data
    if occurrence_ids is None:
        # return
        # each annot is its own occurrence
        occurrence_ids = list(range(len(annots)))

    ndec = int(np.ceil(np.log10(max(occurrence_ids))))
    suffmt = '-monkey-occur%0' + str(ndec) + 'd'
    encounter_labels = [
        n + suffmt % (o, ) for o, n in zip(occurrence_ids, annots.names)
    ]
    occurrence_labels = [suffmt[1:] % (o, ) for o in occurrence_ids]
    enc_lookup = ut.dzip(annots.aids, encounter_labels)
    occur_lookup = ut.dzip(annots.aids, occurrence_labels)

    # annots_per_enc = ut.dict_hist(encounter_labels, ordered=True)
    # ut.get_stats(list(annots_per_enc.values()))

    # encounters = ibs._annot_groups(annots.group(encounter_labels)[1])
    # enc_names = ut.take_column(encounters.nids, 0)
    # name_to_encounters = ut.group_items(encounters, enc_names)

    # print('name_to_encounters = %s' % (ut.repr3(name_to_encounters)),)
    # print('Names to num encounters')
    # name_to_num_enc = ut.dict_hist(
    #     ut.map_dict_vals(len, name_to_encounters).values())

    # monkey patch to override encounter info
    def _monkey_get_annot_occurrence_text(ibs, aids):
        return ut.dict_take(occur_lookup, aids)

    def _monkey_get_annot_encounter_text(ibs, aids):
        return ut.dict_take(enc_lookup, aids)

    ut.inject_func_as_method(ibs,
                             _monkey_get_annot_encounter_text,
                             'get_annot_encounter_text',
                             force=True)
    ut.inject_func_as_method(ibs,
                             _monkey_get_annot_occurrence_text,
                             'get_annot_occurrence_text',
                             force=True)
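monkeypatch_encounters force-injects these closures over get_annot_encounter_text and get_annot_occurrence_text, and unmonkeypatch_encounters (Example #4) restores the ibsfuncs originals. A hedged usage sketch of the pairing, assuming an open ibeis/wbia controller ibs and a list of aids from an existing session; days=50 follows the PZ_MTEST note in the docstring above.

monkeypatch_encounters(ibs, aids, days=50)
try:
    print(ibs.get_annot_encounter_text(aids))  # temporary monkey-patched labels
finally:
    unmonkeypatch_encounters(ibs)              # restore the original ibsfuncs methods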
Example #12
File: pgm_ext.py Project: heroinlin/ibeis
def customize_model(model):
    model.var2_cpd = {cpd.variable: cpd for cpd in model.cpds}
    model.ttype2_cpds = ut.groupby_attr(model.cpds, 'ttype')
    model._templates = list(set([cpd._template_
                                 for cpd in model.var2_cpd.values()]))
    model.ttype2_template = {t.ttype: t for t in model._templates}

    def pretty_evidence(model, evidence):
        return [evar + '=' + str(model.var2_cpd[evar].variable_statenames[val])
                for evar, val in evidence.items()]

    def print_templates(model, ignore_ttypes=[]):
        templates = model._templates
        ut.colorprint('\n --- CPD Templates ---', 'blue')
        for temp_cpd in templates:
            if temp_cpd.ttype not in ignore_ttypes:
                ut.colorprint(temp_cpd._cpdstr('psql'), 'turquoise')

    def print_priors(model, ignore_ttypes=[], title='Priors', color='darkblue'):
        ut.colorprint('\n --- %s ---' % (title,), color=color)
        for ttype, cpds in model.ttype2_cpds.items():
            if ttype not in ignore_ttypes:
                for fs_ in ut.ichunks(cpds, 4):
                    ut.colorprint(ut.hz_str([f._cpdstr('psql') for f in fs_]), color)

    ut.inject_func_as_method(model, print_priors)
    ut.inject_func_as_method(model, print_templates)
    ut.inject_func_as_method(model, pretty_evidence)
    ut.inject_func_as_method(model, pgm_viz.show_model)
    ut.inject_func_as_method(model, pgm_viz.show_markov_model)
    ut.inject_func_as_method(model, pgm_viz.show_junction_tree)
    return model
Example #13
    def __init__(self, ibs, cm, aid2=None, fnum=None,
                 figtitle='Match Interaction', same_fig=True,
                 qreq_=None, **kwargs):
        self.qres = cm

        self.ibs = ibs
        self.cm = cm
        self.qreq_ = qreq_
        self.fnum = pt.ensure_fnum(fnum)
        # Unpack Args
        if aid2 is None:
            index = 0
            # FIXME: no sortself
            cm.sortself()
            self.rank = index
        else:
            index = cm.daid2_idx.get(aid2, None)
            # TODO: rank?
            self.rank = None
        if index is not None:
            self.qaid  = self.cm.qaid
            self.daid  = self.cm.daid_list[index]
            self.fm    = self.cm.fm_list[index]
            self.fk    = self.cm.fk_list[index]
            self.fsv   = self.cm.fsv_list[index]
            if self.cm.fs_list is None:
                fs_list = self.cm.get_fsv_prod_list()
            else:
                fs_list = self.cm.fs_list
            self.fs    = None if fs_list is None else fs_list[index]
            self.score = None if self.cm.score_list is None else self.cm.score_list[index]
            self.H1    = None if self.cm.H_list is None else cm.H_list[index]
        else:
            self.qaid  = self.cm.qaid
            self.daid  = aid2
            self.fm    = np.empty((0, 2), dtype=hstypes.FM_DTYPE)
            self.fk    = np.empty(0, dtype=hstypes.FK_DTYPE)
            self.fsv   = np.empty((0, 2), dtype=hstypes.FS_DTYPE)
            self.fs    = np.empty(0, dtype=hstypes.FS_DTYPE)
            self.score = None
            self.H1    = None

        # Read properties
        self.query_config2_ = (None if self.qreq_ is None else
                               self.qreq_.get_external_query_config2())
        self.data_config2_ = (None if self.qreq_ is None else
                              self.qreq_.get_external_data_config2())
        self.rchip1 = vh.get_chips(ibs, [self.qaid], config2_=self.query_config2_)[0]
        self.rchip2 = vh.get_chips(ibs, [self.daid], config2_=self.data_config2_)[0]
        # Begin Interaction
        # call doclf docla and make figure
        self.fig = ih.begin_interaction('matches', self.fnum)
        self.xywh2_ptr  = [None]
        self.mode = kwargs.pop('mode', 0)
        # New state vars
        self.same_fig = same_fig
        self.use_homog = False
        self.vert = kwargs.pop('vert', None)
        self.mx   = kwargs.pop('mx', None)
        self.last_fx = 0
        self.fnum2 = pt.next_fnum()
        self.figtitle = figtitle
        self.kwargs = kwargs

        abstract_interaction.register_interaction(self)
        ut.inject_func_as_method(self, AbstractInteraction.append_button.im_func)
        ut.inject_func_as_method(self, AbstractInteraction.show_popup_menu.im_func)
        self.scope = []

        if not kwargs.get('nobegin', False):
            dodraw = kwargs.get('dodraw', True)
            self.begin(dodraw=dodraw)
Example #14
def to_string_monkey(df, highlight_cols=None):
    """  monkey patch to pandas to highlight the maximum value in specified
    cols of a row

    df = pd.DataFrame(
        np.array([[ 0.87031269,  0.86886931,  0.86842073,  0.91981975],
                  [ 0.34196218,  0.34289191,  0.34206377,  0.34252863],
                  [ 0.34827074,  0.34829214,  0.35032833,  0.28857126],
                  [ 0.76979453,  0.77214855,  0.77547518,  0.38850962]]),
        columns=['sum(fgweights)', 'sum(weighted_ratio)', 'len(matches)', 'score_lnbnn_1vM'],
        index=['match_state(match-v-rest)', 'match_state(nomatch-v-rest)', 'match_state(notcomp-v-rest)', 'photobomb_state']
    )
    highlight_cols = 'all'

    ut.editfile(pd.formats.printing.adjoin)

    """
    import pandas as pd
    import utool as ut
    import numpy as np
    import six
    if isinstance(highlight_cols, six.string_types) and highlight_cols == 'all':
        highlight_cols = np.arange(len(df.columns))
    # kwds = dict(buf=None, columns=None, col_space=None, header=True,
    #             index=True, na_rep='NaN', formatters=None,
    #             float_format=None, sparsify=None, index_names=True,
    #             justify=None, line_width=None, max_rows=None,
    #             max_cols=None, show_dimensions=False)
    # self = pd.formats.format.DataFrameFormatter(df, **kwds)
    self = pd.formats.format.DataFrameFormatter(df)
    self.highlight_cols = highlight_cols
    ut.inject_func_as_method(self, monkey_to_str_columns, '_to_str_columns', override=True, force=True)

    def strip_ansi(text):
        import re
        ansi_escape = re.compile(r'\x1b[^m]*m')
        return ansi_escape.sub('', text)

    def justify_ansi(self, texts, max_len, mode='right'):
        if mode == 'left':
            return [x.ljust(max_len + (len(x) - len(strip_ansi(x)))) for x in texts]
        elif mode == 'center':
            return [x.center(max_len + (len(x) - len(strip_ansi(x)))) for x in texts]
        else:
            return [x.rjust(max_len + (len(x) - len(strip_ansi(x)))) for x in texts]
    ut.inject_func_as_method(self.adj, justify_ansi, 'justify', override=True, force=True)

    def strlen_ansii(self, text):
        return pd.compat.strlen(strip_ansi(text), encoding=self.encoding)
    ut.inject_func_as_method(self.adj, strlen_ansii, 'len', override=True, force=True)

    if False:
        strlen = ut.partial(strlen_ansii, self.adj)  # NOQA
        justfunc = ut.partial(justify_ansi, self.adj)  # NOQA
        # Essentially what to_string does
        strcols = monkey_to_str_columns(self)
        # texts = strcols[2]
        space = 1
        lists = strcols
        str_ = self.adj.adjoin(space, *lists)
        print(str_)
        print(strip_ansi(str_))
    self.to_string()
    result = self.buf.getvalue()
    # hack because adjoin is not working correctly with injected strlen
    result = '\n'.join([x.rstrip() for x in result.split('\n')])
    return result
Example #15
def general_identify_flow():
    r"""
    CommandLine:
        python -m ibeis.scripts.specialdraw general_identify_flow --show --save pairsim.png --dpi=100 --diskshow --clipwhite

        python -m ibeis.scripts.specialdraw general_identify_flow --dpi=200 --diskshow --clipwhite --dpath ~/latex/cand/ --figsize=20,10  --save figures4/pairprob.png --arrow-width=2.0


    Example:
        >>> # SCRIPT
        >>> from ibeis.scripts.specialdraw import *  # NOQA
        >>> general_identify_flow()
        >>> ut.quit_if_noshow()
        >>> ut.show_if_requested()
    """
    import networkx as nx
    import plottool as pt
    pt.ensure_pylab_qt4()
    # pt.plt.xkcd()

    graph = nx.DiGraph()

    def makecluster(name, num, **attrkw):
        return [ut.nx_makenode(name + str(n), **attrkw) for n in range(num)]

    def add_edge2(u, v, *args, **kwargs):
        v = ut.ensure_iterable(v)
        u = ut.ensure_iterable(u)
        for _u, _v in ut.product(u, v):
            graph.add_edge(_u, _v, *args, **kwargs)

    # *** Primary color:
    p_shade2 = '#41629A'
    # *** Secondary color
    s1_shade2 = '#E88B53'
    # *** Secondary color
    s2_shade2 = '#36977F'
    # *** Complement color
    c_shade2 = '#E8B353'

    ns = 512

    ut.inject_func_as_method(graph, ut.nx_makenode)

    annot1_color = p_shade2
    annot2_color = s1_shade2
    #annot1_color2 = pt.color_funcs.lighten_rgb(colors.hex2color(annot1_color), .01)

    annot1 = graph.nx_makenode('Annotation X', width=ns, height=ns, groupid='annot', color=annot1_color)
    annot2 = graph.nx_makenode('Annotation Y', width=ns, height=ns, groupid='annot', color=annot2_color)

    featX = graph.nx_makenode('Features X', size=(ns / 1.2, ns / 2), groupid='feats', color=lighten_hex(annot1_color, .1))
    featY = graph.nx_makenode('Features Y', size=(ns / 1.2, ns / 2), groupid='feats', color=lighten_hex(annot2_color, .1))
    #'#4771B3')

    global_pairvec = graph.nx_makenode('Global similarity\n(viewpoint, quality, ...)', width=ns * ut.PHI * 1.2, color=s2_shade2)
    findnn = graph.nx_makenode('Find correspondences\n(nearest neighbors)', shape='ellipse', color=c_shade2)
    local_pairvec = graph.nx_makenode('Local similarities\n(LNBNN, spatial error, ...)',
                                      size=(ns * 2.2, ns), color=lighten_hex(c_shade2, .1))
    agglocal = graph.nx_makenode('Aggregate', size=(ns / 1.1, ns / 2), shape='ellipse', color=lighten_hex(c_shade2, .2))
    catvecs = graph.nx_makenode('Concatenate', size=(ns / 1.1, ns / 2), shape='ellipse', color=lighten_hex(s2_shade2, .1))
    pairvec = graph.nx_makenode('Vector of\npairwise similarities', color=lighten_hex(s2_shade2, .2))
    classifier = graph.nx_makenode('Classifier\n(SVM/RF/DNN)', color=lighten_hex(s2_shade2, .3))
    prob = graph.nx_makenode('Matching Probability\n(same individual given\nsimilar viewpoint)', color=lighten_hex(s2_shade2, .4))

    graph.add_edge(annot1, global_pairvec)
    graph.add_edge(annot2, global_pairvec)

    add_edge2(annot1, featX)
    add_edge2(annot2, featY)

    add_edge2(featX, findnn)
    add_edge2(featY, findnn)

    add_edge2(findnn, local_pairvec)

    graph.add_edge(local_pairvec, agglocal, constraint=True)
    graph.add_edge(agglocal, catvecs, constraint=False)
    graph.add_edge(global_pairvec, catvecs)

    graph.add_edge(catvecs, pairvec)

    # graph.add_edge(annot1, classifier, style='invis')
    # graph.add_edge(pairvec, classifier , constraint=False)
    graph.add_edge(pairvec, classifier)
    graph.add_edge(classifier, prob)

    ut.nx_set_default_node_attributes(graph, 'shape',  'rect')
    #ut.nx_set_default_node_attributes(graph, 'fillcolor', nx.get_node_attributes(graph, 'color'))
    #ut.nx_set_default_node_attributes(graph, 'style',  'rounded')
    ut.nx_set_default_node_attributes(graph, 'style',  'filled,rounded')
    ut.nx_set_default_node_attributes(graph, 'fixedsize', 'true')
    ut.nx_set_default_node_attributes(graph, 'xlabel', nx.get_node_attributes(graph, 'label'))
    ut.nx_set_default_node_attributes(graph, 'width', ns * ut.PHI)
    ut.nx_set_default_node_attributes(graph, 'height', ns)
    ut.nx_set_default_node_attributes(graph, 'regular', False)

    #font = 'MonoDyslexic'
    #font = 'Mono_Dyslexic'
    font = 'Ubuntu'
    ut.nx_set_default_node_attributes(graph, 'fontsize', 72)
    ut.nx_set_default_node_attributes(graph, 'fontname', font)

    #ut.nx_delete_node_attr(graph, 'width')
    #ut.nx_delete_node_attr(graph, 'height')
    #ut.nx_delete_node_attr(graph, 'fixedsize')
    #ut.nx_delete_node_attr(graph, 'style')
    #ut.nx_delete_node_attr(graph, 'regular')
    #ut.nx_delete_node_attr(graph, 'shape')

    #graph.node[annot1]['label'] = "<f0> left|<f1> mid&#92; dle|<f2> right"
    #graph.node[annot2]['label'] = ut.codeblock(
    #    '''
    #    <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
    #      <TR><TD>left</TD><TD PORT="f1">mid dle</TD><TD PORT="f2">right</TD></TR>
    #    </TABLE>>
    #    ''')
    #graph.node[annot1]['label'] = ut.codeblock(
    #    '''
    #    <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0">
    #      <TR><TD>left</TD><TD PORT="f1">mid dle</TD><TD PORT="f2">right</TD></TR>
    #    </TABLE>>
    #    ''')

    #graph.node[annot1]['shape'] = 'none'
    #graph.node[annot1]['margin'] = '0'

    layoutkw = {
        'forcelabels': True,
        'prog': 'dot',
        'rankdir': 'LR',
        # 'splines': 'curved',
        'splines': 'line',
        'samplepoints': 20,
        'showboxes': 1,
        # 'splines': 'polyline',
        #'splines': 'spline',
        'sep': 100 / 72,
        'nodesep': 300 / 72,
        'ranksep': 300 / 72,
        #'inputscale': 72,
        # 'inputscale': 1,
        # 'dpi': 72,
        # 'concentrate': 'true', # merges edge lines
        # 'splines': 'ortho',
        # 'aspect': 1,
        # 'ratio': 'compress',
        # 'size': '5,4000',
        # 'rank': 'max',
    }

    #fontkw = dict(fontfamilty='sans-serif', fontweight='normal', fontsize=12)
    #fontkw = dict(fontname='Ubuntu', fontweight='normal', fontsize=12)
    #fontkw = dict(fontname='Ubuntu', fontweight='light', fontsize=20)
    fontkw = dict(fontname=font, fontweight='light', fontsize=12)
    #prop = fm.FontProperties(fname='/usr/share/fonts/truetype/groovygh.ttf')

    pt.show_nx(graph, layout='agraph', layoutkw=layoutkw, **fontkw)
    pt.zoom_factory()