Example 1
 def print_feat_stats(kpts, vecs):
     assert len(vecs) == len(kpts), 'disagreement'
     print('keypoints and vecs agree')
     flat_kpts = np.vstack(kpts)
     num_kpts = list(map(len, kpts))
     kpt_scale = vt.get_scales(flat_kpts)
     num_kpts_stats = ut.get_stats(num_kpts)
     scale_kpts_stats = ut.get_stats(kpt_scale)
     # 'prefix' comes from the enclosing scope in the original source
     print('Number of ' + prefix + ' keypoints: ' + ut.repr3(num_kpts_stats, nl=0, precision=2))
     print('Scale of ' + prefix + ' keypoints: ' + ut.repr3(scale_kpts_stats, nl=0, precision=2))
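All of these examples lean on utool's `ut.repr3` pretty-printer and `ut.get_stats` summary helper. A minimal self-contained sketch of that stats-then-pretty-print pattern using only NumPy (the helpers below are hypothetical stand-ins, not utool's actual implementations):

import numpy as np

def get_stats(arr):
    # Summary statistics comparable to what a get_stats helper would return.
    arr = np.asarray(arr)
    return {'mean': float(arr.mean()), 'std': float(arr.std()),
            'min': float(arr.min()), 'max': float(arr.max()), 'num': arr.size}

def repr_stats(stats, precision=2):
    # One-line rendering with fixed precision, akin to repr3(..., nl=0).
    fmt = '%.{}f'.format(precision)
    return '{' + ', '.join('%s: %s' % (k, fmt % v) for k, v in stats.items()) + '}'

num_kpts = [12, 30, 25, 18]
print('Number of keypoints: ' + repr_stats(get_stats(num_kpts)))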
Example 2
 def __init__(self, qdup_pos_map={}, ddup_pos_map={}):
     message = ('Some UUIDs are specified more than once at positions:\n'
                'duplicate_query_uuids=%s\n'
                'duplicate_data_uuids=%s\n') % (
                    ut.repr3(qdup_pos_map, nl=1),
                    ut.repr3(ddup_pos_map, nl=1))
     # items() for Python 3 (the original Python 2 code used iteritems())
     qdup_pos_map_ = {str(k): v for k, v in qdup_pos_map.items()}
     ddup_pos_map_ = {str(k): v for k, v in ddup_pos_map.items()}
     rawreturn = {
         'qdup_pos_map' : qdup_pos_map_,
         'ddup_pos_map' : ddup_pos_map_,
     }
     code = 601
     super(DuplicateUUIDException, self).__init__(message, rawreturn, code)
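The pattern above packs a human-readable message, a machine-readable payload, and a numeric code into a single exception. A minimal sketch of that base-class shape; the `WebException` parent is an assumption based on Example 27:

class WebException(Exception):
    # Carries a message, a machine-readable payload, and an HTTP-ish code.
    def __init__(self, message, rawreturn=None, code=400):
        super(WebException, self).__init__(message)
        self.message = message
        self.rawreturn = rawreturn
        self.code = code

class DuplicateUUIDException(WebException):
    def __init__(self, qdup_pos_map, ddup_pos_map):
        message = 'Some UUIDs are specified more than once'
        rawreturn = {'qdup_pos_map': qdup_pos_map, 'ddup_pos_map': ddup_pos_map}
        super(DuplicateUUIDException, self).__init__(message, rawreturn, code=601)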
Example 3
def _test_indent_print():
    # Indentation test code doesn't work in doctest blocks.
    import utool as ut
    flag = ut.ensure_logging()
    print('Checking indent. Should have none')
    with ut.Indenter('[INDENT] '):
        print('Checking indent. Should be indented')
    print('Should no longer be indented')
    text = ut.get_current_log_text()
    # The last line may or may not be empty.
    # Hack: logs now seem to be written with two line breaks, so stride by 2.
    last_lines = text.split('\n')[-8::2]
    if last_lines[-1] != '':
        # Developer guard: if this fires, remove the first line instead of the last.
        assert False, 'DEV ERROR. REMOVE FIRST LINE INSTEAD OF LAST'
        last_lines = last_lines[:-1]  # unreachable while the assert above is active

    #print('last_lines = %r' % (ut.repr3(last_lines)))
    try:
        assert last_lines[0].find('[INDENT] ') == -1, last_lines[0]
        assert last_lines[1].find('[INDENT] ') >= 0, 'did not indent %r' % (last_lines[1],)
        assert last_lines[2].find('[INDENT] ') == -1, last_lines[2]
    except AssertionError:
        print('Error. Last 3 lines')
        print(ut.repr3(last_lines))
        raise
    if not flag:
        ut.stop_logging()
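`ut.Indenter` prefixes everything printed inside the `with` block, which is what the test above checks against the log text. A rough sketch of such a context manager using only the standard library (utool's real version also hooks its logging machinery):

import builtins

class Indenter:
    # Temporarily wraps print() so every line gets a prefix.
    def __init__(self, prefix='    '):
        self.prefix = prefix

    def __enter__(self):
        self._print = builtins.print
        def indented_print(*args, **kwargs):
            self._print(self.prefix + ' '.join(str(a) for a in args), **kwargs)
        builtins.print = indented_print
        return self

    def __exit__(self, *exc):
        builtins.print = self._print

with Indenter('[INDENT] '):
    print('this line is prefixed')
print('this one is not')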
Example 4
def make_ibeis_cell_list(ibs):
    cell_template_list = get_default_cell_template_list(ibs)
    autogen_str = make_autogen_str()
    dbname = ibs.get_dbname()
    default_acfgstr = ut.get_argval('-a', type_=str, default='default:is_known=True')
    annotconfig_list_body = ut.codeblock(
        ut.repr2(default_acfgstr) + '\n' +
        ut.codeblock('''
        # See ibeis/expt/annotation_configs.py for names of annot configuration options
        #'default:has_any=(query,),dpername=1,exclude_reference=True',
        #'default:is_known=True',
        #'default:qsame_encounter=True,been_adjusted=True,excluderef=True'
        #'default:qsame_encounter=True,been_adjusted=True,excluderef=True,qsize=10,dsize=20',
        #'default:require_timestamp=True,min_timedelta=3600',
        #'default:species=primary',
        #'timectrl:',
        #'timectrl:been_adjusted=True,dpername=3',
        #'timectrl:qsize=10,dsize=20',
        #'unctrl:been_adjusted=True',
        ''')
    )
    default_pcfgstr_list = ut.get_argval(('-t', '-p'), type_=list, default='default')
    default_pcfgstr = ut.repr3(default_pcfgstr_list, nobr=True)

    pipeline_list_body = ut.codeblock(
        default_pcfgstr + '\n' +
        ut.codeblock('''
        #'default',
        #'default:K=1',
        #'default:K=1,AI=False',
        #'default:K=1,AI=False,QRH=True',
        #'default:K=1,RI=True,AI=False',
        #'default:K=1,adapteq=True',
        #'default:fg_on=[True,False]',
        ''')
    )
    locals_ = locals()
    _format = partial(format_cells, locals_=locals_)
    cell_list = ut.flatten(map(_format, cell_template_list))
    return cell_list
Example 5
 def print_tree_struct(*args, **kwargs):
     tree_str = (ut.indent(ut.repr3(get_tree_info(*args, **kwargs), nl=1)))
     print(tree_str)
     #bytes_str = ut.byte_str2(drive.get_total_nbytes(dpath_to_unique_fidx[path]))
     #print('path = %r, %s' % (path, bytes_str))
     #print(ut.repr3(key_list))
     return tree_str
Example 6
 def print_tree(root, path, dpath_to_unique_fidx=dpath_to_unique_fidx, drive=drive, depth=None):
     print('path = %r' % (path,))
     print(ut.byte_str2(drive.get_total_nbytes(dpath_to_unique_fidx[path])))
     path_components = ut.dirsplit(path)
     # Navigate to correct spot in tree
     current = root
     for c in path_components:
         current = current[c]
     print(ut.repr3(current, truncate=1))
Example 7
def hello_world(*args, **kwargs):
    """
    CommandLine:
        python -m ibeis.web.apis --exec-hello_world:0
        python -m ibeis.web.apis --exec-hello_world:1

    Example:
        >>> # WEB_DOCTEST
        >>> from ibeis.web.app import *  # NOQA
        >>> import ibeis
        >>> web_ibs = ibeis.opendb_bg_web(browser=True, url_suffix='/api/test/helloworld/?test0=0')  # start_job_queue=False)
        >>> print('Server will run until control c')
        >>> #web_ibs.terminate2()

    Example1:
        >>> # WEB_DOCTEST
        >>> from ibeis.web.app import *  # NOQA
        >>> import ibeis
        >>> import requests
        >>> web_ibs = ibeis.opendb_bg_web('testdb1', start_job_queue=False)
        >>> domain = 'http://127.0.0.1:5000'
        >>> url = domain + '/api/test/helloworld/?test0=0'
        >>> payload = {
        >>>     'test1' : 'test1',
        >>>     'test2' : None,  # NOTICE test2 DOES NOT SHOW UP
        >>> }
        >>> resp = requests.post(url, data=payload)
        >>> print(resp)
        >>> web_ibs.terminate2()
    """
    print('+------------ HELLO WORLD ------------')
    print('Args: %r' % (args,))
    print('Kwargs: %r' % (kwargs,))
    print('request.args: %r' % (request.args,))
    print('request.form: %r' % (request.form,))
    print('request.url: %r' % (request.url,))
    print('request.environ: %s' % (ut.repr3(request.environ),))
    print('request: %s' % (ut.repr3(request.__dict__),))
    print('L____________ HELLO WORLD ____________')
Example 8
def make_ibeis_cell_list(ibs):
    cell_template_list = get_default_cell_template_list(ibs)
    autogen_str = ut.make_autogen_str()
    dbname = ibs.get_dbname()
    default_acfgstr = ut.get_argval('-a', type_=str, default='default:is_known=True')

    asreport = ut.get_argflag('--asreport')

    default_pcfgstr_list = ut.get_argval(('-t', '-p'), type_=list, default='default')
    default_pcfgstr = ut.repr3(default_pcfgstr_list, nobr=True)

    if asreport:
        annotconfig_list_body = ut.codeblock(
            ut.repr2(default_acfgstr) )
        pipeline_list_body = ut.codeblock(
            default_pcfgstr
        )
    else:
        annotconfig_list_body = ut.codeblock(
            ut.repr2(default_acfgstr) + '\n' +
            ut.codeblock('''
            # See ibeis/expt/annotation_configs.py for names of annot configuration options
            #'default:has_any=(query,),dpername=1,exclude_reference=True',
            #'default:is_known=True',
            #'default:qsame_imageset=True,been_adjusted=True,excluderef=True,qsize=10,dsize=20',
            #'default:require_timestamp=True,min_timedelta=3600',
            #'default:species=primary',
            #'timectrl:',
            #'unctrl:been_adjusted=True',
            ''')
        )
        pipeline_list_body = ut.codeblock(
            default_pcfgstr + '\n' +
            ut.codeblock('''
            #'default',
            #'default:K=1,AI=False,QRH=True',
            #'default:K=1,RI=True,AI=False',
            #'default:K=1,adapteq=True',
            #'default:fg_on=[True,False]',
            ''')
        )

    locals_ = locals()
    _format = partial(ut.format_cells, locals_=locals_)
    cell_list = ut.flatten(map(_format, cell_template_list))
    return cell_list
Example 9
def collector_loop(dbdir):
    """
    Service that stores completed algorithm results
    """
    import ibeis
    update_proctitle('collector_loop')
    print = partial(ut.colorprint, color='yellow')
    with ut.Indenter('[collect] '):
        collect_rout_sock = ctx.socket(zmq.ROUTER)
        collect_rout_sock.setsockopt_string(zmq.IDENTITY, 'collect.ROUTER')
        collect_rout_sock.connect(collect_url2)
        if VERBOSE_JOBS:
            print('connect collect_url2  = %r' % (collect_url2,))

        ibs = ibeis.opendb(dbdir=dbdir, use_cache=False, web=False)
        # shelve_path = join(ut.get_shelves_dir(appname='ibeis'), 'engine')
        shelve_path = ibs.get_shelves_path()
        ut.delete(shelve_path)
        ut.ensuredir(shelve_path)

        collecter_data = {}
        awaiting_data = {}
        try:
            while True:
                # several callers here
                # CALLER: collector_notify
                # CALLER: collector_store
                # CALLER: collector_request_status
                # CALLER: collector_request_result
                idents, collect_request = rcv_multipart_json(collect_rout_sock, print=print)
                try:
                    reply = on_collect_request(collect_request, collecter_data,
                                               awaiting_data, shelve_path)
                except Exception as ex:
                    print(ut.repr3(collect_request))
                    ut.printex(ex, 'ERROR in collection')
                    reply = {}  # keep 'reply' bound so the send below cannot NameError
                send_multipart_json(collect_rout_sock, idents, reply)
        except KeyboardInterrupt:
            print('Caught ctrl+c in collector loop. Gracefully exiting')
        if VERBOSE_JOBS:
            print('Exiting collector')
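The loop above is a ZeroMQ ROUTER worker: receive identity-prefixed multipart requests, handle them, and reply to the same identity. A minimal self-contained sketch with pyzmq, where the JSON framing stands in for the snippet's `rcv_multipart_json`/`send_multipart_json` helpers (an assumption about their behavior):

import json
import zmq

def collector_sketch(url='tcp://127.0.0.1:5555'):
    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.ROUTER)
    sock.bind(url)
    try:
        while True:
            # ROUTER receives [identity frames..., payload].
            *idents, payload = sock.recv_multipart()
            request = json.loads(payload.decode('utf-8'))
            reply = {'status': 'ok', 'echo': request}
            # Echo the identity frames back so the reply is routed correctly.
            sock.send_multipart(idents + [json.dumps(reply).encode('utf-8')])
    except KeyboardInterrupt:
        print('Caught ctrl+c in collector loop. Gracefully exiting')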
Example 10
 def _wrapper(*args, **kwargs):
     try:
         return func(*args, **kwargs)
     except Exception as ex:
         import utool as ut
         ut.printex(ex)
         import inspect  # NOQA
         trace = inspect.trace()
         print('-- <TRACE LOCALS> --')
         for level, t in enumerate(trace[1:]):
             frame = t[0]
             locals_ = frame.f_locals
             local_repr_dict = {key: ut.trunc_repr(val)
                                for key, val in locals_.items()}
             print('LOCALS LEVEL %d' % (level,))
             print(ut.repr3(local_repr_dict, strvals=True, nl=1))
         print('-- </TRACE LOCALS> --')
         #import utool
         #utool.embed()
         raise
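The same dump-locals-on-error technique without utool: `inspect.trace()` yields the frames of the active traceback, and `reprlib.repr` stands in for `ut.trunc_repr` as a truncated formatter. A sketch:

import functools
import inspect
import reprlib

def dump_locals_on_error(func):
    # On exception, print a truncated repr of the locals at each traceback level.
    @functools.wraps(func)
    def _wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            for level, info in enumerate(inspect.trace()[1:]):
                frame = info[0]
                locals_repr = {k: reprlib.repr(v) for k, v in frame.f_locals.items()}
                print('LOCALS LEVEL %d: %s' % (level, locals_repr))
            raise
    return _wrapper

@dump_locals_on_error
def divide(a, b):
    return a / b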
Example 11
def graph_info(graph, verbose=False):
    import utool as ut
    node_attrs = list(graph.node.values())  # graph.node is NetworkX 1.x; use graph.nodes in 2.x
    edge_attrs = list(ut.take_column(graph.edges(data=True), 2))
    node_attr_hist = ut.dict_hist(ut.flatten([attr.keys() for attr in node_attrs]))
    edge_attr_hist = ut.dict_hist(ut.flatten([attr.keys() for attr in edge_attrs]))
    node_type_hist = ut.dict_hist(list(map(type, graph.nodes())))
    info_dict = ut.odict([
        ('directed', graph.is_directed()),
        ('multi', graph.is_multigraph()),
        ('num_nodes', len(graph)),
        ('num_edges', len(list(graph.edges()))),
        ('edge_attr_hist', ut.sort_dict(edge_attr_hist)),
        ('node_attr_hist', ut.sort_dict(node_attr_hist)),
        ('node_type_hist', ut.sort_dict(node_type_hist)),
        ('graph_attrs', graph.graph),
        ('graph_name', graph.name),
    ])
    #unique_attrs = ut.map_dict_vals(ut.unique, ut.dict_accum(*node_attrs))
    #ut.dict_isect_combine(*node_attrs))
    #[list(attrs.keys())]
    if verbose:
        print(ut.repr3(info_dict))
    return info_dict
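`ut.dict_hist` is essentially `collections.Counter`. A trimmed-down version of the same attribute histogram using the NetworkX 2.x API:

from collections import Counter
import networkx as nx

def graph_attr_hists(graph):
    # Histogram of which attribute keys appear on nodes and edges.
    node_attr_hist = Counter(k for _, attrs in graph.nodes(data=True) for k in attrs)
    edge_attr_hist = Counter(k for *_, attrs in graph.edges(data=True) for k in attrs)
    return {
        'directed': graph.is_directed(),
        'num_nodes': graph.number_of_nodes(),
        'num_edges': graph.number_of_edges(),
        'node_attr_hist': dict(node_attr_hist),
        'edge_attr_hist': dict(edge_attr_hist),
    }

g = nx.Graph(name='demo')
g.add_node(1, color='red')
g.add_edge(1, 2, weight=0.5)
print(graph_attr_hists(g))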
Example 12
def get_toy_data_1vM(num_annots, num_names=None, **kwargs):
    r"""
    Args:
        num_annots (int):
        num_names (int): (default = None)

    Kwargs:
        initial_aids, initial_nids, nid_sequence, seed

    Returns:
        tuple: (pair_list, feat_list)

    CommandLine:
        python -m ibeis.algo.hots.demobayes --exec-get_toy_data_1vM --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> num_annots = 1000
        >>> num_names = 40
        >>> get_toy_data_1vM(num_annots, num_names)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    import vtool as vt
    tup_ = get_toy_annots(num_annots, num_names, **kwargs)
    aids, nids, aids1, nids1, all_aids, all_nids = tup_
    rng = vt.ensure_rng(None)

    # Test a simple SVM classifier
    nid2_nexemp = ut.dict_hist(nids1)
    aid2_nid = dict(zip(aids, nids))

    ut.fix_embed_globals()

    #def add_to_globals(globals_, subdict):
    #    globals_.update(subdict)

    unique_nids = list(nid2_nexemp.keys())

    def annot_to_class_feats2(aid, aid2_nid, top=None):
        pair_list = []
        score_list = []
        nexemplar_list = []
        for nid in unique_nids:
            label = (aid2_nid[aid] == nid)
            num_exemplars = nid2_nexemp.get(nid, 0)
            if num_exemplars == 0:
                continue
            params = toy_params[label]
            mu, sigma = ut.dict_take(params, ['mu', 'sigma'])
            score_ = rng.normal(mu, sigma, size=num_exemplars).max()
            score = np.clip(score_, 0, np.inf)
            pair_list.append((aid, nid))
            score_list.append(score)
            nexemplar_list.append(num_exemplars)
        rank_list = ut.argsort(score_list, reverse=True)
        feat_list = np.array([score_list, rank_list, nexemplar_list]).T
        sortx = np.argsort(rank_list)
        feat_list = feat_list.take(sortx, axis=0)
        pair_list = np.array(pair_list).take(sortx, axis=0)
        if top is not None:
            feat_list = feat_list[:top]
            pair_list = pair_list[0:top]
        return pair_list, feat_list

    toclass_features = [annot_to_class_feats2(aid, aid2_nid, top=5) for aid in aids]
    aidnid_pairs = np.vstack(ut.get_list_column(toclass_features, 0))
    feat_list = np.vstack(ut.get_list_column(toclass_features, 1))
    score_list = feat_list.T[0:1].T
    lbl_list = [aid2_nid[aid] == nid for aid, nid in aidnid_pairs]

    from sklearn import svm
    #clf1 = svm.LinearSVC()
    print('Learning classifiers')

    clf3 = svm.SVC()
    clf3.fit(feat_list, lbl_list)

    clf1 = svm.LinearSVC()
    clf1.fit(score_list, lbl_list)

    # Score new annots against the training database
    tup_ = get_toy_annots(num_annots * 2, num_names, initial_aids=all_aids, initial_nids=all_nids)
    aids, nids, aids1, nids1, all_aids, all_nids = tup_
    aid2_nid = dict(zip(aids, nids))
    toclass_features = [annot_to_class_feats2(aid, aid2_nid) for aid in aids]
    aidnid_pairs = np.vstack(ut.get_list_column(toclass_features, 0))
    feat_list = np.vstack(ut.get_list_column(toclass_features, 1))
    lbl_list = np.array([aid2_nid[aid] == nid for aid, nid in aidnid_pairs])

    print('Running tests')

    score_list = feat_list.T[0:1].T

    tp_feat_list = feat_list[lbl_list]
    tn_feat_list = feat_list[~lbl_list]
    tp_lbls = lbl_list[lbl_list]
    tn_lbls = lbl_list[~lbl_list]
    print('num tp: %d' % len(tp_lbls))
    print('num tn: %d' % len(tn_lbls))

    tp_score_list = score_list[lbl_list]
    tn_score_list = score_list[~lbl_list]

    print('tp_feat' + ut.repr3(ut.get_stats(tp_feat_list, axis=0), precision=2))
    print('tn_feat' + ut.repr3(ut.get_stats(tn_feat_list, axis=0), precision=2))

    print('tp_score' + ut.repr2(ut.get_stats(tp_score_list), precision=2))
    print('tn_score' + ut.repr2(ut.get_stats(tn_score_list), precision=2))

    tp_pred3 = clf3.predict(tp_feat_list)
    tn_pred3 = clf3.predict(tn_feat_list)
    print((tp_pred3.sum(), tp_pred3.shape))
    print((tn_pred3.sum(), tn_pred3.shape))

    tp_score3 = clf3.score(tp_feat_list, tp_lbls)
    tn_score3 = clf3.score(tn_feat_list, tn_lbls)

    tp_pred1 = clf1.predict(tp_score_list)
    tn_pred1 = clf1.predict(tn_score_list)
    print((tp_pred1.sum(), tp_pred1.shape))
    print((tn_pred1.sum(), tn_pred1.shape))

    tp_score1 = clf1.score(tp_score_list, tp_lbls)
    tn_score1 = clf1.score(tn_score_list, tn_lbls)
    print('tp score with rank    = %r' % (tp_score3,))
    print('tn score with rank    = %r' % (tn_score3,))

    print('tp score without rank = %r' % (tp_score1,))
    print('tn score without rank = %r' % (tn_score1,))
    toy_data = {}

    return toy_data
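The train/score flow above, distilled: fit one classifier on full feature rows and score the true-positive and true-negative subsets separately. A sketch on synthetic data (not the demo's actual features):

import numpy as np
from sklearn import svm

rng = np.random.RandomState(0)
# Positive pairs score higher on average than negative pairs.
pos_feats = rng.normal(0.8, 0.3, size=(200, 1))
neg_feats = rng.normal(0.2, 0.3, size=(200, 1))
X = np.vstack([pos_feats, neg_feats])
y = np.array([True] * 200 + [False] * 200)

clf = svm.LinearSVC()
clf.fit(X, y)
print('tp accuracy = %.3f' % clf.score(pos_feats, np.ones(200, dtype=bool)))
print('tn accuracy = %.3f' % clf.score(neg_feats, np.zeros(200, dtype=bool)))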
Example 13
def classify_k(cfg={}):
    """
    CommandLine:
        python -m ibeis.algo.hots.demobayes --exec-classify_k --show --ev :nA=3
        python -m ibeis.algo.hots.demobayes --exec-classify_k --show --ev :nA=3,k=1
        python -m ibeis.algo.hots.demobayes --exec-classify_k --show --ev :nA=3,k=0 --method=approx
        python -m ibeis.algo.hots.demobayes --exec-classify_k --show --ev :nA=10,k=1 --method=approx

    Example:
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> cfg_list = testdata_demo_cfgs()
        >>> classify_k(cfg_list[0])
        >>> ut.show_if_requested()
    """
    cfg = cfg.copy()
    num_annots = cfg.pop('num_annots', 3)
    num_scores = cfg.pop('num_scores', 2)
    num_iter = cfg.pop('k', 0)
    nid_sequence = np.array([0, 0, 1, 2, 2, 1, 1])
    toy_data = get_toy_data_1v1(num_annots, nid_sequence=nid_sequence)
    force_evidence = 0  # set to None to use the real discretized scores
    diag_scores, = ut.dict_take(
        toy_data, 'diag_scores'.split(', '))

    #print('diag_scores = %r' % (diag_scores,))
    #diag_labels = pairwise_matches.compress(is_diag)
    #diag_pairs = ut.compress(pairwise_aidxs, is_diag)

    discr_domain, discr_p_same = learn_prob_score(num_scores)[0:2]
    def discretize_scores(scores):
        # Assign continuous scores to closest discrete index
        score_idxs = np.abs(1 - (discr_domain / scores[:, None])).argmin(axis=1)
        return score_idxs

    # Careful ordering is important here
    score_evidence = discretize_scores(diag_scores)
    if force_evidence is not None:
        for x in range(len(score_evidence)):
            score_evidence[x] = force_evidence

    model, evidence, query_results = test_model(
        num_annots=num_annots, num_names=num_annots,
        num_scores=num_scores,
        mode=1,
        score_evidence=score_evidence,
        p_score_given_same=discr_p_same,
        score_basis=discr_domain,
        #verbose=True
    )
    print(query_results['top_assignments'][0])
    toy_data1 = toy_data
    print('toy_data1 = ' + ut.repr3(toy_data1, nl=1))
    num_annots2 = num_annots + 1
    score_evidence1 = [None] * len(score_evidence)
    full_evidence = score_evidence.tolist()

    factor_list = query_results['factor_list']
    using_soft = False
    if using_soft:
        soft_evidence1 = [dict(zip(x.statenames[0], x.values)) for x in factor_list]

    for _ in range(num_iter):
        print('\n\n ---------- \n\n')
        #toy_data1['all_nids'].max() + 1
        num_names_gen = len(toy_data1['all_aids']) + 1
        num_names_gen = toy_data1['all_nids'].max() + 2
        toy_data2 = get_toy_data_1v1(
            1, num_names_gen,
            initial_aids=toy_data1['all_aids'],
            initial_nids=toy_data1['all_nids'],
            nid_sequence=nid_sequence)
        diag_scores2, = ut.dict_take(
            toy_data2, 'diag_scores'.split(', '))
        print('toy_data2 = ' + ut.repr3(toy_data2, nl=1))

        score_evidence2 = discretize_scores(diag_scores2).tolist()
        if force_evidence is not None:
            for x in range(len(score_evidence2)):
                score_evidence2[x] = force_evidence
        print('score_evidence2 = %r' % (score_evidence2,))

        if using_soft:
            # Demo with soft evidence
            model, evidence, query_results2 = test_model(
                num_annots=num_annots2, num_names=num_annots2,
                num_scores=num_scores,
                mode=1,
                name_evidence=soft_evidence1,
                #score_evidence=score_evidence1 + score_evidence2,
                score_evidence=score_evidence2,
                p_score_given_same=discr_p_same,
                score_basis=discr_domain,
                #verbose=True,
                hack_score_only=len(score_evidence2),
            )

        if 1:
            # Demo with full evidence
            model, evidence, query_results2 = test_model(
                num_annots=num_annots2, num_names=num_annots2,
                num_scores=num_scores,
                mode=1,
                score_evidence=full_evidence + score_evidence2,
                p_score_given_same=discr_p_same,
                score_basis=discr_domain,
                verbose=True
            )
        factor_list2 = query_results2['factor_list']
        if using_soft:
            soft_evidence1 = [dict(zip(x.statenames[0], x.values)) for x in factor_list2]
        score_evidence1 += ([None] * len(score_evidence2))
        full_evidence = full_evidence + score_evidence2
        num_annots2 += 1
        toy_data1 = toy_data2
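The `discretize_scores` helper maps each continuous score to the basis value whose ratio to the score is closest to 1. An equivalent standalone version:

import numpy as np

def discretize_scores(scores, basis):
    # Index of the basis value whose ratio to each score is closest to 1.
    scores = np.asarray(scores, dtype=float)
    return np.abs(1 - (basis / scores[:, None])).argmin(axis=1)

basis = np.array([0.1, 0.5, 1.0, 2.0])
print(discretize_scores([0.12, 0.9, 1.7], basis))  # -> [0 2 3]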
Example 14
 def on_scroll(self, event):
     if self.debug:
         print('[pt.a] on_scroll')
         print(ut.repr3(event.__dict__))
Example 15
def demo_bayesnet(cfg={}):
    r"""
    Make a model that knows who the previous annots are and tries to classify a new annot

    CommandLine:
        python -m ibeis --tf demo_bayesnet --diskshow --verbose --save demo4.png --dpath . --figsize=20,10 --dpi=128 --clipwhite

        python -m ibeis --tf demo_bayesnet --ev :nA=3,Sab=0,Sac=0,Sbc=1
        python -m ibeis --tf demo_bayesnet --ev :nA=4,Sab=0,Sac=0,Sbc=1,Sbd=1 --show
        python -m ibeis --tf demo_bayesnet --ev :nA=4,Sab=0,Sac=0,Sbc=1,Scd=1 --show
        python -m ibeis --tf demo_bayesnet --ev :nA=4,Sab=0,Sac=0,Sbc=1,Sbd=1,Scd=1 --show

        python -m ibeis --tf demo_bayesnet --ev :nA=3,Sab=0,Sac=0,Sbc=1
        python -m ibeis --tf demo_bayesnet --ev :nA=5,rand_scores=True --show

        python -m ibeis --tf demo_bayesnet --ev :nA=4,nS=3,rand_scores=True --show --verbose
        python -m ibeis --tf demo_bayesnet --ev :nA=5,nS=2,Na=fred,rand_scores=True --show --verbose
        python -m ibeis --tf demo_bayesnet --ev :nA=5,nS=5,Na=fred,rand_scores=True --show --verbose
        python -m ibeis --tf demo_bayesnet --ev :nA=4,nS=2,Na=fred,rand_scores=True --show --verbose

        python -m ibeis.algo.hots.demobayes --exec-demo_bayesnet \
                --ev =:nA=4,Sab=0,Sac=0,Sbc=1 \
                :Sbd=1 :Scd=1 :Sbd=1,Scd=1 :Sbd=1,Scd=1,Sad=0 \
                --show --present

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> cfg_list = testdata_demo_cfgs()
        >>> print('cfg_list = %r' % (cfg_list,))
        >>> for cfg in cfg_list:
        >>>     demo_bayesnet(cfg)
        >>> ut.show_if_requested()
    """
    cfg = cfg.copy()
    num_annots = cfg.pop('num_annots', 3)
    num_names = cfg.pop('num_names', None)
    num_scores = cfg.pop('num_scores', 2)
    rand_scores = cfg.pop('rand_scores', False)
    method = cfg.pop('method', 'bp')
    other_evidence = {k: v for k, v in cfg.items() if not k.startswith('_')}
    if rand_scores:
        toy_data = get_toy_data_1v1(num_annots, nid_sequence=[0, 0, 1, 0, 1, 2])
        print('toy_data = ' + ut.repr3(toy_data, nl=1))
        diag_scores, = ut.dict_take(
            toy_data, 'diag_scores'.split(', '))
        discr_domain, discr_p_same = learn_prob_score(num_scores)[0:2]
        def discretize_scores(scores):
            # Assign continuous scores to discrete index
            score_idxs = np.abs(1 - (discr_domain / scores[:, None])).argmin(axis=1)
            return score_idxs
        score_evidence = discretize_scores(diag_scores)
    else:
        score_evidence = []
        discr_p_same = None
        discr_domain = None
    model, evidence, query_results = test_model(
        num_annots=num_annots, num_names=num_names,
        num_scores=num_scores,
        score_evidence=score_evidence,
        mode=1,
        other_evidence=other_evidence,
        p_score_given_same=discr_p_same,
        score_basis=discr_domain,
        method=method,
    )
Example 16
    def fix_duplicates(drive):
        r"""
        For every duplicate file passing a filter (e.g. avi), remove the copy
        that lives in the smallest directory; on a tie, use the smallest dpath.
        This funnels all duplicates of a file into a single folder.

        However, the non-duplicates in that folder also need to be inspected
        to decide whether they should move as well. So this should trigger on
        folders that are at least 50% duplicates, and curated folders probably
        should not be moved.

        Example:
            cd ~/local/scripts
            >>> from register_files import *  # NOQA
            >>> dpaths = ut.get_argval('--drives', type_=list, default=['E:/'])#'D:/', 'E:/', 'F:/'])
            >>> drives = [Drive(root_dpath) for root_dpath in dpaths]
            >>> E = drive = drives[0]
            >>> #D, E, F = drives
        """
        print('Fixing Duplicates in %r' % (drive,))
        list_ = drive.fpath_hashX_list
        multiindex_dict_ = build_multindex(list_)
        duplicate_hashes = [
            key for key, val in six.iteritems(multiindex_dict_)
            if len(val) > 1
        ]
        duplicate_idxs = ut.dict_take(multiindex_dict_, duplicate_hashes)
        unflat_fpaths = ut.list_unflat_take(drive.fpath_list, duplicate_idxs)
        # Check if any dups have been removed
        still_exists = ut.unflat_map(exists, unflat_fpaths)
        unflat_idxs2 = ut.zipcompress(duplicate_idxs, still_exists)
        duplicate_idxs = [idxs for idxs in unflat_idxs2 if len(idxs) > 1]
        # Look at duplicate files
        unflat_fpaths = ut.list_unflat_take(drive.fpath_list, duplicate_idxs)
        unflat_sizes = ut.list_unflat_take(drive.fpath_bytes_list, duplicate_idxs)
        # Find highly coupled directories
        if True:
            coupled_dirs = []
            for fpaths in unflat_fpaths:
                #basedir = ut.longest_existing_path(commonprefix(fpaths))
                dirs = sorted(list(map(dirname, fpaths)))
                _list = list(range(len(dirs)))
                idxs = ut.upper_diag_self_prodx(_list)
                coupled_dirs.extend(list(map(tuple, ut.list_unflat_take(dirs, idxs))))
            hist_ = ut.dict_hist(coupled_dirs)
            coupled_idxs = ut.list_argsort(hist_.values())[::-1]
            most_coupled = ut.take(list(hist_.keys()), coupled_idxs[0:100])
            print('Coupled fpaths: ' + ut.list_str(most_coupled, nl=True))
        print('%d unique files are duplicated' % (len(unflat_sizes),))
        #print('Duplicate sizes: ' + ut.list_str(unflat_sizes[0:10], nl=True))
        #print('Duplicate fpaths: ' + ut.list_str(unflat_fpaths[0:10], nl=True))
        #print('Duplicate fpaths: ' + ut.list_str(unflat_fpaths[0::5], nl=True))
        print('Duplicate fpaths: ' + ut.list_str(unflat_fpaths, nl=True))
        # Find duplicate directories
        dpath_list = list(drive.dpath_to_fidx.keys())
        fidxs_list = ut.dict_take(drive.dpath_to_fidx, drive.dpath_list)
        #exists_list = list(map(exists, drive.fpath_list))
        #unflat_exists = ut.list_unflat_take(exists_list, fidxs_list)
        fname_registry = [basename(fpath) for fpath in drive.fpath_list]
        unflat_fnames = ut.list_unflat_take(fname_registry, fidxs_list)
        def unsorted_list_hash(list_):
            return ut.hashstr27(str(sorted(list_)))
        unflat_fname_sets = list(map(unsorted_list_hash, ut.ProgIter(unflat_fnames, freq=10000)))
        multiindex_dict2_ = build_multindex(unflat_fname_sets)
        fname_based_duplicate_hashes = [key for key, val in multiindex_dict2_.items() if len(val) > 1]
        print('#fname_based_duplicate_dpaths = %r' % (len(fname_based_duplicate_hashes),))
        fname_based_duplicate_didxs = ut.dict_take(multiindex_dict2_, fname_based_duplicate_hashes)
        fname_based_duplicate_dpaths = ut.list_unflat_take(dpath_list, fname_based_duplicate_didxs)
        print(ut.repr3(fname_based_duplicate_dpaths[0:10]))
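The duplicate detection hinges on grouping files by content hash; `build_multindex` maps each hash to the indices where it occurs. A self-contained sketch of that grouping with `hashlib` (the names here are illustrative, not the script's API):

import hashlib
import os
from collections import defaultdict

def file_hash(fpath, blocksize=1 << 20):
    # Stream the file so large files don't need to fit in memory.
    hasher = hashlib.sha1()
    with open(fpath, 'rb') as f:
        for chunk in iter(lambda: f.read(blocksize), b''):
            hasher.update(chunk)
    return hasher.hexdigest()

def find_duplicates(fpaths):
    # Map content-hash -> list of paths; duplicates are the groups of size > 1.
    groups = defaultdict(list)
    for fpath in fpaths:
        if os.path.exists(fpath):
            groups[file_hash(fpath)].append(fpath)
    return {h: ps for h, ps in groups.items() if len(ps) > 1}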
Example 17
def show_all_colormaps():
    """
    Displays the colormaps rotated 90 degrees. Weird.

    FIXME: Remove call to pylab

    References:
        http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
        http://matplotlib.org/examples/color/colormaps_reference.html

    Notes:
        cmaps = [('Perceptually Uniform Sequential',
                            ['viridis', 'inferno', 'plasma', 'magma']),
         ('Sequential',     ['Blues', 'BuGn', 'BuPu',
                             'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd',
                             'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu',
                             'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']),
         ('Sequential (2)', ['afmhot', 'autumn', 'bone', 'cool',
                             'copper', 'gist_heat', 'gray', 'hot',
                             'pink', 'spring', 'summer', 'winter']),
         ('Diverging',      ['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr',
                             'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
                             'seismic']),
         ('Qualitative',    ['Accent', 'Dark2', 'Paired', 'Pastel1',
                             'Pastel2', 'Set1', 'Set2', 'Set3']),
         ('Miscellaneous',  ['gist_earth', 'terrain', 'ocean', 'gist_stern',
                             'brg', 'CMRmap', 'cubehelix',
                             'gnuplot', 'gnuplot2', 'gist_ncar',
                             'nipy_spectral', 'jet', 'rainbow',
                             'gist_rainbow', 'hsv', 'flag', 'prism'])
                             ]


    CommandLine:
        python -m plottool.color_funcs --test-show_all_colormaps --show
        python -m plottool.color_funcs --test-show_all_colormaps --show --type=Miscellaneous

    Example:
        >>> # DISABLE_DOCTEST
        >>> from plottool.color_funcs import *  # NOQA
        >>> import plottool as pt
        >>> show_all_colormaps()
        >>> pt.show_if_requested()
    """
    from matplotlib import pyplot as plt
    import pylab
    import numpy as np

    pylab.rc('text', usetex=False)
    TRANSPOSE = True
    a = np.outer(np.arange(0, 1, 0.01), np.ones(10))
    if TRANSPOSE:
        a = a.T
    pylab.figure(figsize=(10, 5))
    if TRANSPOSE:
        pylab.subplots_adjust(right=0.8, left=0.05, bottom=0.01, top=0.99)
    else:
        pylab.subplots_adjust(top=0.8, bottom=0.05, left=0.01, right=0.99)

    type_ = ut.get_argval('--type', str, default=None)
    if type_ is None:
        maps = [m for m in pylab.cm.datad if not m.endswith("_r")]
        #maps += cmaps2.__all__
        maps.sort()
    else:
        maps = CMAP_DICT[type_]
        print('CMAP_DICT = %s' % (ut.repr3(CMAP_DICT),))

    length = len(maps) + 1
    for i, m in enumerate(maps):
        if TRANSPOSE:
            pylab.subplot(length, 1, i + 1)
        else:
            pylab.subplot(1, length, i + 1)

        #pylab.axis("off")
        ax = plt.gca()
        ax.set_xticks([])
        ax.set_yticks([])
        #try:
        cmap = pylab.get_cmap(m)
        #except Exception:
        #    cmap = getattr(cmaps2, m)

        pylab.imshow(a, aspect='auto', cmap=cmap)  # , origin="lower")
        if TRANSPOSE:
            ax.set_ylabel(m, rotation=0, fontsize=10,
                          horizontalalignment='right', verticalalignment='center')
        else:
            pylab.title(m, rotation=90, fontsize=10)
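The core trick in `show_all_colormaps` is rendering the same gradient once per colormap. A compact matplotlib-only version, without the pylab and argument-parsing machinery:

import numpy as np
import matplotlib.pyplot as plt

def show_colormap_strips(names=('viridis', 'plasma', 'coolwarm', 'gray')):
    # One horizontal gradient strip per colormap, labeled on the left.
    gradient = np.linspace(0, 1, 256).reshape(1, -1)
    fig, axes = plt.subplots(len(names), 1, figsize=(6, 0.5 * len(names)))
    for ax, name in zip(axes, names):
        ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))
        ax.set_ylabel(name, rotation=0, fontsize=8,
                      horizontalalignment='right', verticalalignment='center')
        ax.set_xticks([])
        ax.set_yticks([])
    plt.show()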
Example 18
def dummy_example_depcacahe():
    r"""
    CommandLine:
        python -m dtool.example_depcache --exec-dummy_example_depcacahe

    Example:
        >>> # ENABLE_DOCTEST
        >>> from dtool.example_depcache import *  # NOQA
        >>> depc = dummy_example_depcacahe()
        >>> ut.show_if_requested()
    """
    fname = None
    # fname = 'dummy_default_depcache'
    fname = ':memory:'

    depc = testdata_depc(fname)

    tablename = 'fgweight'
    # print('[test] fgweight_path =\n%s' % (ut.repr3(depc.get_dependencies(tablename), nl=1),))
    # print('[test] keypoint =\n%s' % (ut.repr3(depc.get_dependencies('keypoint'), nl=1),))
    # print('[test] descriptor =\n%s' % (ut.repr3(depc.get_dependencies('descriptor'), nl=1),))
    # print('[test] spam =\n%s' % (ut.repr3(depc.get_dependencies('spam'), nl=1),))

    root_rowids = [5, 3]
    desc_rowids = depc.get_rowids('descriptor', root_rowids)  # NOQA

    table = depc[tablename]  # NOQA

    #example_getter_methods(depc, 'vsmany', root_rowids)
    # example_getter_methods(depc, 'chipmask', root_rowids)
    # example_getter_methods(depc, 'keypoint', root_rowids)
    # example_getter_methods(depc, 'chip', root_rowids)

    test_getters(depc)

    #import plottool as pt
    # pt.ensure_pylab_qt4()

    graph = depc.make_graph()  # NOQA
    #pt.show_nx(graph)

    print('---------- 111 -----------')

    # Try testing the algorithm
    req = depc.new_request('vsmany', root_rowids, root_rowids, {})
    print('req = %r' % (req,))
    req.execute()

    print('---------- 222 -----------')

    cfgdict = {'sver_on': False}
    req = depc.new_request('vsmany', root_rowids, root_rowids, cfgdict)
    req.execute()

    print('---------- 333 -----------')

    cfgdict = {'sver_on': False, 'adapt_shape': False}
    req = depc.new_request('vsmany', root_rowids, root_rowids, cfgdict)
    req.execute()

    print('---------- 444 -----------')

    req = depc.new_request('vsmany', root_rowids, root_rowids, {})
    req.execute()

    #ut.InstanceList(
    db = list(depc.fname_to_db.values())[0]
    #db_list = ut.InstanceList(depc.fname_to_db.values())
    #db_list.print_table_csv('config', exclude_columns='config_strid')

    print('config table')
    column_list, column_names = db.get_table_column_data(tablename,
                                                         ['config_strid'])
    print('\n'.join([ut.hz_str(*list(ut.interleave((r, [', '] * (len(r) - 1)))))
                     for r in list(zip(*[[ut.repr3(r, nl=2) for r in col] for col in column_list]))]))

    return depc
Example 19
def postload_commands(ibs, back):
    """
    Postload commands deal with a specific ibeis database

    ibeis --db PZ_MTEST --occur "*All Images" --query 1
    ibeis --db PZ_MTEST --occur "*All Images" --query-intra

    """
    if ut.NOT_QUIET:
        print('\n[main_cmd] postload_commands')
    if params.args.view_database_directory:
        print('got arg --vdd')
        vdd(ibs)
    if params.args.set_default_dbdir:
        sysres.set_default_dbdir(ibs.get_dbdir())
    if params.args.update_query_cfg is not None:
        # Set query parameters from command line using the --cfg flag
        cfgdict = ut.parse_cfgstr_list(params.args.update_query_cfg)
        print('Custom cfgdict specified')
        print(ut.dict_str(cfgdict))
        ibs.update_query_cfg(**cfgdict)
        #print(ibs.cfg.query_cfg.get_cfgstr())
    if params.args.edit_notes:
        ut.editfile(ibs.get_dbnotes_fpath(ensure=True))
    if params.args.delete_cache:
        ibs.delete_cache()
    if params.args.delete_cache_complete:
        ibs.delete_cache(delete_chips=True, delete_imagesets=True)
    if params.args.delete_query_cache:
        ibs.delete_qres_cache()
    if params.args.set_all_species is not None:
        ibs._overwrite_all_annot_species_to(params.args.set_all_species)
    if params.args.dump_schema:
        ibs.db.print_schema()
    # DEPRECATE
    if params.args.set_notes is not None:
        ibs.set_dbnotes(params.args.set_notes)
    if params.args.set_aids_as_hard is not None:
        aid_list = params.args.set_aids_as_hard
        ibs.set_annot_is_hard(aid_list, [True] * len(aid_list))
    #/DEPRECATE

    if ut.get_argflag('--ipynb'):
        back.launch_ipy_notebook()

    select_imgsetid = ut.get_argval(('--select-imgsetid', '--imgsetid', '--occur'), None)
    if select_imgsetid is not None:
        print('\n+ --- CMD SELECT EID=%r ---' % (select_imgsetid,))
        # Whoa: this doesn't work. Weird.
        #back.select_imgsetid(select_imgsetid)
        # This might be the root of the GUI problems
        #back.front._change_imageset(select_imgsetid)
        back.front.select_imageset_tab(select_imgsetid)
        print('L ___ CMD SELECT EID=%r ___\n' % (select_imgsetid,))
    # Send commands to GUIBack
    if params.args.select_aid is not None:
        if back is not None:
            try:
                ibsfuncs.assert_valid_aids(ibs, (params.args.select_aid,))
            except AssertionError:
                print('Valid RIDs are: %r' % (ibs.get_valid_aids(),))
                raise
            back.select_aid(params.args.select_aid)
    if params.args.select_gid is not None:
        back.select_gid(params.args.select_gid)
    if params.args.select_nid is not None:
        back.select_nid(params.args.select_nid)

    select_name = ut.get_argval('--select-name')
    if select_name is not None:
        import ibeis.gui.guiheaders as gh
        back.ibswgt.select_table_indicies_from_text(gh.NAMES_TREE, select_name,
                                                    allow_table_change=True)

    if ut.get_argflag(('--intra-occur-query', '--query-intra-occur', '--query-intra')):
        back.special_query_funcs['intra_occurrence'](cfgdict={'use_k_padding': False})

    qaid_list = ut.get_argval(('--query-aid', '--query'), type_=list, default=None)

    if qaid_list is not None:
        #qaid_list = params.args.query_aid
        # fix stride case
        if len(qaid_list) == 1 and isinstance(qaid_list[0], tuple):
            qaid_list = list(qaid_list[0])
        daids_mode = ut.get_argval('--daids-mode', type_=str, default=const.VS_EXEMPLARS_KEY)
        back.compute_queries(qaid_list=qaid_list, daids_mode=daids_mode, ranks_lt=10)

    if ut.get_argflag('--inc-query'):
        back.incremental_query()

    if ut.get_argflag(('--dbinfo', '--display_dbinfo')):
        back.display_dbinfo()
        pass

    aidcmd = ut.get_argval('--aidcmd', default=None)
    aid = ut.get_argval('--aid', type_=int, default=1)
    if aidcmd:
        #aidcmd = 'Interact image'
        metadata = ibs.get_annot_lazy_dict(aid)
        annot_context_options = metadata['annot_context_options']
        aidcmd_dict = dict(annot_context_options)
        print('aidcmd_dict = %s' % (ut.repr3(aidcmd_dict),))
        command = aidcmd_dict[aidcmd]
        command()
        #import utool
        #utool.embed()
        #back.start_web_server_parallel()

    if ut.get_argflag('--start-web'):
        back.start_web_server_parallel()

    screengrab_fpath = ut.get_argval('--screengrab')
    if screengrab_fpath:
        from guitool.__PYQT__.QtGui import QPixmap
        from PyQt4.QtTest import QTest
        from PyQt4.QtCore import Qt
        fpath = ut.truepath(screengrab_fpath)
        import guitool
        #ut.embed()
        timer2 = guitool.__PYQT__.QtCore.QTimer()
        done = [1000]
        def delayed_screenshot_func():
            if done[0] == 500:
                #back.mainwin.menubar.triggered.emit(back.mainwin.menuFile)
                print('Mouseclick')
                QTest.mouseClick(back.mainwin.menuFile, Qt.LeftButton)
                # This works
                #QTest.mouseClick(back.front.import_button, Qt.LeftButton)
            if done[0] == 1:
                timer2.stop()
                print('screengrab to %r' % (fpath,))
                screenimg = QPixmap.grabWindow(back.mainwin.winId())
                screenimg.save(fpath, 'jpg')
                ut.startfile(fpath)
                print('lub dub2')
            done[0] -= 1
            return None
        CLICK_FILE_MENU = True
        if CLICK_FILE_MENU:
            #ut.embed()
            #QTest::keyClick(menu, Qt::Key_Down)
            pass
        timer2.delayed_screenshot_func = delayed_screenshot_func
        timer2.timeout.connect(timer2.delayed_screenshot_func)
        timer2.start(1)
        back.mainwin.timer2 = timer2
        guitool.activate_qwindow(back.mainwin)
        #QPixmap.grabWindow(back.mainwin.winId()).save(fpath, 'jpg')
        #ut.startfile(fpath)
        #ut.embed()
        pass

    if params.args.postload_exit:
        print('[main_cmd] postload exit')
        sys.exit(0)
Example 20
def show_all_colormaps():
    """
    Displays the colormaps rotated 90 degrees. Weird.

    FIXME: Remove call to pylab

    References:
        http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
        http://matplotlib.org/examples/color/colormaps_reference.html

    Notes:
        cmaps = [('Perceptually Uniform Sequential',
                            ['viridis', 'inferno', 'plasma', 'magma']),
         ('Sequential',     ['Blues', 'BuGn', 'BuPu',
                             'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd',
                             'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu',
                             'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']),
         ('Sequential (2)', ['afmhot', 'autumn', 'bone', 'cool',
                             'copper', 'gist_heat', 'gray', 'hot',
                             'pink', 'spring', 'summer', 'winter']),
         ('Diverging',      ['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr',
                             'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
                             'seismic']),
         ('Qualitative',    ['Accent', 'Dark2', 'Paired', 'Pastel1',
                             'Pastel2', 'Set1', 'Set2', 'Set3']),
         ('Miscellaneous',  ['gist_earth', 'terrain', 'ocean', 'gist_stern',
                             'brg', 'CMRmap', 'cubehelix',
                             'gnuplot', 'gnuplot2', 'gist_ncar',
                             'nipy_spectral', 'jet', 'rainbow',
                             'gist_rainbow', 'hsv', 'flag', 'prism'])
                             ]


    CommandLine:
        python -m wbia.plottool.color_funcs --test-show_all_colormaps --show
        python -m wbia.plottool.color_funcs --test-show_all_colormaps --show --type=Miscellaneous
        python -m wbia.plottool.color_funcs --test-show_all_colormaps --show --cmap=RdYlBu

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.plottool.color_funcs import *  # NOQA
        >>> import wbia.plottool as pt
        >>> show_all_colormaps()
        >>> pt.show_if_requested()
    """
    from matplotlib import pyplot as plt
    import pylab
    import numpy as np

    pylab.rc('text', usetex=False)
    TRANSPOSE = True
    a = np.outer(np.arange(0, 1, 0.01), np.ones(10))
    if TRANSPOSE:
        a = a.T
    pylab.figure(figsize=(10, 5))
    if TRANSPOSE:
        pylab.subplots_adjust(right=0.8, left=0.05, bottom=0.01, top=0.99)
    else:
        pylab.subplots_adjust(top=0.8, bottom=0.05, left=0.01, right=0.99)

    type_ = ut.get_argval('--type', str, default=None)
    if type_ is None:
        maps = [m for m in pylab.cm.datad if not m.endswith('_r')]
        # maps += cmaps2.__all__
        maps.sort()
    else:
        maps = CMAP_DICT[type_]
        print('CMAP_DICT = %s' % (ut.repr3(CMAP_DICT), ))

    cmap_ = ut.get_argval('--cmap', default=None)
    if cmap_ is not None:
        maps = [getattr(plt.cm, cmap_)]

    length = len(maps) + 1
    for i, m in enumerate(maps):
        if TRANSPOSE:
            pylab.subplot(length, 1, i + 1)
        else:
            pylab.subplot(1, length, i + 1)

        # pylab.axis("off")
        ax = plt.gca()
        ax.set_xticks([])
        ax.set_yticks([])
        # try:
        cmap = pylab.get_cmap(m)
        # except Exception:
        #    cmap = getattr(cmaps2, m)

        pylab.imshow(a, aspect='auto', cmap=cmap)  # , origin="lower")
        if TRANSPOSE:
            ax.set_ylabel(
                m,
                rotation=0,
                fontsize=10,
                horizontalalignment='right',
                verticalalignment='center',
            )
        else:
            pylab.title(m, rotation=90, fontsize=10)
Example 21
 def __repr__(group):
     repr_ = ut.repr3(group.infohist, nl=1)
     print(repr_)
     return repr_
Example 22
def compare_featscores():
    """
    CommandLine:

        ibeis --tf compare_featscores  --db PZ_MTEST \
            --nfscfg :disttype=[L2_sift,lnbnn],top_percent=[None,.5,.1] -a timectrl \
            -p default:K=[1,2],normalizer_rule=name \
            --save featscore{db}.png --figsize=13,20 --diskshow

        ibeis --tf compare_featscores  --db PZ_MTEST \
            --nfscfg :disttype=[L2_sift,normdist,lnbnn],top_percent=[None,.5] -a timectrl \
            -p default:K=[1],normalizer_rule=name,sv_on=[True,False] \
            --save featscore{db}.png --figsize=13,10 --diskshow

        ibeis --tf compare_featscores --nfscfg :disttype=[L2_sift,normdist,lnbnn] \
            -a timectrl -p default:K=1,normalizer_rule=name --db PZ_Master1 \
            --save featscore{db}.png  --figsize=13,13 --diskshow

        ibeis --tf compare_featscores --nfscfg :disttype=[L2_sift,normdist,lnbnn] \
            -a timectrl -p default:K=1,normalizer_rule=name --db GZ_ALL \
            --save featscore{db}.png  --figsize=13,13 --diskshow

        ibeis --tf compare_featscores  --db GIRM_Master1 \
            --nfscfg ':disttype=fg,L2_sift,normdist,lnbnn' \
            -a timectrl -p default:K=1,normalizer_rule=name \
            --save featscore{db}.png  --figsize=13,13

        ibeis --tf compare_featscores --nfscfg :disttype=[L2_sift,normdist,lnbnn] \
            -a timectrl -p default:K=[1,2,3],normalizer_rule=name,sv_on=False \
            --db PZ_Master1 --save featscore{db}.png  \
                --dpi=128 --figsize=15,20 --diskshow

        ibeis --tf compare_featscores --show --nfscfg :disttype=[L2_sift,normdist] -a timectrl -p :K=1 --db PZ_MTEST
        ibeis --tf compare_featscores --show --nfscfg :disttype=[L2_sift,normdist] -a timectrl -p :K=1 --db GZ_ALL
        ibeis --tf compare_featscores --show --nfscfg :disttype=[L2_sift,normdist] -a timectrl -p :K=1 --db PZ_Master1
        ibeis --tf compare_featscores --show --nfscfg :disttype=[L2_sift,normdist] -a timectrl -p :K=1 --db GIRM_Master1

        ibeis --tf compare_featscores  --db PZ_MTEST \
            --nfscfg :disttype=[L2_sift,normdist,lnbnn],top_percent=[None,.5,.2] -a timectrl \
            -p default:K=[1],normalizer_rule=name \
            --save featscore{db}.png --figsize=13,20 --diskshow

        ibeis --tf compare_featscores  --db PZ_MTEST \
            --nfscfg :disttype=[L2_sift,normdist,lnbnn],top_percent=[None,.5,.2] -a timectrl \
            -p default:K=[1],normalizer_rule=name \
            --save featscore{db}.png --figsize=13,20 --diskshow

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.scorenorm import *  # NOQA
        >>> result = compare_featscores()
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    import plottool as pt
    import ibeis
    nfs_cfg_list = NormFeatScoreConfig.from_argv_cfgs()
    learnkw = {}
    ibs, testres = ibeis.testdata_expts(
        defaultdb='PZ_MTEST', a=['default'], p=['default:K=1'])
    print('nfs_cfg_list = ' + ut.repr3(nfs_cfg_list))

    encoder_list = []
    lbl_list = []

    varied_nfs_lbls = ut.get_varied_cfg_lbls(nfs_cfg_list)
    varied_qreq_lbls = ut.get_varied_cfg_lbls(testres.cfgdict_list)
    #varies_qreq_lbls

    #func = ut.cached_func(cache_dir='.')(learn_featscore_normalizer)
    for datakw, nlbl in zip(nfs_cfg_list, varied_nfs_lbls):
        for qreq_, qlbl in zip(testres.cfgx2_qreq_, varied_qreq_lbls):
            lbl = qlbl + ' ' + nlbl
            cfgstr = '_'.join([datakw.get_cfgstr(), qreq_.get_full_cfgstr()])
            try:
                encoder = vt.ScoreNormalizer()
                encoder.load(cfgstr=cfgstr)
            except IOError:
                print('datakw = %r' % (datakw,))
                encoder = learn_featscore_normalizer(qreq_, datakw, learnkw)
                encoder.save(cfgstr=cfgstr)
            encoder_list.append(encoder)
            lbl_list.append(lbl)

    fnum = 1
    # next_pnum = pt.make_pnum_nextgen(nRows=len(encoder_list), nCols=3)
    next_pnum = pt.make_pnum_nextgen(nRows=len(encoder_list) + 1, nCols=3, start=3)

    iconsize = 94
    if len(encoder_list) > 3:
        iconsize = 64

    icon = qreq_.ibs.get_database_icon(max_dsize=(None, iconsize), aid=qreq_.qaids[0])
    score_range = (0, .6)
    for encoder, lbl in zip(encoder_list, lbl_list):
        #encoder.visualize(figtitle=encoder.get_cfgstr(), with_prebayes=False, with_postbayes=False)
        encoder._plot_score_support_hist(fnum, pnum=next_pnum(), titlesuf='\n' + lbl, score_range=score_range)
        encoder._plot_prebayes(fnum, pnum=next_pnum())
        encoder._plot_roc(fnum, pnum=next_pnum())
        if icon is not None:
            pt.overlay_icon(icon, coords=(1, 0), bbox_alignment=(1, 0))

    nonvaried_lbl = ut.get_nonvaried_cfg_lbls(nfs_cfg_list)[0]
    figtitle = qreq_.__str__() + '\n' + nonvaried_lbl

    pt.set_figtitle(figtitle)
    pt.adjust_subplots(hspace=.5, top=.92, bottom=.08, left=.1, right=.9)
    pt.update_figsize()
    pt.plt.tight_layout()
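The try/load/except/learn/save block in the middle is a disk cache keyed by a config string. A generic sketch of that pattern with `pickle` (the helper below is hypothetical, not vtool's `ScoreNormalizer` API):

import hashlib
import os
import pickle

def cached_by_cfgstr(cfgstr, compute_func, cache_dir='.cache'):
    # Load a previously computed result keyed by its config string,
    # otherwise compute and save it.
    os.makedirs(cache_dir, exist_ok=True)
    fpath = os.path.join(cache_dir, hashlib.sha1(cfgstr.encode()).hexdigest() + '.pkl')
    try:
        with open(fpath, 'rb') as f:
            return pickle.load(f)
    except IOError:
        result = compute_func()
        with open(fpath, 'wb') as f:
            pickle.dump(result, f)
        return result

encoder = cached_by_cfgstr('K=1_normrule=name', lambda: {'model': 'trained'})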
Example 23
    failures = []  # initialized earlier in the full script; declared here so the snippet runs
    for repo_dpath in IBEIS_REPO_DIRS:
        # ut.getp_
        mod_dpaths = ut.get_submodules_from_dpath(repo_dpath, recursive=False,
                                                  only_packages=True)
        modname_list = ut.lmap(ut.get_modname_from_modpath, mod_dpaths)
        print('Checking modules = %r' % (modname_list,))

        for modname in modname_list:
            try:
                ut.import_modname(modname)
                print(modname + ' success')
            except ImportError as ex:
                failures += [modname]
                print(modname + ' failure')

    print('failures = %s' % (ut.repr3(failures),))
    # print('repo_dpath = %r' % (repo_dpath,))
    # print('modules = %r' % (modules,))

    # import ibeis
    # print('found ibeis=%r' % (ibeis,))

if GET_ARGFLAG('--push'):
    ut.gg_command('git push')


commit_msg = GET_ARGVAL('--commit', type_=str, default=None)
if commit_msg is not None:
    ut.gg_command('git commit -am "{commit_msg}"'.format(**locals()))

if GET_ARGFLAG('--clean'):
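The import-check loop above assumes a `failures` list defined earlier in the script. A self-contained version of the same check using `importlib`:

import importlib

def check_imports(modnames):
    # Try to import each module and collect the ones that fail.
    failures = []
    for modname in modnames:
        try:
            importlib.import_module(modname)
            print(modname + ' success')
        except ImportError:
            failures.append(modname)
            print(modname + ' failure')
    return failures

print('failures = %r' % (check_imports(['json', 'not_a_real_module']),))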
Example 24
 def basic_infostr(card):
     basic_infostr_ = ut.repr3(card.basic_infodict, nl=1)
     return basic_infostr_
Example 25
 def infostr(card):
     infostr_ = ut.repr3(card.infodict, nl=1)
     return infostr_
Example 26
def detect_opencv_keypoints():
    import cv2
    import vtool as vt
    import numpy as np  # NOQA

    #img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='lena.png'))
    img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='zebra.png'))
    imgBGR = vt.imread(img_fpath)
    imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)

    def from_cv2_kpts(cv2_kp):
        kp = (cv2_kp.pt[0], cv2_kp.pt[1], cv2_kp.size, 0, cv2_kp.size, cv2_kp.angle)
        return kp

    print('\n'.join(ut.search_module(cv2, 'create', recursive=True)))

    detect_factory = {
        #'BLOB': cv2.SimpleBlobDetector_create,
        #'HARRIS' : HarrisWrapper.create,
        #'SIFT': cv2.xfeatures2d.SIFT_create,  # really DoG
        'SURF': cv2.xfeatures2d.SURF_create,  # really fast-Hessian blobs
        'MSER': cv2.MSER_create,
        #'StarDetector_create',

    }

    extract_factory = {
        'SIFT': cv2.xfeatures2d.SIFT_create,
        'SURF': cv2.xfeatures2d.SURF_create,
        #'DAISY': cv2.xfeatures2d.DAISY_create,
        'FREAK': cv2.xfeatures2d.FREAK_create,
        #'LATCH': cv2.xfeatures2d.LATCH_create,
        #'LUCID': cv2.xfeatures2d.LUCID_create,
        #'ORB': cv2.ORB_create,
    }
    mask = None

    type_to_kpts = {}
    type_to_desc = {}

    for key in detect_factory.keys():
        factory = detect_factory[key]
        extractor = factory()

        # For MSERS need to adapt shape and then convert into a keypoint repr
        if hasattr(extractor, 'detectRegions'):
            # bboxes are x,y,w,h
            regions, bboxes = extractor.detectRegions(imgGray)
            # ellipse definition from [Fitzgibbon95]
            # http://www.bmva.org/bmvc/1995/bmvc-95-050.pdf p518
            # ell = [c_x, c_y, R_x, R_y, theta]
            # (cx, cy) = conic center
            # Rx and Ry = conic radii
            # theta is the counterclockwise angle
            fitz_ellipses = [cv2.fitEllipse(mser) for mser in regions]

            # http://answers.opencv.org/question/19015/how-to-use-mser-in-python/
            #hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
            #hull_ells = [cv2.fitEllipse(hull[:, 0]) for hull in hulls]
            kpts_ = []
            for ell in fitz_ellipses:
                ((cx, cy), (rx, ry), degrees) = ell
                theta = np.radians(degrees)  # opencv lives in radians
                S = vt.scale_mat3x3(rx, ry)
                T = vt.translation_mat3x3(cx, cy)
                R = vt.rotation_mat3x3(theta)
                #R = np.eye(3)
                invVR = T.dot(R.dot(S))
                kpt = vt.flatten_invV_mats_to_kpts(np.array([invVR]))[0]
                kpts_.append(kpt)
            kpts_ = np.array(kpts_)

        tt = ut.tic('Computing %r keypoints' % (key,))
        try:
            cv2_kpts = extractor.detect(imgGray, mask)
        except Exception as ex:
            ut.printex(ex, 'Failed to compute %r keypoints' % (key,), iswarning=True)
            pass
        else:
            ut.toc(tt)
            type_to_kpts[key] = cv2_kpts

    print(list(type_to_kpts.keys()))
    print(ut.depth_profile(list(type_to_kpts.values())))
    print('type_to_kpts = ' + ut.repr3(type_to_kpts, truncate=True))

    cv2_kpts = type_to_kpts['MSER']
    kp = cv2_kpts[0]  # NOQA
    #cv2.fitEllipse(cv2_kpts[0])
    cv2_kpts = type_to_kpts['SURF']

    for key in extract_factory.keys():
        factory = extract_factory[key]
        extractor = factory()
        tt = ut.tic('Computing %r descriptors' % (key,))
        try:
            filtered_cv2_kpts, desc = extractor.compute(imgGray, cv2_kpts)
        except Exception as ex:
            ut.printex(ex, 'Failed to compute %r descriptors' % (key,), iswarning=True)
            pass
        else:
            ut.toc(tt)
            type_to_desc[key] = desc

    print(list(type_to_desc.keys()))
    print(ut.depth_profile(list(type_to_desc.values())))
    print('type_to_desc = ' + ut.repr3(type_to_desc, truncate=True))
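The MSER branch turns each fitted ellipse into an affine keypoint by composing translate * rotate * scale, i.e. invVR = T.dot(R.dot(S)). A standalone NumPy version of those 3x3 helpers (stand-ins for the `vt.*` functions used above):

import numpy as np

def ellipse_to_affine(cx, cy, rx, ry, degrees):
    # Compose translation * rotation * scale, as in the MSER branch above.
    theta = np.radians(degrees)
    S = np.diag([rx, ry, 1.0])
    R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                  [np.sin(theta),  np.cos(theta), 0.0],
                  [0.0, 0.0, 1.0]])
    T = np.array([[1.0, 0.0, cx],
                  [0.0, 1.0, cy],
                  [0.0, 0.0, 1.0]])
    return T.dot(R).dot(S)

print(ellipse_to_affine(10.0, 20.0, 4.0, 2.0, 30.0))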
Example 27
                def translated_call(**kwargs):
                    def html_newlines(text):
                        r = '<br />\n'
                        text = text.replace(' ', '&nbsp;')
                        text = text.replace('\r\n', r).replace('\n\r', r).replace('\r', r).replace('\n', r)
                        return text

                    __format__ = False  # Default __format__ value
                    ignore_cookie_set = False
                    try:
                        #print('Processing: %r with args: %r and kwargs: %r' % (func, args, kwargs, ))
                        # Pipe web input into Python web call
                        kwargs2 = _process_input(flask.request.args)
                        kwargs3 = _process_input(flask.request.form)
                        kwargs.update(kwargs2)
                        kwargs.update(kwargs3)
                        jQuery_callback = None
                        if 'callback' in kwargs and 'jQuery' in kwargs['callback']:
                            jQuery_callback = str(kwargs.pop('callback', None))
                            kwargs.pop('_', None)

                        #print('KWARGS:  %s' % (kwargs, ))
                        #print('COOKIES: %s' % (request.cookies, ))
                        __format__ = request.cookies.get('__format__', None)
                        __format__ = kwargs.pop('__format__', __format__)
                        if __format__ is not None:
                            __format__ = str(__format__).lower()
                            ignore_cookie_set = __format__ in ['onetime', 'true']
                            __format__ = __format__ in ['true', 'enabled', 'enable']

                        resp_tup = translate_ibeis_webcall(func, **kwargs)
                        rawreturn, success, code, message = resp_tup
                    except WebException as webex:
                        ut.printex(webex)
                        rawreturn = webex.get_rawreturn(
                            DEBUG_PYTHON_STACK_TRACE_JSON_RESPONSE)
                        success = False
                        code = webex.code
                        message = webex.message
                        jQuery_callback = None
                    except Exception as ex:
                        ut.printex(ex)
                        rawreturn = ''
                        if DEBUG_PYTHON_STACK_TRACE_JSON_RESPONSE:
                            rawreturn = str(traceback.format_exc())
                        success = False
                        code = 500
                        errmsg = str(ex)
                        message = 'API error, Python Exception thrown: %s' % (errmsg)
                        if "'int' object is not iterable" in message:
                            rawreturn = (
                                'HINT: the input for this call is most likely '
                                'expected to be a list.  Try adding a comma at '
                                'the end of the input (to cast the conversion '
                                'into a list) or encapsulate the input with '
                                '[].')
                        jQuery_callback = None

                    #print('RECEIVED FORMAT: %r' % (__format__, ))

                    if __format__:
                        # Hack for readable error messages
                        webreturn = translate_ibeis_webreturn(
                            rawreturn, success, code, message, jQuery_callback)
                        webreturn = ut.repr3(ut.from_json(webreturn), strvals=True)

                        try:
                            from ansi2html import Ansi2HTMLConverter
                            conv = Ansi2HTMLConverter()
                            webreturn = conv.convert(webreturn)
                        except ImportError as ex:
                            ut.printex(ex, 'pip install ansi2html', iswarning=True)
                            webreturn = ut.strip_ansi(webreturn)
                            webreturn = '<p><samp>\n' + html_newlines(webreturn) + '\n</samp></p>'
                            webreturn = '<meta http-equiv="Content-Type" content="text/html;charset=ISO-8859-8">\n' + webreturn

                        def get_func_href(funcname):
                            url = 'http://' + request.environ['HTTP_HOST'] + flask.url_for(funcname) + '?__format__=True'
                            return '<a href="{url}">{url}</a>'.format(url=url)

                        if not success:
                            webreturn += '<pre>See logs for details: %s</pre>' % get_func_href('get_current_log_text')
                            webreturn += '<pre>It may also help to check db_info: %s</pre>' % get_func_href('get_dbinfo')
                    else:
                        webreturn = translate_ibeis_webreturn(
                            rawreturn, success, code, message, jQuery_callback)
                        webreturn = ut.strip_ansi(webreturn)

                    resp = flask.make_response(webreturn, code)
                    if not ignore_cookie_set:
                        if __format__:
                            resp.set_cookie('__format__', 'enabled')
                        else:
                            resp.set_cookie('__format__', '', expires=0)

                    return resp
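
# A self-contained sketch (hypothetical endpoint; not the original API) of the
# __format__ precedence implemented above: the request parameter overrides the
# cookie, and 'onetime' (or 'true') enables formatting for a single response
# without persisting the cookie.
import flask

app = flask.Flask(__name__)

@app.route('/demo')
def demo():
    fmt = flask.request.cookies.get('__format__', None)
    fmt = flask.request.args.get('__format__', fmt)  # request param wins
    fmt = str(fmt).lower()
    ignore_cookie_set = fmt in ('onetime', 'true')
    enabled = fmt in ('true', 'enabled', 'enable')
    resp = flask.make_response('formatted' if enabled else 'raw', 200)
    if not ignore_cookie_set:
        if enabled:
            resp.set_cookie('__format__', 'enabled')  # persist the preference
        else:
            resp.set_cookie('__format__', '', expires=0)  # clear the cookie
    return resp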
Ejemplo n.º 28
0
    def detect(rf, forest, input_gpath_list, **kwargs):
        """
            Run detection with a given loaded forest on a list of images

            Args:
                forest (object): the forest object that you want to use during
                    detection
                input_gpath_list (list of str): the list of image paths that you want
                    to test

            Kwargs:
                output_gpath_list (list of str, optional): the parallel list of output
                    image paths for detection debugging or results; defaults to None

                    When this list is None, no debug images are written for any test
                    image; alternatively, the list can be a parallel list where some
                    values are strings and others are None
                output_scale_gpath_list (list of str, optional): the parallel list of
                    output scale image paths for detection debugging or results;
                    defaults to None

                    When this list is None, no scale images are written for any test
                    image; alternatively, the list can be a parallel list where some
                    values are strings and others are None
                mode (int, optional): the mode that the detector outputs; defaults to 0
                    0 - Hough Voting - the output is a Hough image that predicts the
                        locations of the object centroids
                    1 - Classification Map - the output is a classification probability
                        map across the entire image where no regression information
                        is utilized
                sensitivity (float, optional): the sensitivity of the detector;

                        mode = 0 - defaults to 128.0
                        mode = 1 - defaults to 255.0

                scale_list (list of float, optional): the list of floats that specifies the scales
                    to try during testing;
                    defaults to [1.0, 0.80, 0.65, 0.50, 0.40, 0.30, 0.20, 0.10]

                        scale > 1.0 - Upscale the image
                        scale = 1.0 - Original image size
                        scale < 1.0 - Downscale the image

                    The list of scales highly impacts the performance of the detector and
                    should be carefully chosen

                    The scales are applied to BOTH the width and the height of the image
                    in order to scale the image and an interpolation of OpenCV's
                    CV_INTER_LANCZOS4 is used
                batch_size (int, optional): the number of images to test at a single
                    time in parallel (if None, the number of CPUs is used); defaults to None
                nms_min_area_contour (int, optional): the minimum size of a centroid
                    candidate region; defaults to 100
                nms_min_area_overlap (float, optional, DEPRECATED): the allowable overlap in
                    bounding box predictions; defaults to 0.75
                serial (bool, optional): flag to signify whether to run detection in
                    serial; defaults to False

                        Automatically set to True for any batch that contains
                        fewer than min(batch_size / 2, 8) images

                verbose (bool, optional): verbose flag; defaults to object's verbose or
                    selectively enabled for this function

            Yields:
                (str, (list of dict)): tuple of the input image path and a list
                    of dictionaries specifying the detected bounding boxes

                    The dictionaries returned by this function are of the form:
                        centerx (int): the x position of the object's centroid

                            Note that the center of the bounding box and the location of
                            the object's centroid can be different
                        centery (int): the y position of the object's centroid

                            Note that the center of the bounding box and the location of
                            the object's centroid can be different
                        xtl (int): the top left x position of the bounding box
                        ytl (int): the top left y position of the bounding box
                        width (int): the width of the bounding box
                        height (int): the height of the bounding box
                        confidence (float): the confidence that this bounding box is of
                            the class specified by the trees used during testing
                        suppressed (bool, DEPRECATED): flag indicating whether this
                            bounding box has been marked for suppression by the
                            detection algorithm
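
            Example (hypothetical usage sketch; the forest handle and image
                paths are assumed, not taken from the original source):
                >>> gpaths = ['/data/img1.png', '/data/img2.png']
                >>> for gpath, bboxes in rf.detect(forest, gpaths, mode=0):
                ...     for bbox in (bboxes or []):
                ...         print(gpath, bbox['xtl'], bbox['ytl'],
                ...               bbox['width'], bbox['height'], bbox['confidence'])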

        """
        # Default values
        params = odict([
            ('output_gpath_list',            None),
            ('output_scale_gpath_list',      None),
            ('mode',                         0),
            ('sensitivity',                  None),
            ('scale_list',                   [1.0, 0.80, 0.65, 0.50, 0.40, 0.30, 0.20, 0.10]),
            ('_scale_num',                   None),  # This value always gets overwritten
            ('batch_size',                   None),
            ('nms_min_area_contour',         100),
            ('nms_min_area_overlap',         0.75),
            ('results_val_array',            None),  # This value always gets overwritten
            ('results_len_array',            None),  # This value always gets overwritten
            ('RESULT_LENGTH',                None),  # This value always gets overwritten
            ('serial',                       False),
            ('verbose',                      rf.verbose),
            ('quiet',                        rf.quiet),
        ])

        ut.update_existing(params, kwargs)
        #print('Unused kwargs %r' % (set(kwargs.keys()) - set(params.keys()),))

        params['RESULT_LENGTH'] = RESULT_LENGTH
        output_gpath_list = params['output_gpath_list']
        output_scale_gpath_list = params['output_scale_gpath_list']
        # We no longer want these parameters in params
        del params['output_gpath_list']
        del params['output_scale_gpath_list']

        if params['sensitivity'] is None:
            assert params['mode'] in [0, 1], 'Invalid mode provided'
            if params['mode'] == 0:
                params['sensitivity'] = 128.0
            elif params['mode'] == 1:
                params['sensitivity'] = 255.0

        # Try to determine the parallel processing batch size
        if params['batch_size'] is None:
            try:
                cpu_count = multiprocessing.cpu_count()
                if not params['quiet']:
                    print('[pyrf py] Detecting with %d CPUs' % (cpu_count, ))
                params['batch_size'] = cpu_count
            except:
                params['batch_size'] = 8

        # To eliminate downtime, batch_size could be incremented by 1 (disabled)

        # Data integrity
        assert params['mode'] >= 0, \
            'Detection mode must be non-negative'
        assert 0.0 <= params['sensitivity'], \
            'Sensitivity must be non-negative'
        assert len(params['scale_list']) > 0, \
            'The scale list cannot be empty'
        assert all(scale > 0.0 for scale in params['scale_list']), \
            'All scales must be positive'
        assert params['batch_size'] > 0, \
            'Batch size must be positive'
        assert params['nms_min_area_contour'] > 0, \
            'Non-maximum suppression minimum contour area must be positive'
        assert 0.0 <= params['nms_min_area_overlap'] <= 1.0, \
            'Non-maximum suppression minimum area overlap percentage must be between 0 and 1 (inclusive)'

        # Convert optional parameters to C-valid default options
        if output_gpath_list is None:
            output_gpath_list = [''] * len(input_gpath_list)
        else:
            assert len(output_gpath_list) == len(input_gpath_list), \
                'Output image path list is invalid or is not the same length as the input list'
            for index in range(len(output_gpath_list)):
                if output_gpath_list[index] is None:
                    output_gpath_list[index] = ''
        output_gpath_list = _cast_list_to_c(ensure_bytes_strings(output_gpath_list), C_CHAR)

        if output_scale_gpath_list is None:
            output_scale_gpath_list = [''] * len(input_gpath_list)
        else:
            assert len(output_scale_gpath_list) == len(input_gpath_list), \
                'Output scale image path list is invalid or is not the same length as the input list'
            for index in range(len(output_scale_gpath_list)):
                if output_scale_gpath_list[index] is None:
                    output_scale_gpath_list[index] = ''
        output_scale_gpath_list = _cast_list_to_c(ensure_bytes_strings(output_scale_gpath_list), C_CHAR)

        # Prepare for C
        params['_scale_num'] = len(params['scale_list'])
        params['scale_list'] = _cast_list_to_c(params['scale_list'], C_FLOAT)
        if not params['quiet']:
            print('[pyrf py] Detecting over %d scales' % (params['_scale_num'], ))

        # Run training algorithm
        batch_size = params['batch_size']
        del params['batch_size']  # Remove this value from params
        batch_num = (len(input_gpath_list) + batch_size - 1) // batch_size  # ceil division; no trailing empty batch
        # Detect for each batch
        for batch in ut.ProgressIter(range(batch_num), lbl="[pyrf py]", freq=1, invert_rate=True):
            begin = time.time()
            start = batch * batch_size
            end   = start + batch_size
            if end > len(input_gpath_list):
                end = len(input_gpath_list)
            input_gpath_list_        = input_gpath_list[start:end]
            output_gpath_list_       = output_gpath_list[start:end]
            output_scale_gpath_list_ = output_scale_gpath_list[start:end]
            num_images = len(input_gpath_list_)
            # Set image detection to be run in serial if less than half a batch to run
            if num_images < min(batch_size / 2, 8):
                params['serial'] = True
            # Final sanity check
            assert len(input_gpath_list_) == len(output_gpath_list_) and len(input_gpath_list_) == len(output_scale_gpath_list_)
            params['results_val_array'] = np.empty(num_images, dtype=NP_ARRAY_FLOAT)
            params['results_len_array'] = np.empty(num_images, dtype=C_INT)
            # Make the params_list
            params_list = [
                forest,
                _cast_list_to_c(ensure_bytes_strings(input_gpath_list_), C_CHAR),
                num_images,
                _cast_list_to_c(ensure_bytes_strings(output_gpath_list_), C_CHAR),
                _cast_list_to_c(ensure_bytes_strings(output_scale_gpath_list_), C_CHAR)
            ] + list(params.values())
            try:
                RF_CLIB.detect(rf.detector_c_obj, *params_list)
            except C.ArgumentError as ex:
                print('ERROR passing arguments to pyrf')
                print(' * params_list = %s' % (ut.repr3(params_list, nl=3),))
                ut.printex(ex)
            results_list = _extract_np_array(params['results_len_array'], params['results_val_array'], NP_ARRAY_FLOAT, NP_FLOAT32, RESULT_LENGTH)
            conclude = time.time()
            if not params['quiet']:
                print('[pyrf py] Took %r seconds to compute %d images' % (conclude - begin, num_images, ))
            for input_gpath, result_list in zip(input_gpath_list_, results_list):
                if params['mode'] == 0:
                    result_list_ = []
                    for result in result_list:
                        # Unpack result into a nice Python dictionary and return
                        temp = {}
                        temp['centerx']    = int(result[0])
                        temp['centery']    = int(result[1])
                        temp['xtl']        = int(result[2])
                        temp['ytl']        = int(result[3])
                        temp['width']      = int(result[4])
                        temp['height']     = int(result[5])
                        temp['confidence'] = float(np.round(result[6], decimals=4))
                        temp['suppressed'] = int(result[7]) == 1
                        result_list_.append(temp)
                    yield (input_gpath, result_list_)
                else:
                    yield (input_gpath, None)
            results_list = None
            params['results_val_array'] = None
            params['results_len_array'] = None
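
# A tiny standalone sketch (illustrative only, not part of the detector) of the
# ceil-division batching used above: it yields exactly enough batches to cover
# the inputs, with no trailing empty batch.
n_items, batch_size = 17, 8
batch_num = (n_items + batch_size - 1) // batch_size  # -> 3
slices = [(b * batch_size, min((b + 1) * batch_size, n_items))
          for b in range(batch_num)]  # -> [(0, 8), (8, 16), (16, 17)]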
Ejemplo n.º 29
0
def learn_featscore_normalizer(qreq_, datakw={}, learnkw={}):
    r"""
    Takes the result of queries and trains a score encoder

    Args:
        qreq_ (ibeis.QueryRequest):  query request object with hyper-parameters

    Returns:
        vtool.ScoreNormalizer: encoder

    CommandLine:
        python -m ibeis --tf learn_featscore_normalizer --show -t default:
        python -m ibeis --tf learn_featscore_normalizer --show --fsvx=0 --threshx=1
        python -m ibeis --tf learn_featscore_normalizer --show -a default:size=40 -t default:fg_on=False,lnbnn_on=False,ratio_thresh=1.0,K=1,Knorm=6,sv_on=False,normalizer_rule=name --fsvx=0 --threshx=1 --show

        python -m ibeis --tf learn_featscore_normalizer --show --disttype=ratio
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=lnbnn
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=L2_sift -t default:K=1

        python -m ibeis --tf learn_featscore_normalizer --show --disttype=L2_sift -a timectrl -t default:K=1 --db PZ_Master1
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=ratio -a timectrl -t default:K=1 --db PZ_Master1
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=lnbnn -a timectrl -t default:K=1 --db PZ_Master1

        # LOOK AT THIS
        python -m ibeis --tf learn_featscore_normalizer --show --disttype=normdist -a timectrl -t default:K=1 --db PZ_Master1
        #python -m ibeis --tf learn_featscore_normalizer --show --disttype=parzen -a timectrl -t default:K=1 --db PZ_Master1
        #python -m ibeis --tf learn_featscore_normalizer --show --disttype=norm_parzen -a timectrl -t default:K=1 --db PZ_Master1

        python -m ibeis --tf learn_featscore_normalizer --show --disttype=lnbnn --db PZ_Master1 -a timectrl -t best

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.hots.scorenorm import *  # NOQA
        >>> import ibeis
        >>> learnkw = {}
        >>> datakw = NormFeatScoreConfig.from_argv_dict()
        >>> qreq_ = ibeis.testdata_qreq_(
        >>>     defaultdb='PZ_MTEST', a=['default'], p=['default'])
        >>> encoder = learn_featscore_normalizer(qreq_, datakw, learnkw)
        >>> ut.quit_if_noshow()
        >>> encoder.visualize(figtitle=encoder.get_cfgstr())
        >>> ut.show_if_requested()
    """
    cm_list = qreq_.execute()
    print('learning scorenorm')
    print('datakw = %s' % ut.repr3(datakw))
    tp_scores, tn_scores, scorecfg = get_training_featscores(
        qreq_, cm_list, **datakw)
    _learnkw = dict(monotonize=True, adjust=2)
    _learnkw.update(learnkw)
    encoder = vt.ScoreNormalizer(**_learnkw)
    encoder.fit_partitioned(tp_scores, tn_scores, verbose=False)
    # ut.hashstr27(qreq_.get_cfgstr())

    # Maintain regen command info: TODO: generalize and integrate
    encoder._regen_info = {
        'cmd': 'python -m ibeis --tf learn_featscore_normalizer',
        'scorecfg': scorecfg,
        'learnkw': learnkw,
        'datakw': datakw,
        'qaids': qreq_.qaids,
        'daids': qreq_.daids,
        'qreq_cfg': qreq_.get_full_cfgstr(),
        'qreq_regen_info': getattr(qreq_, '_regen_info', {}),
    }
    # 'timestamp': ut.get_printable_timestamp(),

    scorecfg_safe = scorecfg
    scorecfg_safe = re.sub('[' + re.escape('()= ') + ']', '', scorecfg_safe)
    scorecfg_safe = re.sub('[' + re.escape('+*<>[]') + ']', '_', scorecfg_safe)

    hashid = ut.hashstr27(ut.to_json(encoder._regen_info))
    naidinfo = ('q%s_d%s' % (len(qreq_.qaids), len(qreq_.daids)))
    cfgstr = 'featscore_{}_{}_{}_{}'.format(scorecfg_safe, qreq_.ibs.get_dbname(), naidinfo, hashid)
    encoder.cfgstr = cfgstr
    return encoder
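
# A quick standalone illustration (assumed input string, not from the source)
# of the cfgstr sanitization above: characters in '()= ' are removed and
# characters in '+*<>[]' become underscores.
import re

scorecfg = 'lnbnn*(fg ratio)<thresh>'
safe = re.sub('[' + re.escape('()= ') + ']', '', scorecfg)
safe = re.sub('[' + re.escape('+*<>[]') + ']', '_', safe)
print(safe)  # -> lnbnn_fgratio_thresh_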
Ejemplo n.º 30
0
def merge_level_order(level_orders, topsort):
    """
    Merge orders of individual subtrees into a total ordering for
    computation.

    >>> level_orders = {
    >>>     'multi_chip_multitest': [['dummy_annot'], ['chip'], ['multitest'],
    >>>         ['multitest_score'], ],
    >>>     'multi_fgweight_multitest': [ ['dummy_annot'], ['chip', 'probchip'],
    >>>         ['keypoint'], ['fgweight'], ['multitest'], ['multitest_score'], ],
    >>>     'multi_keypoint_nnindexer': [ ['dummy_annot'], ['chip'], ['keypoint'],
    >>>         ['nnindexer'], ['multitest'], ['multitest_score'], ],
    >>>     'normal': [ ['dummy_annot'], ['chip', 'probchip'], ['keypoint'],
    >>>         ['fgweight'], ['spam'], ['multitest'], ['multitest_score'], ],
    >>>     'nwise_notch_multitest_1': [ ['dummy_annot'], ['notch'], ['multitest'],
    >>>         ['multitest_score'], ],
    >>>     'nwise_notch_multitest_2': [ ['dummy_annot'], ['notch'], ['multitest'],
    >>>         ['multitest_score'], ],
    >>>     'nwise_notch_notchpair_1': [ ['dummy_annot'], ['notch'], ['notchpair'],
    >>>         ['multitest'], ['multitest_score'], ],
    >>>     'nwise_notch_notchpair_2': [ ['dummy_annot'], ['notch'], ['notchpair'],
    >>>         ['multitest'], ['multitest_score'], ],
    >>> }
    >>> topsort = [u'dummy_annot', u'notch', u'probchip', u'chip', u'keypoint',
    >>>            u'fgweight', u'nnindexer', u'spam', u'notchpair', u'multitest',
    >>>            u'multitest_score']
    >>> print(ut.repr3(ut.merge_level_order(level_orders, topsort)))

    EG2:
        level_orders = {u'normal': [[u'dummy_annot'], [u'chip', u'probchip'], [u'keypoint'], [u'fgweight'], [u'spam']]}
        topsort = [u'dummy_annot', u'probchip', u'chip', u'keypoint', u'fgweight', u'spam']
    """

    import utool as ut
    if False:
        compute_order = []
        level_orders = ut.map_dict_vals(ut.total_flatten, level_orders)
        level_sets = ut.map_dict_vals(set, level_orders)
        for tablekey in topsort:
            compute_order.append((tablekey, [groupkey for groupkey, set_ in level_sets.items() if tablekey in set_]))
        return compute_order
    else:
        # Do on common subgraph
        import itertools
        # Pointer to the current level: start at the end and work your way up.
        main_ptr = len(topsort) - 1
        stack = []
        #from six.moves import zip_longest
        keys = list(level_orders.keys())
        type_to_ptr = {key: -1 for key in keys}
        print('level_orders = %s' % (ut.repr3(level_orders),))
        for count in itertools.count(0):
            print('----')
            print('count = %r' % (count,))
            ptred_levels = []
            for key in keys:
                levels = level_orders[key]
                ptr = type_to_ptr[key]
                try:
                    level = tuple(levels[ptr])
                except IndexError:
                    level = None
                ptred_levels.append(level)
            print('ptred_levels = %r' % (ptred_levels,))
            print('main_ptr = %r' % (main_ptr,))
            # groupkeys, groupxs = ut.group_indices(ptred_levels)
            # Group keys are tablenames
            # They point to the (type) of the input
            # num_levelkeys = len(ut.total_flatten(ptred_levels))
            groupkeys, groupxs = ut.group_indices(ptred_levels)
            main_idx = None
            while main_idx is None and main_ptr >= 0:
                target = topsort[main_ptr]
                print('main_ptr = %r' % (main_ptr,))
                print('target = %r' % (target,))
                # main_idx = ut.listfind(groupkeys, (target,))
                # if main_idx is None:
                possible_idxs = [idx for idx, keytup in enumerate(groupkeys) if keytup is not None and target in keytup]
                if len(possible_idxs) == 1:
                    main_idx = possible_idxs[0]
                else:
                    main_idx = None
                if main_idx is None:
                    main_ptr -= 1
            if main_idx is None:
                print('break I')
                break
            found_groups = ut.apply_grouping(keys, groupxs)[main_idx]
            print('found_groups = %r' % (found_groups,))
            stack.append((target, found_groups))
            for k in found_groups:
                type_to_ptr[k] -= 1

            if len(found_groups) == len(keys):
                main_ptr -= 1
                if main_ptr < 0:
                    print('break E')
                    break
        print('stack = %s' % (ut.repr3(stack),))
        print('have = %r' % (sorted(ut.take_column(stack, 0)),))
        print('need = %s' % (sorted(ut.total_flatten(level_orders.values())),))
        compute_order = stack[::-1]

    return compute_order
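
# A plain-Python sketch of the assumed semantics of ut.group_indices used in
# merge_level_order: it returns the unique keys and, for each key, the list of
# indices where that key occurs (ut.apply_grouping then selects by those
# indices). Reimplemented here for illustration only.
def group_indices(items):
    groups = {}
    for idx, item in enumerate(items):
        groups.setdefault(item, []).append(idx)
    keys = list(groups.keys())
    return keys, [groups[key] for key in keys]

keys, groupxs = group_indices([('a',), ('b',), ('a',), None])
# keys    -> [('a',), ('b',), None]
# groupxs -> [[0, 2], [1], [3]]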
Ejemplo n.º 31
0
def cluster_query(model, query_vars=None, evidence=None, soft_evidence=None,
                  method=None, operation='maximize'):
    """
    CommandLine:
        python -m ibeis.algo.hots.bayes --exec-cluster_query --show

    ParamGrid:
        >>> param_grid = dict(
        >>>     #method=['approx', 'bf', 'bp'],
        >>>     method=['approx', 'bp'],
        >>> )
        >>> combos = ut.all_dict_combinations(param_grid)
        >>> index = 0
        >>> keys = 'method'.split(', ')
        >>> method, = ut.dict_take(combos[index], keys)

    Setup:
        >>> from ibeis.algo.hots.bayes import *  # NOQA
        >>> verbose = True
        >>> other_evidence = {}
        >>> name_evidence = [1, None, None, 0]
        >>> score_evidence = [2, 0, 2]
        >>> special_names = ['fred', 'sue', 'tom', 'paul']
        >>> model = make_name_model(
        >>>     num_annots=4, num_names=4, num_scores=3, verbose=True, mode=1,
        >>>     special_names=special_names)
        >>> method = None
        >>> model, evidence, soft_evidence = update_model_evidence(
        >>>     model, name_evidence, score_evidence, other_evidence)
        >>> evidence = model._ensure_internal_evidence(evidence)
        >>> query_vars = ut.list_getattr(model.ttype2_cpds['name'], 'variable')

    Example:
        >>> # DISABLE_DOCTEST
        >>> query_results = cluster_query(model, query_vars, evidence,
        >>>                               method=method)
        >>> print(ut.repr2(query_results['top_assignments'], nl=1))
        >>> ut.quit_if_noshow()
        >>> pgm_ext.show_model(model, evidence=evidence, **query_results)
        >>> ut.show_if_requested()
    """
    evidence = model._ensure_internal_evidence(evidence)
    if query_vars is None:
        query_vars = model.nodes()
    orig_query_vars = query_vars  # NOQA
    query_vars = ut.setdiff(query_vars, list(evidence.keys()))

    if method is None:
        method = ut.get_argval('--method', type_=str, default='bp')

    reduced_joint = compute_reduced_joint(model, query_vars, evidence,
                                          method, operation)

    new_reduced_joint = collapse_factor_labels(model, reduced_joint, evidence)

    if False:
        report_partitioning_statistics(new_reduced_joint)

    # FIXME: are these max marginals?
    max_marginals = {}
    for i, var in enumerate(query_vars):
        one_out = query_vars[:i] + query_vars[i + 1:]
        max_marginals[var] = new_reduced_joint.marginalize(one_out,
                                                           inplace=False)
        # max_marginals[var] = joint2.maximize(one_out, inplace=False)
    factor_list = max_marginals.values()

    # Now find the most likely state
    reduced_variables = new_reduced_joint.variables
    new_state_idxs = np.array(new_reduced_joint._row_labels(asindex=True))
    new_values = new_reduced_joint.values.ravel()
    sortx = new_values.argsort()[::-1]
    sort_new_state_idxs = new_state_idxs.take(sortx, axis=0)
    sort_new_values = new_values.take(sortx)
    sort_new_states = list(zip(*[
        ut.dict_take(model.statename_dict[var], idx)
        for var, idx in
        zip(reduced_variables, sort_new_state_idxs.T)]))

    # Better map assignment based on knowledge of labels
    map_assign = dict(zip(reduced_variables, sort_new_states[0]))

    sort_reduced_rowstr_lbls = [
        ut.repr2(dict(zip(reduced_variables, lbls)), explicit=True,
                 nobraces=True,
                 strvals=True)
        for lbls in sort_new_states
    ]

    top_assignments = list(zip(sort_reduced_rowstr_lbls[:4], sort_new_values))
    if len(sort_new_values) > 3:
        top_assignments += [('other', 1 - sum(sort_new_values[:4]))]
    query_results = {
        'factor_list': factor_list,
        'top_assignments': top_assignments,
        'map_assign': map_assign,
        'method': method,
    }
    print('query_results = %s' % (ut.repr3(query_results, nl=2),))
    return query_results
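
# A tiny numpy-only sketch (not pgmpy) of the marginalize-all-but-one step
# above: for each variable, reduce the joint over every other axis. Summing
# gives ordinary marginals; taking the max instead gives max-marginals, which
# matches the commented maximize alternative in the code.
import numpy as np

joint = np.random.rand(2, 3)
joint /= joint.sum()           # normalize into a joint distribution
marg_a = joint.sum(axis=1)     # marginal of the first variable (2 states)
marg_b = joint.sum(axis=0)     # marginal of the second variable (3 states)
maxmarg_a = joint.max(axis=1)  # max-marginal of the first variable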
Ejemplo n.º 32
0
def cluster_query(model, query_vars=None, evidence=None, soft_evidence=None,
                  method=None, operation='maximize'):
    """
    CommandLine:
        python -m ibeis.algo.hots.bayes --exec-cluster_query --show

    GridParams:
        >>> param_grid = dict(
        >>>     #method=['approx', 'bf', 'bp'],
        >>>     method=['approx', 'bp'],
        >>> )
        >>> combos = ut.all_dict_combinations(param_grid)
        >>> index = 0
        >>> keys = 'method'.split(', ')
        >>> method, = ut.dict_take(combos[index], keys)

    GridSetup:
        >>> from ibeis.algo.hots.bayes import *  # NOQA
        >>> verbose = True
        >>> other_evidence = {}
        >>> name_evidence = [1, None, None, 0]
        >>> score_evidence = [2, 0, 2]
        >>> special_names = ['fred', 'sue', 'tom', 'paul']
        >>> model = make_name_model(
        >>>     num_annots=4, num_names=4, num_scores=3, verbose=True, mode=1,
        >>>     special_names=special_names)
        >>> method = None
        >>> model, evidence, soft_evidence = update_model_evidence(
        >>>     model, name_evidence, score_evidence, other_evidence)
        >>> evidence = model._ensure_internal_evidence(evidence)
        >>> query_vars = ut.list_getattr(model.ttype2_cpds[NAME_TTYPE], 'variable')

    GridExample:
        >>> # DISABLE_DOCTEST
        >>> query_results = cluster_query(model, query_vars, evidence,
        >>>                               method=method)
        >>> print(ut.repr2(query_results['top_assignments'], nl=1))
        >>> ut.quit_if_noshow()
        >>> from ibeis.algo.hots import pgm_viz
        >>> pgm_viz.show_model(model, evidence=evidence, **query_results)
        >>> ut.show_if_requested()
    """
    evidence = model._ensure_internal_evidence(evidence)
    if query_vars is None:
        query_vars = model.nodes()
    orig_query_vars = query_vars  # NOQA
    query_vars = ut.setdiff(query_vars, list(evidence.keys()))

    if method is None:
        method = ut.get_argval('--method', type_=str, default='bp')

    reduced_joint = compute_reduced_joint(model, query_vars, evidence,
                                          method, operation)

    new_reduced_joint = collapse_factor_labels(model, reduced_joint, evidence)

    if False:
        report_partitioning_statistics(new_reduced_joint)

    # FIXME: are these max marginals?
    max_marginals = {}
    for i, var in enumerate(query_vars):
        one_out = query_vars[:i] + query_vars[i + 1:]
        max_marginals[var] = new_reduced_joint.marginalize(one_out,
                                                           inplace=False)
        # max_marginals[var] = joint2.maximize(one_out, inplace=False)
    factor_list = max_marginals.values()

    # Now find the most likely state
    reduced_variables = new_reduced_joint.variables
    new_state_idxs = np.array(new_reduced_joint._row_labels(asindex=True))
    new_values = new_reduced_joint.values.ravel()
    sortx = new_values.argsort()[::-1]
    sort_new_state_idxs = new_state_idxs.take(sortx, axis=0)
    sort_new_values = new_values.take(sortx)
    sort_new_states = list(zip(*[
        ut.dict_take(model.statename_dict[var], idx)
        for var, idx in
        zip(reduced_variables, sort_new_state_idxs.T)]))

    # Better map assignment based on knowledge of labels
    map_assign = dict(zip(reduced_variables, sort_new_states[0]))

    sort_reduced_rowstr_lbls = [
        ut.repr2(dict(zip(reduced_variables, lbls)), explicit=True,
                 nobraces=True,
                 strvals=True)
        for lbls in sort_new_states
    ]

    top_assignments = list(zip(sort_reduced_rowstr_lbls[:4], sort_new_values))
    if len(sort_new_values) > 3:
        top_assignments += [('other', 1 - sum(sort_new_values[:4]))]
    query_results = {
        'factor_list': factor_list,
        'top_assignments': top_assignments,
        'map_assign': map_assign,
        'method': method,
    }
    print('query_results = %s' % (ut.repr3(query_results, nl=2),))
    return query_results