Code example #1
File: register_files.py Project: Erotemic/local
    def update_registry(drive):
        print('Updating registered files in %r' % (drive,))
        # Update existing files
        fpath_exists_list = list(map(exists, ut.ProgIter(drive.fpath_list, 'checkexist fpath', freq=1000)))
        dpath_exists_list = list(map(exists, ut.ProgIter(drive.dpath_list, 'checkexist dpath', freq=1000)))
        if all(fpath_exists_list):
            print('No change in file structure')
        else:
            print('%d/%d files no longer exist' % (
                len(drive.fpath_list) - sum(fpath_exists_list),
                len(drive.fpath_list)))
            removed_fpaths = ut.compress(drive.fpath_list, ut.not_list(fpath_exists_list))
            print('removed_fpaths = %s' % (ut.repr2(removed_fpaths),))
        if all(dpath_exists_list):
            print('No change in dpath structure')
        else:
            print('%d/%d dirs no longer exist' % (
                len(drive.dpath_list) - sum(dpath_exists_list),
                len(drive.dpath_list)))
            removed_dpaths = ut.compress(
                drive.dpath_list,
                ut.not_list(dpath_exists_list))
            print('removed_dpaths = %s' % (ut.repr2(removed_dpaths),))

        drive.fpath_list = ut.compress(drive.fpath_list, fpath_exists_list)
        drive.dpath_list = ut.compress(drive.dpath_list, dpath_exists_list)
        drive.cache.save('fpath_list', drive.fpath_list)
        drive.cache.save('dpath_list', drive.dpath_list)
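
As a quick reference for the rest of this page: every snippet here leans on utool's repr2 pretty-printer. Below is a minimal sketch (assuming only that utool is importable) of the keyword arguments that recur throughout these examples, such as nl, precision, and strvals; exact output formatting may vary between utool versions.

import utool as ut

data = {'b': [1.2345, 2.3456], 'a': 'x'}
print(ut.repr2(data))                # compact single-line repr
print(ut.repr2(data, nl=1))          # break the container across lines
print(ut.repr2(data, precision=2))   # round floats to 2 decimal places
print(ut.repr2(data, strvals=True))  # render values without enclosing quotes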
Code example #2
File: pgm_viz.py Project: heroinlin/ibeis
def _debug_repr_model(model):
    cpd_code_list = [_debug_repr_cpd(cpd) for cpd in model.cpds]
    code_fmt = ut.codeblock(
        '''
        import numpy as np
        import pgmpy
        import pgmpy.inference
        import pgmpy.factors
        import pgmpy.models

        {cpds}

        cpd_list = {nodes}
        input_graph = {edges}
        model = pgmpy.models.BayesianModel(input_graph)
        model.add_cpds(*cpd_list)
        infr = pgmpy.inference.BeliefPropagation(model)
        ''')

    code = code_fmt.format(
        cpds='\n'.join(cpd_code_list),
        nodes=ut.repr2(sorted(model.nodes()), strvals=True),
        edges=ut.repr2(sorted(model.edges()), nl=1),
    )
    ut.print_code(code)
    ut.copy_text_to_clipboard(code)
Code example #3
def on_pick(event, infr=None):
    import wbia.plottool as pt

    logger.info('ON PICK: %r' % (event, ))
    artist = event.artist
    plotdat = pt.get_plotdat_dict(artist)
    if plotdat:
        if 'node' in plotdat:
            all_node_data = ut.sort_dict(plotdat['node_data'].copy())
            visual_node_data = ut.dict_subset(all_node_data,
                                              infr.visual_node_attrs, None)
            node_data = ut.delete_dict_keys(all_node_data,
                                            infr.visual_node_attrs)
            node = plotdat['node']
            node_data['degree'] = infr.graph.degree(node)
            node_label = infr.pos_graph.node_label(node)
            logger.info('visual_node_data: ' +
                        ut.repr2(visual_node_data, nl=1))
            logger.info('node_data: ' + ut.repr2(node_data, nl=1))
            ut.cprint('node: ' + ut.repr2(plotdat['node']), 'blue')
            logger.info('(pcc) node_label = %r' % (node_label, ))
            logger.info('artist = %r' % (artist, ))
        elif 'edge' in plotdat:
            all_edge_data = ut.sort_dict(plotdat['edge_data'].copy())
            logger.info(infr.repr_edge_data(all_edge_data))
            ut.cprint('edge: ' + ut.repr2(plotdat['edge']), 'blue')
            logger.info('artist = %r' % (artist, ))
        else:
            logger.info('???: ' + ut.repr2(plotdat))
    logger.info(ut.get_timestamp())
Code example #4
File: pgm_viz.py Project: whaozl/ibeis
def _debug_repr_model(model):
    cpd_code_list = [_debug_repr_cpd(cpd) for cpd in model.cpds]
    code_fmt = ut.codeblock('''
        import numpy as np
        import pgmpy
        import pgmpy.inference
        import pgmpy.factors
        import pgmpy.models

        {cpds}

        cpd_list = {nodes}
        input_graph = {edges}
        model = pgmpy.models.BayesianModel(input_graph)
        model.add_cpds(*cpd_list)
        infr = pgmpy.inference.BeliefPropagation(model)
        ''')

    code = code_fmt.format(
        cpds='\n'.join(cpd_code_list),
        nodes=ut.repr2(sorted(model.nodes()), strvals=True),
        edges=ut.repr2(sorted(model.edges()), nl=1),
    )
    ut.print_code(code)
    ut.copy_text_to_clipboard(code)
Code example #5
def api_remote_wbia(remote_wbia_url, remote_api_func, remote_wbia_port=5001, **kwargs):
    import requests

    if GLOBAL_APP_ENABLED and GLOBAL_APP is None:
        raise ValueError('Flask has not been initialized')
    api_name = remote_api_func.__name__
    route_list = list(GLOBAL_APP.url_map.iter_rules(api_name))
    assert len(route_list) == 1, 'More than one route resolved'
    route = route_list[0]
    api_route = route.rule
    assert api_route.startswith('/api/'), 'Must be an API route'
    method_list = sorted(list(route.methods - set(['HEAD', 'OPTIONS'])))
    remote_api_method = method_list[0].upper()

    assert api_route is not None, 'Route could not be found'

    args = (remote_wbia_url, remote_wbia_port, api_route)
    remote_api_url = 'http://%s:%s%s' % args
    headers = {'Authorization': get_url_authorization(remote_api_url)}

    for key in kwargs.keys():
        value = kwargs[key]
        if isinstance(value, (tuple, list, set)):
            value = str(list(value))
        kwargs[key] = value

    logger.info('[REMOTE] %s' % ('-' * 80,))
    logger.info('[REMOTE] Calling remote IBEIS API: %r' % (remote_api_url,))
    logger.info('[REMOTE] \tMethod:  %r' % (remote_api_method,))
    if ut.DEBUG2 or ut.VERBOSE:
        logger.info('[REMOTE] \tHeaders: %s' % (ut.repr2(headers),))
        logger.info('[REMOTE] \tKWArgs:  %s' % (ut.repr2(kwargs),))

    # Make request to server
    try:
        if remote_api_method == 'GET':
            req = requests.get(remote_api_url, headers=headers, data=kwargs, verify=False)
        elif remote_api_method == 'POST':
            req = requests.post(
                remote_api_url, headers=headers, data=kwargs, verify=False
            )
        elif remote_api_method == 'PUT':
            req = requests.put(remote_api_url, headers=headers, data=kwargs, verify=False)
        elif remote_api_method == 'DELETE':
            req = requests.delete(
                remote_api_url, headers=headers, data=kwargs, verify=False
            )
        else:
            message = '_api_result got unsupported method=%r' % (remote_api_method,)
            raise KeyError(message)
    except requests.exceptions.ConnectionError as ex:
        message = '_api_result could not connect to server %s' % (ex,)
        raise IOError(message)
    response = req.text
    converted = ut.from_json(response)
    response = converted.get('response', None)
    logger.info('[REMOTE] got response')
    if ut.DEBUG2:
        logger.info('response = %s' % (response,))
    return response
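
For context, a hypothetical invocation of the helper above might look like the following; the host and the choice of forwarded API function are assumptions for illustration only.

# Hypothetical usage: forward ibs.get_valid_aids to a remote wbia server.
aid_list = api_remote_wbia('localhost', ibs.get_valid_aids,
                           remote_wbia_port=5001)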
Code example #6
File: register_files.py Project: dilas12345/local
    def update_registry(drive):
        print('Updating registered files in %r' % (drive, ))
        # Update existing files
        fpath_exists_list = list(
            map(exists,
                ut.ProgIter(drive.fpath_list, 'checkexist fpath', freq=1000)))
        dpath_exists_list = list(
            map(exists,
                ut.ProgIter(drive.dpath_list, 'checkexist dpath', freq=1000)))
        if all(fpath_exists_list):
            print('No change in file structure')
        else:
            print('%d/%d files no longer exist' %
                  (len(drive.fpath_list) - sum(fpath_exists_list),
                   len(drive.fpath_list)))
            removed_fpaths = ut.compress(drive.fpath_list,
                                         ut.not_list(fpath_exists_list))
            print('removed_fpaths = %s' % (ut.repr2(removed_fpaths), ))
        if all(dpath_exists_list):
            print('No change in dpath structure')
        else:
            print('%d/%d dirs no longer exist' %
                  (len(drive.dpath_list) - sum(dpath_exists_list),
                   len(drive.dpath_list)))
            removed_dpaths = ut.compress(drive.dpath_list,
                                         ut.not_list(dpath_exists_list))
            print('removed_dpaths = %s' % (ut.repr2(removed_dpaths), ))

        drive.fpath_list = ut.compress(drive.fpath_list, fpath_exists_list)
        drive.dpath_list = ut.compress(drive.dpath_list, dpath_exists_list)
        drive.cache.save('fpath_list', drive.fpath_list)
        drive.cache.save('dpath_list', drive.dpath_list)
Code example #7
 def _print_previous_loop_statistics(infr, count):
     # Print stats about what happened in this loop
     history = infr.metrics_list[-count:]
     recover_blocks = ut.group_items([
         (k, sum(1 for i in g))
         for k, g in it.groupby(ut.take_column(history, 'recovering'))
     ]).get(True, [])
     infr.print((
         'Recovery mode entered {} times, '
         'made {} recovery decisions.').format(
             len(recover_blocks), sum(recover_blocks)), color='green')
     testaction_hist = ut.dict_hist(ut.take_column(history, 'test_action'))
     infr.print(
         'Test Action Histogram: {}'.format(
             ut.repr4(testaction_hist, si=True)), color='yellow')
     if infr.params['inference.enabled']:
         action_hist = ut.dict_hist(
             ut.emap(frozenset, ut.take_column(history, 'action')))
         infr.print(
             'Inference Action Histogram: {}'.format(
                 ut.repr2(action_hist, si=True)), color='yellow')
     infr.print(
         'Decision Histogram: {}'.format(ut.repr2(ut.dict_hist(
             ut.take_column(history, 'pred_decision')
         ), si=True)), color='yellow')
     infr.print(
         'User Histogram: {}'.format(ut.repr2(ut.dict_hist(
             ut.take_column(history, 'user_id')
         ), si=True)), color='yellow')
Code example #8
def make_wbia_cell_list(ibs):
    cell_template_list = get_default_cell_template_list(ibs)
    autogen_str = '# python -m wbia autogen_ipynb --launch --dbdir %r' % (ibs.get_dbdir())
    # autogen_str = ut.make_autogen_str()
    dbname = ibs.get_dbname()
    dbdir = ibs.dbdir
    default_acfgstr = ut.get_argval('-a', type_=str, default='default:is_known=True')

    asreport = ut.get_argflag('--asreport')

    default_pcfgstr_list = ut.get_argval(('-t', '-p'), type_=list, default='default')
    default_pcfgstr = ut.repr3(default_pcfgstr_list, nobr=True)

    if asreport:
        annotconfig_list_body = ut.codeblock(ut.repr2(default_acfgstr))
        pipeline_list_body = ut.codeblock(default_pcfgstr)
    else:
        annotconfig_list_body = ut.codeblock(
            ut.repr2(default_acfgstr)
            + '\n'
            + ut.codeblock(
                """
            #'default:has_any=(query,),dpername=1,exclude_reference=True',
            #'default:is_known=True',
            #'default:is_known=True,minqual=good,require_timestamp=True,dcrossval_enc=1,view=left'
            #'default:qsame_imageset=True,been_adjusted=True,excluderef=True,qsize=10,dsize=20',
            #'default:require_timestamp=True,min_timedelta=3600',
            #'default:species=primary',
            #'unctrl:been_adjusted=True',
            #'timectrl:',
            #'timectrl:view=primary,minqual=good',

            #'default:minqual=good,require_timestamp=True,view=left,dcrossval_enc=1,joinme=1',
            #'default:minqual=good,require_timestamp=True,view=right,dcrossval_enc=1,joinme=1',
            #'default:minqual=ok,require_timestamp=True,view=left,dcrossval_enc=1,joinme=2',
            #'default:minqual=ok,require_timestamp=True,view=right,dcrossval_enc=1,joinme=2',

            """
            )
        )
        pipeline_list_body = ut.codeblock(
            default_pcfgstr
            + '\n'
            + ut.codeblock(
                """
            #'default',
            #'default:K=1,AI=False,QRH=True',
            #'default:K=1,RI=True,AI=False',
            #'default:K=1,adapteq=True',
            #'default:fg_on=[True,False]',
            """
            )
        )

    locals_ = locals()
    _format = partial(ut.format_cells, locals_=locals_)
    cell_list = ut.flatten(map(_format, cell_template_list))
    return cell_list
Code example #9
def filter_duplicate_acfgs(expanded_aids_list, acfg_list, acfg_name_list, verbose=None):
    """
    Removes configs with the same expanded aids list

    CommandLine:
        # The following will trigger this function:
        wbia -m wbia get_annotcfg_list:0 -a timectrl timectrl:view=left --db PZ_MTEST

    """
    from wbia.expt import annotation_configs

    if verbose is None:
        verbose = ut.VERBOSE
    acfg_list_ = []
    expanded_aids_list_ = []
    seen_ = ut.ddict(list)
    for acfg, (qaids, daids) in zip(acfg_list, expanded_aids_list):
        key = (ut.hashstr_arr27(qaids, 'qaids'), ut.hashstr_arr27(daids, 'daids'))
        if key in seen_:
            seen_[key].append(acfg)
            continue
        else:
            seen_[key].append(acfg)
            expanded_aids_list_.append((qaids, daids))
            acfg_list_.append(acfg)
    if verbose:
        duplicate_configs = dict(
            [(key_, val_) for key_, val_ in seen_.items() if len(val_) > 1]
        )
        if len(duplicate_configs) > 0:
            logger.info('The following configs produced duplicate annotation configs')
            for key, val in duplicate_configs.items():
                # Print the difference between the duplicate configs
                _tup = annotation_configs.compress_acfg_list_for_printing(val)
                nonvaried_compressed_dict, varied_compressed_dict_list = _tup
                logger.info('+--')
                logger.info('key = %r' % (key,))
                logger.info(
                    'duplicate_varied_cfgs = %s'
                    % (ut.repr2(varied_compressed_dict_list),)
                )
                logger.info(
                    'duplicate_nonvaried_cfgs = %s'
                    % (ut.repr2(nonvaried_compressed_dict),)
                )
                logger.info('L__')

        if verbose >= 1:
            logger.info(
                '[harn.help] parsed %d / %d unique annot configs'
                % (len(acfg_list_), len(acfg_list))
            )
        if verbose > 2:
            logger.info('[harn.help] parsed from: %r' % (acfg_name_list,))
    return expanded_aids_list_, acfg_list_
Code example #10
File: dataset.py Project: simplesoftMX/ibeis_cnn
 def print_dataset_info(data, labels, key):
     labelhist = {key: len(val) for key, val in ut.group_items(labels, labels).items()}
     stats_dict = ut.get_stats(data.ravel())
     ut.delete_keys(stats_dict, ['shape', 'nMax', 'nMin'])
     print('[dataset] Dataset Info: ')
     print('[dataset] * Data:')
     print('[dataset]     %s_data(shape=%r, dtype=%r)' % (key, data.shape, data.dtype))
     print('[dataset]     %s_memory(data) = %r' % (key, ut.get_object_size_str(data),))
     print('[dataset]     %s_stats(data) = %s' % (key, ut.repr2(stats_dict, precision=2),))
     print('[dataset] * Labels:')
     print('[dataset]     %s_labels(shape=%r, dtype=%r)' % (key, labels.shape, labels.dtype))
     print('[dataset]     %s_label histogram = %s' % (key, ut.repr2(labelhist)))
Code example #11
 def repr_edge_data(infr, all_edge_data, visual=True):
     visual_edge_data = {
         k: v
         for k, v in all_edge_data.items() if k in infr.visual_edge_attrs
     }
     edge_data = ut.delete_dict_keys(all_edge_data.copy(),
                                     infr.visual_edge_attrs)
     lines = []
     if visual:
         lines += ['visual_edge_data: ' + ut.repr2(visual_edge_data, nl=1)]
     lines += [('edge_data: ' + ut.repr2(edge_data, nl=1))]
     return '\n'.join(lines)
Code example #12
File: bayes.py Project: whaozl/ibeis
def report_partitioning_statistics(new_reduced_joint):
    # compute partitioning statistics
    import vtool as vt
    vals, idxs = vt.group_indices(new_reduced_joint.values.ravel())
    #groupsize = list(map(len, idxs))
    #groupassigns = ut.unflat_vecmap(new_reduced_joint.assignment, idxs)
    all_states = new_reduced_joint._row_labels(asindex=True)
    clusterstats = [tuple(sorted(list(ut.dict_hist(a).values())))
                    for a in all_states]
    grouped_vals = ut.group_items(new_reduced_joint.values.ravel(),
                                  clusterstats)

    #probs_assigned_to_clustertype = [(
    #    sorted(np.unique(np.array(b).round(decimals=5)).tolist())[::-1], a)
    #    for a, b in grouped_vals.items()]
    probs_assigned_to_clustertype = [(
        ut.dict_hist(np.array(b).round(decimals=5)), a)
        for a, b in grouped_vals.items()]
    sortx = ut.argsort([max(c[0].keys())
                        for c in probs_assigned_to_clustertype])
    probs_assigned_to_clustertype = ut.take(probs_assigned_to_clustertype, sortx)

    # This is a list of 2-tuples. The first item maps each unique
    # probability assigned to a cluster type to the number of times it was
    # assigned; the cluster type is the second item. Every number in a
    # cluster type counts how many annotations were assigned to a specific
    # label, and the length of that list is the total number of labels. For
    # all low scores you will see [[{somenum: 1}, {0: 800}], [1, 1, 1, ... 1]],
    # indicating that the assignment of everyone to a different label
    # happened once with probability somenum and 800 times with probability 0.

    #print(sorted([(b, a) for a, b in ut.map_dict_vals(sum, x)]).items())
    #z = sorted([(b, a) for a, b in ut.map_dict_vals(sum, grouped_vals).items()])
    print(ut.repr2(probs_assigned_to_clustertype, nl=2, precision=2, sorted_=True))
Code example #13
 def print_annot_info():
     logger.info('[interact_chip] Annotation Info = ' +
                 ut.repr2(annot_info, nl=4))
     logger.info('config2_ = %r' % (config2_, ))
     if config2_ is not None:
         logger.info('config2_.__dict__ = %s' %
                     (ut.repr3(config2_.__dict__), ))
Code example #14
def draw_precision_recall_curve_(recall_range_,
                                 p_interp_curve,
                                 title_pref=None,
                                 fnum=1):
    import wbia.plottool as pt

    if recall_range_ is None:
        recall_range_ = np.array([])
        p_interp_curve = np.array([])
    fig = pt.figure(fnum=fnum, docla=True, doclf=True)  # NOQA

    if p_interp_curve.size == 0:
        ave_p = np.nan
    else:
        ave_p = p_interp_curve.sum() / p_interp_curve.size

    pt.plot2(
        recall_range_,
        p_interp_curve,
        marker='o--',
        x_label='recall',
        y_label='precision',
        unitbox=True,
        flipx=False,
        color='r',
        title='Interpolated Precision Vs Recall\n' + 'avep = %r' % ave_p,
    )
    logger.info('Interpolated Precision')
    logger.info(ut.repr2(list(zip(recall_range_, p_interp_curve))))
Code example #15
File: main_commands.py Project: simplesoftMX/ibeis
def preload_commands(dbdir, **kwargs):
    """ Preload commands work with command line arguments and global caches """
    #print('[main_cmd] preload_commands')
    if params.args.dump_argv:
        print(ut.repr2(vars(params.args), sorted_=False))
    if params.args.dump_global_cache:
        ut.global_cache_dump()  # debug command, dumps to stdout
    if params.args.set_workdir is not None:
        sysres.set_workdir(params.args.set_workdir)
    if params.args.get_workdir:
        print(' Current work dir = %s' % sysres.get_workdir())
    if params.args.logdir is not None:
        sysres.set_logdir(params.args.logdir)
    if params.args.get_logdir:
        print(' Current local  log dir = %s' % (sysres.get_logdir_local(),))
        print(' Current global log dir = %s' % (sysres.get_logdir_global(),))
    if params.args.view_logdir:
        ut.view_directory(sysres.get_logdir_local())
        ut.view_directory(sysres.get_logdir_global())
    if params.args.view_logdir_local:
        ut.view_directory(sysres.get_logdir_local())
    if params.args.view_logdir_global:
        ut.view_directory(sysres.get_logdir_global())
    if ut.get_argflag('--vwd'):
        vwd()
    if ut.get_argflag('--vdq'):
        print('got arg --vdq')
        vdq(dbdir)
    if kwargs.get('delete_ibsdir', False):
        ibsfuncs.delete_ibeis_database(dbdir)
    if params.args.preload_exit:
        print('[main_cmd] preload exit')
        sys.exit(0)
Code example #16
def make_option_dict(options, shortcuts=True):
    """ helper for popup menu callbacks """
    keys = ut.take_column(options, 0)
    values = ut.take_column(options, 1)
    if shortcuts:
        shortcut_keys = [
            key[key.find('&') + 1] if '&' in key else None for key in keys
        ]
        try:
            ut.assert_unique(shortcut_keys,
                             name='shortcut_keys',
                             ignore=[None])
        except AssertionError:
            print('shortcut_keys = %r' % (shortcut_keys, ))
            print('options = %r' % (ut.repr2(options), ))
            raise
        shortcut_dict = {
            sc_key: val
            # sc_key: (make_option_dict(val, shortcuts=True)
            #         if isinstance(val, list) else val)
            for (sc_key, val) in zip(shortcut_keys, values)
            if sc_key is not None and not isinstance(val, list)
        }
        return shortcut_dict
    else:
        ut.assert_unique(keys, name='option_keys')
        fulltext_dict = {
            key: (make_option_dict(val, shortcuts=False) if isinstance(
                val, list) else val)
            for (key, val) in zip(keys, values) if key is not None
        }
        return fulltext_dict
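
A brief usage sketch for the helper above: the '&' in a label marks its shortcut character, so the returned dict maps that character to the option's callback. The labels and callbacks here are made up for illustration.

def on_new():   # placeholder callback
    pass

def on_quit():  # placeholder callback
    pass

options = [('&New', on_new), ('&Quit', on_quit)]
shortcut_dict = make_option_dict(options, shortcuts=True)
assert shortcut_dict == {'N': on_new, 'Q': on_quit}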
Code example #17
File: devcases.py Project: yeniherdiyeni/wildbook-ia
def load_gztest(ibs):
    r"""
    CommandLine:
        python -m wbia.algo.hots.special_query --test-load_gztest

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots.devcases import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb('GZ_ALL')
    """
    from os.path import join
    from wbia.algo.hots import match_chips4 as mc4

    dir_ = ut.get_module_dir(mc4)
    eval_text = ut.read_from(join(dir_, 'GZ_TESTTUP.txt'))
    testcases = eval(eval_text)
    count_dict = ut.count_dict_vals(testcases)
    logger.info(ut.repr2(count_dict))

    testtup_list = ut.flatten(
        ut.dict_take_list(
            testcases,
            [
                'vsone_wins', 'vsmany_outperformed', 'vsmany_dominates',
                'vsmany_wins'
            ],
        ))
    qaid_list = [testtup.qaid_t for testtup in testtup_list]
    visual_uuids = ibs.get_annot_visual_uuids(qaid_list)
    visual_uuids
Code example #18
File: devcases.py Project: yeniherdiyeni/wildbook-ia
def find_close_incorrect_match(ibs, qaids):
    use_cache = False
    save_qcache = False
    cfgdict_vsmany = dict(
        index_method='single',
        pipeline_root='vsmany',
    )
    qres_vsmany_list, qreq_vsmany_ = ibs.query_chips(
        qaids,
        ibs.get_valid_aids(),
        cfgdict=cfgdict_vsmany,
        return_request=True,
        use_cache=use_cache,
        save_qcache=save_qcache,
        verbose=True,
    )
    qres_vsmany = qres_vsmany_list[0]
    qres_vsmany.ishow_top(ibs)
    top_aids = qres_vsmany.get_top_aids()
    top_nids = ibs.get_annot_nids(top_aids)
    qaid = qaids[0]
    qnid = ibs.get_annot_nids(qaid)
    is_groundfalse = [nid != qnid for nid in top_nids]
    top_gf_aids = ut.compress(top_aids, is_groundfalse)
    # top_gt_aids = ut.filterfalse_items(top_aids, is_groundfalse)
    top_gf_vuuids = ibs.get_annot_visual_uuids(top_gf_aids)
    qvuuid = ibs.get_annot_visual_uuids(qaid)
    gf_mapping = {qvuuid: top_gf_vuuids[0:1]}
    logger.info('gf_mapping = ' + ut.repr2(gf_mapping))
    pass
Code example #19
File: bayes.py Project: heroinlin/ibeis
def report_partitioning_statistics(new_reduced_joint):
    # compute partitioning statistics
    import vtool as vt
    vals, idxs = vt.group_indices(new_reduced_joint.values.ravel())
    #groupsize = list(map(len, idxs))
    #groupassigns = ut.unflat_vecmap(new_reduced_joint.assignment, idxs)
    all_states = new_reduced_joint._row_labels(asindex=True)
    clusterstats = [tuple(sorted(list(ut.dict_hist(a).values())))
                    for a in all_states]
    grouped_vals = ut.group_items(new_reduced_joint.values.ravel(),
                                  clusterstats)

    #probs_assigned_to_clustertype = [(
    #    sorted(np.unique(np.array(b).round(decimals=5)).tolist())[::-1], a)
    #    for a, b in grouped_vals.items()]
    probs_assigned_to_clustertype = [(
        ut.dict_hist(np.array(b).round(decimals=5)), a)
        for a, b in grouped_vals.items()]
    sortx = ut.argsort([max(c[0].keys())
                        for c in probs_assigned_to_clustertype])
    probs_assigned_to_clustertype = ut.take(probs_assigned_to_clustertype, sortx)

    # This is a list of 2-tuples. The first item maps each unique
    # probability assigned to a cluster type to the number of times it was
    # assigned; the cluster type is the second item. Every number in a
    # cluster type counts how many annotations were assigned to a specific
    # label, and the length of that list is the total number of labels. For
    # all low scores you will see [[{somenum: 1}, {0: 800}], [1, 1, 1, ... 1]],
    # indicating that the assignment of everyone to a different label
    # happened once with probability somenum and 800 times with probability 0.

    #print(sorted([(b, a) for a, b in ut.map_dict_vals(sum, x)]).items())
    #z = sorted([(b, a) for a, b in ut.map_dict_vals(sum, grouped_vals).items()])
    print(ut.repr2(probs_assigned_to_clustertype, nl=2, precision=2, sorted_=True))
Code example #20
File: generate_notebook.py Project: heroinlin/ibeis
def make_ibeis_cell_list(ibs):
    cell_template_list = get_default_cell_template_list(ibs)
    autogen_str = make_autogen_str()
    dbname = ibs.get_dbname()
    #if ut.get_argflag('--hacktestscore'):
    #    annotconfig_list_body = ut.codeblock(
    #        '''
    #        'timectrl',
    #        '''
    #    )
    #else:
    default_acfgstr = ut.get_argval('-a', type_=str, default='default:is_known=True')
    annotconfig_list_body = ut.codeblock(
        ut.repr2(default_acfgstr) + '\n' +
        ut.codeblock('''
        # See ibeis/expt/annotation_configs.py for names of annot configuration options
        #'default:has_any=(query,),dpername=1,exclude_reference=True',
        #'default:is_known=True',
        #'default:qsame_encounter=True,been_adjusted=True,excluderef=True'
        #'default:qsame_encounter=True,been_adjusted=True,excluderef=True,qsize=10,dsize=20',
        #'default:require_timestamp=True,min_timedelta=3600',
        #'default:species=primary',
        #'timectrl:',
        #'timectrl:been_adjusted=True,dpername=3',
        #'timectrl:qsize=10,dsize=20',
        #'unctrl:been_adjusted=True',
        ''')
    )
    #if ut.get_argflag('--hacktestscore'):
    #    pipeline_list_body = ut.codeblock(
    #        '''
    #        # See ibeis/algo/Config.py for names of pipeline config options
    #        'default:lnbnn_on=True,bar_l2_on=False,normonly_on=False,fg_on=True',
    #        'default:lnbnn_on=False,bar_l2_on=True,normonly_on=False,fg_on=True',
    #        'default:lnbnn_on=False,bar_l2_on=False,normonly_on=True,fg_on=True',
    #        'default:lnbnn_on=True,bar_l2_on=False,normonly_on=False,fg_on=False',
    #        'default:lnbnn_on=False,bar_l2_on=True,normonly_on=False,fg_on=False',
    #        'default:lnbnn_on=False,bar_l2_on=False,normonly_on=True,fg_on=False',
    #        '''
    #    )
    #elif True:
    default_pcfgstr_list = ut.get_argval(('-t', '-p'), type_=list, default='default')
    default_pcfgstr = ut.repr3(default_pcfgstr_list, nobr=True)

    pipeline_list_body = ut.codeblock(
        default_pcfgstr + '\n' +
        ut.codeblock('''
        #'default',
        #'default:K=1',
        #'default:K=1,AI=False',
        #'default:K=1,AI=False,QRH=True',
        #'default:K=1,RI=True,AI=False',
        #'default:K=1,adapteq=True',
        #'default:fg_on=[True,False]',
        ''')
    )
    locals_ = locals()
    _format = partial(format_cells, locals_=locals_)
    cell_list = ut.flatten(map(_format, cell_template_list))
    return cell_list
Code example #21
File: util_hash.py Project: Erotemic/utool
def _test_int_byte_conversion():
    import itertools as it
    import utool as ut
    inputs = list(it.chain(
        range(0, 10),
        (2 ** i for i in range(0, 256, 32)),
        (2 ** i + 1 for i in range(0, 256, 32)),
    ))
    for int_0 in inputs:
        print('---')
        print('int_0 = %s' % (ut.repr2(int_0),))
        bytes_ = _int_to_bytes(int_0)
        int_ = _bytes_to_int(bytes_)
        print('bytes_ = %s' % (ut.repr2(bytes_),))
        print('int_ = %s' % (ut.repr2(int_),))
        assert int_ == int_0
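
The test above round-trips `_int_to_bytes` and `_bytes_to_int` from util_hash. A plausible Python 3 implementation of that pair (an assumption for illustration, not necessarily utool's actual code) is:

def _int_to_bytes(int_):
    # Encode a non-negative int in big-endian order, at least one byte wide.
    length = max(1, (int_.bit_length() + 7) // 8)
    return int_.to_bytes(length, byteorder='big')

def _bytes_to_int(bytes_):
    # Inverse of _int_to_bytes.
    return int.from_bytes(bytes_, byteorder='big')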
Code example #22
File: assert_modules.py Project: simplesoftMX/ibeis
 def checkinfo_wrapper(*args, **kwargs):
     suggested_fix = ''
     funcname = get_funcname(func)
     packagename = funcname.replace('_version', '')
     pipname_ = pipname if pipname is not None else packagename
     try:
         infodict = func(*args, **kwargs)
     except ImportError as ex:
         infodict = module_stdinfo_dict(None, name=pipname_)
         suggested_fix = 'pip install ' + pipname_
         if not sys.platform.startswith('win32'):
             suggested_fix = 'sudo ' + suggested_fix
         return False, 'None', target, infodict, ut.formatex(
             ex), suggested_fix
     except Exception as ex:
         infodict = module_stdinfo_dict(None, name=pipname_)
         return False, 'None', target, infodict, ut.formatex(
             ex), 'Some unknown error in ' + packagename
     current_version = infodict['__version__']
     # Build status text
     msg = ut.repr2(infodict, strvals=True)
     msg += '\n' + '%s: %r >= (target=%r)?' % (funcname,
                                               current_version, target)
     statustext = ut.msgblock(infodict['__name__'], msg)
     # Check if passed
     passed = version_ge_target(current_version, target)
     # Suggest possible fix
     if not passed:
         suggested_fix = 'pip install ' + infodict[
             '__name__'] + ' --upgrade'
         if not sys.platform.startswith('win32'):
             suggested_fix = 'sudo ' + suggested_fix
     return passed, current_version, target, infodict, statustext, suggested_fix
Code example #23
    def figure_clicked(self, event=None):
        from wbia.viz import viz_helpers as vh
        import wbia.guitool as gt

        ax = event.inaxes
        if ih.clicked_inside_axis(event):
            viztype = vh.get_ibsdat(ax, 'viztype')
            if viztype == 'chip':
                aid = vh.get_ibsdat(ax, 'aid')
                print('... aid=%r' % aid)
                if event.button == 3:  # right-click
                    from wbia.viz.interact import interact_chip

                    height = self.fig.canvas.geometry().height()
                    qpoint = gt.newQPoint(event.x, height - event.y)
                    if self.qreq_ is None:
                        config2_ = None
                    else:
                        if aid in self.qreq_.qaids:
                            config2_ = self.qreq_.query_config2_
                        else:
                            config2_ = self.qreq_.data_config2_
                    callback_list = interact_chip.build_annot_context_options(
                        self.ibs, aid, refresh_func=self.show_page, config2_=config2_
                    )
                    gt.popup_menu(self.fig.canvas, qpoint, callback_list)
                    # interact_chip.show_annot_context_menu(
                    #    self.ibs, aid, self.fig.canvas, qpoint, refresh_func=self.show_page)
                    # self.show_page()
                    # ibs.print_annotation_table()
                print(ut.repr2(event.__dict__))
Code example #24
File: pgm_viz.py Project: heroinlin/ibeis
def _debug_repr_cpd(cpd):
    import re
    import utool as ut
    code_fmt = ut.codeblock(
        '''
        {variable} = pgmpy.factors.TabularCPD(
            variable={variable_repr},
            variable_card={variable_card_repr},
            values={get_cpd_repr},
            evidence={evidence_repr},
            evidence_card={evidence_card_repr},
        )
        ''')
    keys = ['variable', 'variable_card', 'values', 'evidence', 'evidence_card']
    dict_ = ut.odict(zip(keys, [getattr(cpd, key) for key in keys]))
    # HACK
    dict_['values'] = cpd.get_cpd()
    r = ut.repr2(dict_, explicit=True, nobraces=True, nl=True)
    print(r)

    # Parse props that are needed for this fmtstr
    fmt_keys = [match.groups()[0] for match in re.finditer('{(.*?)}', code_fmt)]
    need_reprs = [key[:-5] for key in fmt_keys if key.endswith('_repr')]
    need_keys = [key for key in fmt_keys if not key.endswith('_repr')]
    # Get corresponding props
    # Call methods if needbe
    tmp = [(prop, getattr(cpd, prop)) for prop in need_reprs]
    tmp = [(x, y()) if ut.is_funclike(y) else (x, y) for (x, y) in tmp]
    fmtdict = dict(tmp)
    fmtdict = ut.map_dict_vals(ut.repr2, fmtdict)
    fmtdict = ut.map_dict_keys(lambda x: x + '_repr', fmtdict)
    tmp2 = [(prop, getattr(cpd, prop)) for prop in need_keys]
    fmtdict.update(dict(tmp2))
    code = code_fmt.format(**fmtdict)
    return code
Code example #25
    def get_cfgstr(nnindexer, noquery=False):
        r""" returns string which uniquely identified configuration and support data

        Args:
            noquery (bool): if True cfgstr is only relevant to building the
                index. No search params are returned (default = False)

        Returns:
            str: flann_cfgstr

        CommandLine:
            python -m wbia.algo.hots.neighbor_index --test-get_cfgstr

        Example:
            >>> # DISABLE_DOCTEST
            >>> from wbia.algo.hots.neighbor_index import *  # NOQA
            >>> import wbia
            >>> cfgdict = dict(fg_on=False)
            >>> qreq_ = wbia.testdata_qreq_(defaultdb='testdb1', p='default:fg_on=False')
            >>> qreq_.load_indexer()
            >>> nnindexer = qreq_.indexer
            >>> noquery = True
            >>> flann_cfgstr = nnindexer.get_cfgstr(noquery)
            >>> result = ('flann_cfgstr = %s' % (str(flann_cfgstr),))
            >>> print(result)
            flann_cfgstr = _FLANN((algo=kdtree,seed=42,t=8,))_VECS((11260,128)gj5nea@ni0%f3aja)
        """
        flann_cfgstr_list = []
        use_params_hash = True
        use_data_hash = True
        if use_params_hash:
            flann_defaults = vt.get_flann_params(
                nnindexer.flann_params['algorithm'])
            # flann_params_clean = flann_defaults.copy()
            flann_params_clean = ut.sort_dict(flann_defaults)
            ut.update_existing(flann_params_clean, nnindexer.flann_params)
            if noquery:
                ut.delete_dict_keys(flann_params_clean, ['checks'])
            shortnames = dict(algorithm='algo',
                              checks='chks',
                              random_seed='seed',
                              trees='t')
            short_params = ut.odict([
                (shortnames.get(key, key), str(val)[0:7])
                for key, val in six.iteritems(flann_params_clean)
            ])
            flann_valsig_ = ut.repr2(short_params,
                                     nl=False,
                                     explicit=True,
                                     strvals=True)
            flann_valsig_ = flann_valsig_.lstrip('dict').replace(' ', '')
            # flann_valsig_ = str(list(flann_params.values()))
            # flann_valsig = ut.remove_chars(flann_valsig_, ', \'[]')
            flann_cfgstr_list.append('_FLANN(' + flann_valsig_ + ')')
        if use_data_hash:
            vecs_hashstr = ut.hashstr_arr(nnindexer.idx2_vec, '_VECS')
            flann_cfgstr_list.append(vecs_hashstr)
        flann_cfgstr = ''.join(flann_cfgstr_list)
        return flann_cfgstr
Code example #26
File: generate_notebook.py Project: Erotemic/ibeis
def make_ibeis_cell_list(ibs):
    cell_template_list = get_default_cell_template_list(ibs)
    autogen_str = ut.make_autogen_str()
    dbname = ibs.get_dbname()
    default_acfgstr = ut.get_argval('-a', type_=str, default='default:is_known=True')

    asreport = ut.get_argflag('--asreport')

    default_pcfgstr_list = ut.get_argval(('-t', '-p'), type_=list, default='default')
    default_pcfgstr = ut.repr3(default_pcfgstr_list, nobr=True)

    if asreport:
        annotconfig_list_body = ut.codeblock(ut.repr2(default_acfgstr))
        pipeline_list_body = ut.codeblock(
            default_pcfgstr
        )
    else:
        annotconfig_list_body = ut.codeblock(
            ut.repr2(default_acfgstr) + '\n' +
            ut.codeblock('''
            # See ibeis/expt/annotation_configs.py for names of annot configuration options
            #'default:has_any=(query,),dpername=1,exclude_reference=True',
            #'default:is_known=True',
            #'default:qsame_imageset=True,been_adjusted=True,excluderef=True,qsize=10,dsize=20',
            #'default:require_timestamp=True,min_timedelta=3600',
            #'default:species=primary',
            #'timectrl:',
            #'unctrl:been_adjusted=True',
            ''')
        )
        pipeline_list_body = ut.codeblock(
            default_pcfgstr + '\n' +
            ut.codeblock('''
            #'default',
            #'default:K=1,AI=False,QRH=True',
            #'default:K=1,RI=True,AI=False',
            #'default:K=1,adapteq=True',
            #'default:fg_on=[True,False]',
            ''')
        )

    locals_ = locals()
    _format = partial(ut.format_cells, locals_=locals_)
    cell_list = ut.flatten(map(_format, cell_template_list))
    return cell_list
Code example #27
File: register_files.py Project: Erotemic/local
 def biggest_files(drive):
     print('Biggest Files in %r' % (drive,))
     sortx = ut.list_argsort(drive.fpath_bytes_list)[::-1]
     sel = sortx[0:10]
     biggest_nbytes = ut.take(drive.fpath_bytes_list, sel)
     biggest_files = ut.take(drive.fpath_list, sel)
     biginfo_list = list(zip(map(ut.byte_str2, biggest_nbytes), biggest_files))
     print(ut.repr2(biginfo_list, strvals=True))
Code example #28
        def wrp_cache_invalidator(self, *args, **kwargs):
            # the class must have a table_cache property
            colscache_ = self.table_cache[tblname]
            colnames_ = list(
                six.iterkeys(colscache_)) if colnames is None else colnames
            if DEBUG_API_CACHE:
                indenter = ut.Indenter('[%s]' % (tblname, ))
                indenter.start()
                logger.info('+------')
                logger.info(
                    'INVALIDATING tblname=%r, colnames=%r, rowidx=%r, force=%r'
                    % (tblname, colnames, rowidx, force))
                logger.info('self = %r' % (self, ))
                logger.info('args = %r' % (args, ))
                logger.info('kwargs = %r' % (kwargs, ))
                logger.info('colscache_ = ' + ut.repr2(colscache_, truncate=1))

            # Clear the cache of any specified colname
            # when the invalidator is called
            if rowidx is None:
                for colname in colnames_:
                    kwargs_cache_ = colscache_[colname]
                    # We dont know the rowsids so clear everything
                    for cache_ in six.itervalues(kwargs_cache_):
                        cache_.clear()
            else:
                rowid_list = args[rowidx]
                for colname in colnames_:
                    kwargs_cache_ = colscache_[colname]
                    # We know the rowids to delete
                    # iterate over all getter kwargs values
                    for cache_ in six.itervalues(kwargs_cache_):
                        ut.delete_dict_keys(cache_, rowid_list)

            # Perform set/delete action
            if DEBUG_API_CACHE:
                logger.info('After:')
                logger.info('colscache_ = ' + ut.repr2(colscache_, truncate=1))
                logger.info('L__________')

            writer_result = writer_func(self, *args, **kwargs)

            if DEBUG_API_CACHE:
                indenter.stop()
            return writer_result
Code example #29
File: register_files.py Project: dilas12345/local
 def biggest_files(drive):
     print('Biggest Files in %r' % (drive, ))
     sortx = ut.list_argsort(drive.fpath_bytes_list)[::-1]
     sel = sortx[0:10]
     biggest_nbytes = ut.take(drive.fpath_bytes_list, sel)
     biggest_files = ut.take(drive.fpath_list, sel)
     biginfo_list = list(
         zip(map(ut.byte_str2, biggest_nbytes), biggest_files))
     print(ut.repr2(biginfo_list, strvals=True))
Code example #30
def drive_test_script(ibs):
    r"""
    Test script where we drive around and take pictures of animals
    both in a given database and not in a given database to make sure
    the system works.

    CommandLine:
        python -m wbia.viz.viz_image --test-drive_test_script
        python -m wbia.viz.viz_image --test-drive_test_script --db PZ_MTEST --show
        python -m wbia.viz.viz_image --test-drive_test_script --db GIR_Tanya --show
        python -m wbia.viz.viz_image --test-drive_test_script --db GIR_Master0 --show
        python -m wbia.viz.viz_image --test-drive_test_script --db PZ_Master0 --show
        python -m wbia.viz.viz_image --test-drive_test_script --db PZ_FlankHack --show

        python -m wbia.viz.viz_image --test-drive_test_script --db PZ_FlankHack --show
        python -m wbia.viz.viz_image --test-drive_test_script --dbdir /raid/work2/Turk/GIR_Master --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.viz.viz_image import *  # NOQA
        >>> import wbia
        >>> ibs = wbia.opendb()
        >>> drive_test_script(ibs)
    """
    import wbia

    aid_list = wbia.testdata_aids(a='default:pername=1')
    logger.info('Running with (annot) aid_list = %r' % (aid_list))
    gid_list = ibs.get_annot_gids(aid_list)
    logger.info('Running with (image) gid_list = %r' % (gid_list))
    avuuid_list = ibs.get_annot_visual_uuids(aid_list)
    guuid_list = ibs.get_image_uuids(gid_list)
    logger.info('Running with annot_visual_uuid_list = %s' %
                (ut.repr2(list(zip(aid_list, avuuid_list)))))
    logger.info('Running with image_uuid_list = %s' %
                (ut.repr2(list(zip(gid_list, guuid_list)))))
    for gid, aid in ut.ProgressIter(zip(gid_list, aid_list), lbl='progress '):
        logger.info('\ngid, aid, nid = %r, %r, %r' % (
            gid,
            aid,
            ibs.get_annot_nids(aid),
        ))
        show_image(ibs, gid, annote=False, rich_title=True)
        pt.show_if_requested()
Code example #31
File: mtgmonte.py Project: Erotemic/mtgmonte
def inspect_deck(deck):
    def get_card_tags(card, deck):
        tags = []
        stats = card.mana_source_stats(deck)
        if stats is not None:
            tags.append("land")
            if len(stats[1]) > 0:
                tags.append("tapland")
            else:
                tags.append("untapland")
        return tags

    # ------------
    print("len(deck) = %r" % (len(deck),))
    tags_list = [get_card_tags(card, deck) for card in deck.card_list]
    print("Deck Counts:")
    print(ut.repr2(ut.dict_hist(ut.flatten(tags_list)), nl=True))

    hand = deck.sample_hand()
    manastats_list = [card.mana_source_stats(deck) for card in hand]
    print(ut.list_str([card.name + ": " + text_type(stats) for card, stats in zip(hand, manastats_list)]))
    tags_list = [get_card_tags(card, deck) for card in hand]
    print("Hand Counts")
    print(ut.repr2(ut.dict_hist(ut.flatten(tags_list)), nl=True))

    valid_tags = ["land", "tapland", "untapland"]
    x = {tag: [] for tag in valid_tags}

    for _ in range(500):
        hand = deck.sample_hand()
        tags_list = [get_card_tags(card, deck) for card in hand]
        taghist = ut.dict_hist(ut.flatten(tags_list))
        for key, val in x.items():
            val.append(taghist.get(key, 0))

    print("Monte Stats:")
    for key, val in list(x.items()):
        print("%15s: %s" % (key, ut.repr2(ut.get_stats(val), precision=2)))

    def hand_stats():
        # [card.types for card in hand]
        # [card.rrr() for card in hand]
        [card.mana_source_stats(deck) for card in hand]
        card.types
Code example #32
    def _make_deploy_metadata(self, task_key=None):
        pblm = self.pblm
        if pblm.samples is None:
            pblm.setup()

        if task_key is None:
            task_key = pblm.primary_task_key

        # task_keys = list(pblm.samples.supported_tasks())
        clf_key = pblm.default_clf_key
        data_key = pblm.default_data_key

        # Save the classifier
        data_info = pblm.feat_extract_info[data_key]
        feat_extract_config, feat_dims = data_info

        samples = pblm.samples
        labels = samples.subtasks[task_key]

        edge_hashid = samples.edge_set_hashid()
        label_hashid = samples.task_label_hashid(task_key)
        tasksamp_hashid = samples.task_sample_hashid(task_key)

        annot_hashid = ut.hashid_arr(samples._unique_annots.visual_uuids,
                                     'annots')

        # species = pblm.infr.ibs.get_primary_database_species(
        #     samples._unique_annots.aid)
        species = '+'.join(sorted(set(samples._unique_annots.species)))

        metadata = {
            'tasksamp_hashid': tasksamp_hashid,
            'edge_hashid': edge_hashid,
            'label_hashid': label_hashid,
            'annot_hashid': annot_hashid,
            'class_hist': labels.make_histogram(),
            'class_names': labels.class_names,
            'data_info': data_info,
            'task_key': task_key,
            'species': species,
            'data_key': data_key,
            'clf_key': clf_key,
            'n_dims': len(feat_dims),
            # 'aid_pairs': samples.aid_pairs,
        }

        meta_cfgstr = ut.repr2(metadata, kvsep=':', itemsep='', si=True)
        hashid = ut.hash_data(meta_cfgstr)[0:16]

        deploy_fname = self.fname_fmtstr.format(hashid=hashid, **
                                                metadata) + '.cPkl'

        deploy_metadata = metadata.copy()
        deploy_metadata['hashid'] = hashid
        deploy_metadata['fname'] = deploy_fname
        return deploy_metadata, deploy_fname
Code example #33
 def on_contextMenuRequested(widget, index, pos):
     logger.info('context request')
     if widget.api is not None:
         logger.info(ut.repr2(widget.api.get_available_colnames()))
         # HACK test
         # widget.api.add_column_names(['qx2_gt_rank', 'qx2_gf_rank', 'qx2_gt_raw_score', 'qx2_gf_raw_score'])
         widget.refresh_headers()
         # widget.change_headers(widget.api.make_headers())
     if VERBOSE_ITEM_WIDGET:
         logger.info('context request')
Code example #34
File: generate_notebook.py Project: whaozl/ibeis
def make_ibeis_cell_list(ibs):
    cell_template_list = get_default_cell_template_list(ibs)
    autogen_str = ut.make_autogen_str()
    dbname = ibs.get_dbname()
    default_acfgstr = ut.get_argval('-a',
                                    type_=str,
                                    default='default:is_known=True')

    asreport = ut.get_argflag('--asreport')

    default_pcfgstr_list = ut.get_argval(('-t', '-p'),
                                         type_=list,
                                         default='default')
    default_pcfgstr = ut.repr3(default_pcfgstr_list, nobr=True)

    if asreport:
        annotconfig_list_body = ut.codeblock(ut.repr2(default_acfgstr))
        pipeline_list_body = ut.codeblock(default_pcfgstr)
    else:
        annotconfig_list_body = ut.codeblock(
            ut.repr2(default_acfgstr) + '\n' + ut.codeblock('''
            # See ibeis/expt/annotation_configs.py for names of annot configuration options
            #'default:has_any=(query,),dpername=1,exclude_reference=True',
            #'default:is_known=True',
            #'default:qsame_imageset=True,been_adjusted=True,excluderef=True,qsize=10,dsize=20',
            #'default:require_timestamp=True,min_timedelta=3600',
            #'default:species=primary',
            #'timectrl:',
            #'unctrl:been_adjusted=True',
            '''))
        pipeline_list_body = ut.codeblock(default_pcfgstr + '\n' +
                                          ut.codeblock('''
            #'default',
            #'default:K=1,AI=False,QRH=True',
            #'default:K=1,RI=True,AI=False',
            #'default:K=1,adapteq=True',
            #'default:fg_on=[True,False]',
            '''))

    locals_ = locals()
    _format = partial(ut.format_cells, locals_=locals_)
    cell_list = ut.flatten(map(_format, cell_template_list))
    return cell_list
Code example #35
File: gistfile1.py Project: RTHMaK/Skynet
def main():
    if True:
        import pandas as pd
        pd.options.display.max_rows = 1000
        pd.options.display.width = 1000

        basis = {
            #'n_clusters': [10, 100, 1000, 2000][::-1],
            #'n_features': [4, 32, 128, 512][::-1],
            #'per_cluster': [1, 10, 100, 200][::-1],
            'n_clusters': [10, 100, 500][::-1],
            'n_features': [32, 128][::-1],
            'per_cluster': [1, 10, 20][::-1],
            'asint': [True, False],
        }
        vals = []
        for kw in ut.ProgIter(ut.all_dict_combinations(basis),
                              lbl='gridsearch',
                              bs=False,
                              adjust=False,
                              freq=1):
            print('kw = ' + ut.repr2(kw))
            exec(ut.execstr_dict(kw))
            centers1, new_speed = test_kmeans_plus_plus_speed(fix=True, **kw)
            centers2, old_speed = test_kmeans_plus_plus_speed(fix=False, **kw)
            import utool
            with utool.embed_on_exception_context:
                assert np.all(centers1 == centers2), 'new code disagrees'

            kw['new_speed'] = new_speed
            kw['old_speed'] = old_speed
            vals.append(kw)
            print('---------')

        df = pd.DataFrame.from_dict(vals)
        df['percent_change'] = 100 * (df['old_speed'] -
                                      df['new_speed']) / df['old_speed']
        df = df.reindex(list(basis.keys()) +
                        ['new_speed', 'old_speed', 'percent_change'],
                        axis=1)
        df['absolute_change'] = (df['old_speed'] - df['new_speed'])
        print(df.sort_values('absolute_change', ascending=False))
        #print(df)

        print(df['percent_change'][df['absolute_change'] > .1].mean())

    #print(df.loc[df['percent_change'].argsort()[::-1]])
    else:
        new_speed = test_kmeans_plus_plus_speed()
        try:
            profile.dump_stats('out.lprof')
            profile.print_stats(stripzeros=True)
        except Exception:
            pass
        print('new_speed = %r' % (new_speed, ))
Code example #36
 def _image_view(sel_aids=sel_aids, **_kwargs):
     try:
         viz.show_image(ibs,
                        gid,
                        sel_aids=sel_aids,
                        fnum=self.fnum,
                        **_kwargs)
         df2.set_figtitle('Image View')
     except TypeError as ex:
         ut.printex(ex, ut.repr2(_kwargs))
         raise
Code example #37
File: util_csv.py Project: SU-ECE-18-7/utool
 def csv_format(r):
     text = ut.repr2(r, precision=precision)
     #text = six.text_type(r)
     # Check if needs escape
     escape_chars = ['"', ' ', ',']
     if any([c in text for c in escape_chars]):
         # escape quotes with quotes
         text = text.replace('"', '""')
         # encapsulate with quotes
         text = '"' + text + '"'
     return text
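
The escaping above follows standard CSV quoting: double any internal quotes, then wrap the field in quotes. A standalone sketch of just that rule, without the ut.repr2 step:

def csv_escape(text):
    # Quote fields containing quotes, spaces, or commas; double internal quotes.
    if any(c in text for c in ['"', ' ', ',']):
        text = '"' + text.replace('"', '""') + '"'
    return text

assert csv_escape('say "hi", ok') == '"say ""hi"", ok"'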
Code example #38
 def _grep_dpath(dpath):
     grep_tup = ut.grep([pat], dpath_list=[dpath],
                        exclude_patterns=['*.pyc'], verbose=False)
     reflags = 0
     (found_fpath_list, found_lines_list, found_lxs_list) = grep_tup
     regex_list = [pat]
     _exprs_flags = [ut.util_regex.extend_regex2(expr, reflags)
                     for expr in regex_list]
     extended_regex_list = ut.take_column(_exprs_flags, 0)
     grep_result = ut.GrepResult(found_fpath_list, found_lines_list,
                                 found_lxs_list, extended_regex_list,
                                 reflags=reflags)
     text = '\n'.join([
         'Greping Directory "{}"'.format(dpath),
         'tofind_list={}'.format(ut.repr2(extended_regex_list)),
         grep_result.make_resultstr(colored=False),
         '=============',
         'found_fpath_list = {}'.format(ut.repr2(found_fpath_list, nl=1))
     ])
     return text
Code example #39
 def csv_format(r):
     text = ut.repr2(r, precision=precision)
     #text = six.text_type(r)
     # Check if needs escape
     escape_chars = ['"', ' ', ',']
     if any([c in text for c in escape_chars]):
         # escape quotes with quotes
         text = text.replace('"', '""')
         # encapsulate with quotes
         text = '"' + text + '"'
     return text
Code example #40
 def expand_closure_source(funcname, func):
     source = ut.get_func_sourcecode(func)
     closure_vars = [(k, v.cell_contents) for k, v in
                     zip(func.__code__.co_freevars, func.__closure__)]
     source = ut.unindent(source)
     import re
     for k, v in closure_vars:
         source = re.sub('\\b' + k + '\\b', ut.repr2(v), source)
     source = re.sub(r'def .*\(self', 'def ' + funcname + '(self', source)
     source = ut.indent(source.strip(), '    ') + '\n'
     return source
Code example #41
File: job_engine.py Project: Erotemic/ibeis
def rcv_multipart_json(sock, num=2, print=print):
    """ helper """
    # note that the first two parts will be ['Controller.ROUTER', 'Client.<id_>']
    # these are needed for the reply to propagate up to the right client
    multi_msg = sock.recv_multipart()
    if VERBOSE_JOBS:
        print('----')
        print('RCV Json: %s' % (ut.repr2(multi_msg, truncate=True),))
    idents = multi_msg[:num]
    request_json = multi_msg[num]
    request = ut.from_json(request_json)
    return idents, request
Code example #42
File: register_files.py Project: Erotemic/local
    def biggest_dirs(drive):
        print('Biggest Dirs in %r' % (drive,))
        dpath_list = drive.dpath_list
        fidxs_list = ut.dict_take(drive.dpath_to_fidx, dpath_list)
        unflat_dpath_bytes_list = ut.list_unflat_take(drive.fpath_bytes_list, fidxs_list)
        dpath_nbytes_list = list(map(sum, unflat_dpath_bytes_list))

        sortx = ut.list_argsort(dpath_nbytes_list)[::-1]
        sel = sortx[0:10]
        biggest_nbytes = ut.take(dpath_nbytes_list, sel)
        biggest_dpaths = ut.take(dpath_list, sel)
        biginfo_list = list(zip(map(ut.byte_str2, biggest_nbytes), biggest_dpaths))
        print(ut.repr2(biginfo_list, strvals=True))
        pass
Code example #43
File: zmq_task_queue.py Project: heroinlin/ibeis
 def get_job_result(jobiface, jobid):
     with ut.Indenter('[client %d] ' % (jobiface.id_)):
         if jobiface.verbose >= 1:
             print = partial(ut.colorprint, color='teal')
             print('----')
             print('Request result of jobid=%r' % (jobid,))
         pair_msg = dict(action='job_result', jobid=jobid)
         jobiface.collect_deal_sock.send_json(pair_msg)
         if jobiface.verbose >= 3:
             print('... waiting for collector reply')
         reply = jobiface.collect_deal_sock.recv_json()
         if jobiface.verbose >= 2:
             print('got reply = %s' % (ut.repr2(reply, truncate=True),))
     return reply
Code example #44
File: pandas_highlight.py Project: Erotemic/utool
def pandas_repr(df):
    import utool as ut
    args = [
        df.values,
    ]
    kwargs = [
        ('columns', df.columns.values.tolist()),
        ('index', df.index.values.tolist()),
    ]
    header = 'pd.DataFrame('
    footer = ')'

    arg_parts = [
        ut.hz_str('    ', ut.repr2(arg))
        for arg in args if arg is not None
    ]
    kwarg_parts = [
        ut.hz_str('    {}={}'.format(key, ut.repr2(val)))
        for key, val in kwargs if val is not None
    ]
    body = ',\n'.join(arg_parts + kwarg_parts)
    dfrepr = '\n'.join([header, body, footer])
    print(dfrepr)
    pass
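
A quick usage sketch for the helper above; the toy frame is an assumption for illustration.

import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
pandas_repr(df)  # prints a pd.DataFrame(...) expression that rebuilds df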
Code example #45
File: file_organizer.py Project: Erotemic/local
 def find_nonunique_names(self):
     fnames = map(basename, self.rel_fpath_list)
     duplicate_map = ut.find_duplicate_items(fnames)
     groups = []
     for dupname, idxs in duplicate_map.items():
         uuids = self.get_prop('uuids', idxs)
         fpaths = self.get_prop('abs', idxs)
         groups = ut.group_items(fpaths, uuids)
         if len(groups) > 1:
             if all(x == 1 for x in map(len, groups.values())):
                 # All groups are different, this is a simpler case
                 print(ut.repr2(groups, nl=3))
             else:
                 # Need to handle the multi-item groups first
                 pass
Code example #46
File: mtgobjs.py Project: Erotemic/mtgmonte
    def __nice__(card):
        if not hasattr(card, 'state'):
            card.state = []

        body = card.name
        if 'mana_cost' in card.nice_attrs and card.mana_cost:
            body += ' (' + card.mana_cost + ')'
        if 'state' in card.nice_attrs and len(card.state) > 0:
            state_nice = {
                # 'tapped': TAPPED
            }
            body += ' ' + ut.repr2([state_nice.get(s, s) for s in card.state])
        if 'rarity' in card.nice_attrs and card.rarity:
            body += ' ' + card.rarity
        return body
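
For context, a __nice__ method like this typically feeds a generic __repr__;
a minimal standalone sketch (not utool's actual base class):

class NiceRepr(object):
    # Minimal stand-in: __repr__ delegates to __nice__.
    def __repr__(self):
        return '<%s(%s)>' % (self.__class__.__name__, self.__nice__())

class Card(NiceRepr):
    def __init__(self, name, mana_cost=''):
        self.name, self.mana_cost = name, mana_cost

    def __nice__(self):
        body = self.name
        if self.mana_cost:
            body += ' (' + self.mana_cost + ')'
        return body

print(Card('Island', 'U'))  # -> <Card(Island (U))>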
Code example #47
File: util_project.py Project: Erotemic/utool
    def find_module_callers():
        """
        TODO:
        attempt to build a call graph between module functions to make it easy to see
        what can be removed and what cannot.
        """
        import utool as ut
        from os.path import normpath
        mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_analyzer.py')
        mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_all.py')
        mod_fpath = ut.truepath('~/code/ibeis/ibeis/expt/results_organizer.py')
        module = ut.import_module_from_fpath(mod_fpath)
        user_profile = ut.ensure_user_profile()
        doctestables = list(ut.iter_module_doctestable(module, include_builtin=False))
        grepkw = {}
        grepkw['exclude_dirs'] = user_profile.project_exclude_dirs
        grepkw['dpath_list'] = user_profile.project_dpaths
        grepkw['verbose'] = True

        usage_map = {}
        for funcname, func in doctestables:
            print('Searching for funcname = %r' % (funcname,))
            found_fpath_list, found_lines_list, found_lxs_list = ut.grep([funcname], **grepkw)
            used_in = (found_fpath_list, found_lines_list, found_lxs_list)
            usage_map[funcname] = used_in

        external_usage_map = {}
        for funcname, used_in in usage_map.items():
            (found_fpath_list, found_lines_list, found_lxs_list) = used_in
            isexternal_flag = [normpath(fpath) != normpath(mod_fpath) for fpath in found_fpath_list]
            ext_used_in = (ut.compress(found_fpath_list, isexternal_flag),
                           ut.compress(found_lines_list, isexternal_flag),
                           ut.compress(found_lxs_list, isexternal_flag))
            external_usage_map[funcname] = ext_used_in

        for funcname, used_in in external_usage_map.items():
            (found_fpath_list, found_lines_list, found_lxs_list) = used_in

        print('Calling modules: \n' +
          ut.repr2(ut.unique_ordered(ut.flatten([used_in[0] for used_in in external_usage_map.values()])), nl=True))
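
The word-boundary search that drives the usage map can be sketched with re
alone (a hypothetical find_callers helper):

import re

def find_callers(funcname, text):
    # \b ensures 'results_all' does not also match 'results_allocate'.
    pattern = re.compile(r'\b%s\b' % re.escape(funcname))
    return [(lx, line) for lx, line in enumerate(text.splitlines(), start=1)
            if pattern.search(line)]

src = 'x = results_all()\nresults_allocate = 1\n# results_all is used above\n'
print(find_callers('results_all', src))
# -> [(1, 'x = results_all()'), (3, '# results_all is used above')]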
Code example #48
File: dbinfo.py Project: Erotemic/ibeis
def get_short_infostr(ibs):
    """ Returns printable database information

    Args:
        ibs (IBEISController):  ibeis controller object

    Returns:
        str: infostr

    CommandLine:
        python -m ibeis.other.dbinfo --test-get_short_infostr

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.other.dbinfo import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb('testdb1')
        >>> infostr = get_short_infostr(ibs)
        >>> result = str(infostr)
        >>> print(result)
        dbname = 'testdb1'
        num_images = 13
        num_annotations = 13
        num_names = 7
    """
    dbname = ibs.get_dbname()
    #workdir = ut.unixpath(ibs.get_workdir())
    num_images = ibs.get_num_images()
    num_annotations = ibs.get_num_annotations()
    num_names = ibs.get_num_names()
    #workdir = %r
    infostr = ut.codeblock('''
    dbname = %s
    num_images = %r
    num_annotations = %r
    num_names = %r
    ''' % (ut.repr2(dbname), num_images, num_annotations, num_names))
    return infostr
Code example #49
File: bayes.py Project: heroinlin/ibeis
def test_model(num_annots, num_names, score_evidence=[], name_evidence=[],
               other_evidence={}, noquery=False, verbose=None,
               **kwargs):
    if verbose is None:
        verbose = ut.VERBOSE

    method = kwargs.pop('method', None)
    model = make_name_model(num_annots, num_names, verbose=verbose, **kwargs)

    if verbose:
        model.print_priors(ignore_ttypes=['match', 'score'])

    model, evidence, soft_evidence = update_model_evidence(
        model, name_evidence, score_evidence, other_evidence)

    if verbose and len(soft_evidence) != 0:
        model.print_priors(ignore_ttypes=['match', 'score'],
                           title='Soft Evidence', color='green')

    #if verbose:
    #    ut.colorprint('\n --- Soft Evidence ---', 'white')
    #    for ttype, cpds in model.ttype2_cpds.items():
    #        if ttype != 'match':
    #            for fs_ in ut.ichunks(cpds, 4):
    #                ut.colorprint(ut.hz_str([f._cpdstr('psql') for f in fs_]),
    #                              'green')

    if verbose:
        ut.colorprint('\n --- Inference ---', 'red')

    if (len(evidence) > 0 or len(soft_evidence) > 0) and not noquery:
        evidence = model._ensure_internal_evidence(evidence)
        query_vars = []
        query_vars += ut.list_getattr(model.ttype2_cpds['name'], 'variable')
        #query_vars += ut.list_getattr(model.ttype2_cpds['match'], 'variable')
        query_vars = ut.setdiff(query_vars, evidence.keys())
        #query_vars = ut.setdiff(query_vars, soft_evidence.keys())
        query_results = cluster_query(model, query_vars, evidence,
                                      soft_evidence, method)
    else:
        query_results = {}

    factor_list = query_results.get('factor_list', [])

    if verbose:
        print('+--------')
        semtypes = [model.var2_cpd[f.variables[0]].ttype
                    for f in factor_list]
        for type_, factors in ut.group_items(factor_list, semtypes).items():
            print('Result Factors (%r)' % (type_,))
            factors = ut.sortedby(factors, [f.variables[0] for f in factors])
            for fs_ in ut.ichunks(factors, 4):
                ut.colorprint(ut.hz_str([f._str('phi', 'psql') for f in fs_]),
                              'yellow')
        print('MAP assignments')
        top_assignments = query_results.get('top_assignments', [])
        tmp = []
        for lbl, val in top_assignments:
            tmp.append('%s : %.4f' % (ut.repr2(lbl), val))
        print(ut.align('\n'.join(tmp), ' :'))
        print('L_____\n')

    showkw = dict(evidence=evidence,
                  soft_evidence=soft_evidence,
                  **query_results)

    pgm_viz.show_model(model, **showkw)
    return (model, evidence, query_results)
Code example #50
File: pyStereoComp.py Project: Kitware/VIAME
    def triangulatePoint(self, xL, xR, mode=None):
        """
        Args:
            xL (ndarray): [2 x N] array of N 2D points in left cam pixels
            xR (ndarray): [2 x N] array of N 2D points in right cam pixels
        """
        if mode is None:
            mode = self.mode

        if mode.lower() == 'opencv':
            if False:
                hxL = np.zeros((xL.shape[1], 1, 2))
                hxR = np.zeros((xL.shape[1], 1, 2))
                for i in range(xL.shape[1]):
                    hxL[i, 0, 0] = xL[0, i]
                    hxL[i, 0, 1] = xL[1, i]
                    hxR[i, 0, 0] = xR[0, i]
                    hxR[i, 0, 1] = xR[1, i]
            else:
                # Much easier way to do above code
                hxL = xL.T[:, None, :]
                hxR = xR.T[:, None, :]

            KL = self.calData['cameraMatrixL']
            KR = self.calData['cameraMatrixR']
            kcR = self.calData['distCoeffsR']
            kcL = self.calData['distCoeffsL']

            R, T = self.calData['R'], self.calData['T']

            # THIS SEEMS WRONG
            PL = np.hstack((np.eye(3), np.zeros((3, 1))))
            PR = np.hstack((R, T))
            # PL = KL.dot(np.hstack((np.eye(3), np.zeros((3, 1)))))
            # PR = KR.dot(np.hstack((R, T)))

            imgptsL = cv2.undistortPoints(hxL, KL, kcL)
            imgptsR = cv2.undistortPoints(hxR, KR, kcR)

            import utool as ut
            print('imgptsL =\n{}'.format(ut.repr2(imgptsL[:, 0, :].T, precision=3)))
            print('imgptsR =\n{}'.format(ut.repr2(imgptsR[:, 0, :].T, precision=3)))

            XL = cv2.triangulatePoints(PL, PR, imgptsL, imgptsR)
            XL /= XL[3]
            XL = XL[0:3]
            XR = np.dot(R, XL) + T

        elif mode.lower() == 'matlab':
            '''% [XL,XR] = stereo_triangulation(xL,xR,om,T,fc_left,cc_left,kc_left,alpha_c_left,fc_right,cc_right,kc_right,alpha_c_right),
                %
                % Function that computes the position of a set on N points given the left and right image projections.
                % The cameras are assumed to be calibrated, intrinsically, and extrinsically.
                %
                % Input:
                %           xL: 2xN matrix of pixel coordinates in the left image
                %           xR: 2xN matrix of pixel coordinates in the right image
                %           om,T: rotation vector and translation vector between right and left cameras (output of stereo calibration)
                %           fc_left,cc_left,...: intrinsic parameters of the left camera  (output of stereo calibration)
                %           fc_right,cc_right,...: intrinsic parameters of the right camera (output of stereo calibration)
                %
                % Output:
                %
                %           XL: 3xN matrix of coordinates of the points in the left camera reference frame
                %           XR: 3xN matrix of coordinates of the points in the right camera reference frame
                %
                % Note: XR and XL are related to each other through the rigid motion equation: XR = R * XL + T, where R = rodrigues(om)
                % For more information, visit http://www.vision.caltech.edu/bouguetj/calib_doc/htmls/example5.html
                %
                %
                % (c) Jean-Yves Bouguet - Intel Corporation - April 9th, 2003
                '''
            #--- Normalize the image projection according to the intrinsic parameters of the left and right cameras
            xt = self.normalizePixel(
                xL, self.calData['fc_left'], self.calData['cc_left'], self.calData['kc_left'], self.calData['alpha_c_left'])
            xtt = self.normalizePixel(
                xR, self.calData['fc_right'], self.calData['cc_right'], self.calData['kc_right'], self.calData['alpha_c_right'])

            import utool as ut
            print('xt =\n{}'.format(ut.repr2(xt, precision=3)))
            print('xtt =\n{}'.format(ut.repr2(xtt, precision=3)))

            T = self.calData['T']
            R = self.calData['R']
            #R=np.array([[0.982261529882744,       -0.0231885098133093,         0.186076811895112],[0.0236366236373557,         0.999720597968969,      -0.00018978828345735],[-0.186020420748467,       0.00458464930006644,          0.98253518209546]])
            #--- Extend the normalized projections in homogeneous coordinates
            g = np.array(np.ones((1, xt.shape[1])))
            xt = np.vstack((xt, g))
            xtt = np.vstack((xtt, g))

            #--- Number of points:
            N = xt.shape[1]

            #--- Triangulation of the rays in 3D space:

            u = np.dot(R,  xt)

            n_xt2 = (xt * xt).sum(axis=0)
            n_xtt2 = (xtt * xtt).sum(axis=0)

            T_vect = np.tile(T, (1, N))

            DD = n_xt2 * n_xtt2 - (u * xtt).sum(axis=0)**2

            dot_uT = (u * T_vect).sum(axis=0)
            dot_xttT = (xtt * T_vect).sum(axis=0)
            dot_xttu = (u * xtt).sum(axis=0)

            NN1 = dot_xttu * dot_xttT - n_xtt2 * dot_uT
            NN2 = n_xt2 * dot_xttT - dot_uT * dot_xttu

            Zt = NN1 / DD
            Ztt = NN2 / DD

            X1 = xt * np.tile(Zt, (3,  1))
            X2 = np.dot(R.T, (xtt * np.tile(Ztt, (3, 1)) - T_vect))

            #--- Left coordinates:
            XL = 0.5 * (X1 + X2)

            #--- Right coordinates:
            XR = np.dot(R, XL) + T_vect

        import utool as ut
        print('XL =\n{}'.format(ut.repr2(XL, precision=3)))
        print('XR =\n{}'.format(ut.repr2(XR, precision=3)))
        print('----')
        return (XL, XR)
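
A standalone sanity check of the closed-form midpoint solution used in the
'matlab' branch, on noise-free synthetic data (not part of the original file):

import numpy as np

# Ground truth: one 3D point in the left camera frame, plus a known
# right-camera pose satisfying XR = R.dot(XL) + T.
XL_true = np.array([[0.2], [-0.1], [5.0]])
theta = np.deg2rad(3.0)
R = np.array([[np.cos(theta), 0, np.sin(theta)],
              [0, 1, 0],
              [-np.sin(theta), 0, np.cos(theta)]])
T = np.array([[-0.5], [0.0], [0.01]])
XR_true = R.dot(XL_true) + T

# Normalized homogeneous projections (what normalizePixel would return,
# extended with a row of ones).
xt = XL_true / XL_true[2]
xtt = XR_true / XR_true[2]

# The same DD/NN1/NN2 expressions as above, specialized to a single point.
u = R.dot(xt)
DD = (xt * xt).sum() * (xtt * xtt).sum() - (u * xtt).sum() ** 2
Zt = ((u * xtt).sum() * (xtt * T).sum() - (xtt * xtt).sum() * (u * T).sum()) / DD
Ztt = ((xt * xt).sum() * (xtt * T).sum() - (u * T).sum() * (u * xtt).sum()) / DD
X1 = xt * Zt
X2 = R.T.dot(xtt * Ztt - T)
XL = 0.5 * (X1 + X2)
assert np.allclose(XL, XL_true)
print(XL.ravel())  # -> [ 0.2 -0.1  5. ]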
Code example #51
File: mtgmonte.py Project: Erotemic/mtgmonte
 def get_statestr(player):
     statestr = ut.repr2(player.infodict2(), nl=2, strvals=True)
     return statestr
Code example #52
File: mtgmonte.py Project: Erotemic/mtgmonte
 def draw_cards(player, num=1):
     cards = player.deck.draw_card(num)
     if player.verbose >= 2:
         print("Player draws %d cards: %s" % (num, ut.repr2(mtgobjs.CardGroup(cards).infohist, nl=1)))
     player.hand.extend(cards)
Code example #53
File: bayes.py Project: heroinlin/ibeis
def cluster_query(model, query_vars=None, evidence=None, soft_evidence=None,
                  method=None, operation='maximize'):
    """
    CommandLine:
        python -m ibeis.algo.hots.bayes --exec-cluster_query --show

    ParamGrid:
        >>> param_grid = dict(
        >>>     #method=['approx', 'bf', 'bp'],
        >>>     method=['approx', 'bp'],
        >>> )
        >>> combos = ut.all_dict_combinations(param_grid)
        >>> index = 0
        >>> keys = 'method'.split(', ')
        >>> method, = ut.dict_take(combos[index], keys)

    Setup:
        >>> from ibeis.algo.hots.bayes import *  # NOQA
        >>> verbose = True
        >>> other_evidence = {}
        >>> name_evidence = [1, None, None, 0]
        >>> score_evidence = [2, 0, 2]
        >>> special_names = ['fred', 'sue', 'tom', 'paul']
        >>> model = make_name_model(
        >>>     num_annots=4, num_names=4, num_scores=3, verbose=True, mode=1,
        >>>     special_names=special_names)
        >>> method = None
        >>> model, evidence, soft_evidence = update_model_evidence(
        >>>     model, name_evidence, score_evidence, other_evidence)
        >>> evidence = model._ensure_internal_evidence(evidence)
        >>> query_vars = ut.list_getattr(model.ttype2_cpds['name'], 'variable')

    Example:
        >>> # DISABLE_DOCTEST
        >>> query_results = cluster_query(model, query_vars, evidence,
        >>>                               method=method)
        >>> print(ut.repr2(query_results['top_assignments'], nl=1))
        >>> ut.quit_if_noshow()
        >>> pgm_ext.show_model(model, evidence=evidence, **query_results)
        >>> ut.show_if_requested()
    """
    evidence = model._ensure_internal_evidence(evidence)
    if query_vars is None:
        query_vars = model.nodes()
    orig_query_vars = query_vars  # NOQA
    query_vars = ut.setdiff(query_vars, list(evidence.keys()))

    if method is None:
        method = ut.get_argval('--method', type_=str, default='bp')

    reduced_joint = compute_reduced_joint(model, query_vars, evidence,
                                          method, operation)

    new_reduced_joint = collapse_factor_labels(model, reduced_joint, evidence)

    if False:
        report_partitioning_statistics(new_reduced_joint)

    # FIXME: are these max marginals?
    max_marginals = {}
    for i, var in enumerate(query_vars):
        one_out = query_vars[:i] + query_vars[i + 1:]
        max_marginals[var] = new_reduced_joint.marginalize(one_out,
                                                           inplace=False)
        # max_marginals[var] = joint2.maximize(one_out, inplace=False)
    factor_list = list(max_marginals.values())

    # Now find the most likely state
    reduced_variables = new_reduced_joint.variables
    new_state_idxs = np.array(new_reduced_joint._row_labels(asindex=True))
    new_values = new_reduced_joint.values.ravel()
    sortx = new_values.argsort()[::-1]
    sort_new_state_idxs = new_state_idxs.take(sortx, axis=0)
    sort_new_values = new_values.take(sortx)
    sort_new_states = list(zip(*[
        ut.dict_take(model.statename_dict[var], idx)
        for var, idx in
        zip(reduced_variables, sort_new_state_idxs.T)]))

    # Better map assignment based on knowledge of labels
    map_assign = dict(zip(reduced_variables, sort_new_states[0]))

    sort_reduced_rowstr_lbls = [
        ut.repr2(dict(zip(reduced_variables, lbls)), explicit=True,
                 nobraces=True,
                 strvals=True)
        for lbls in sort_new_states
    ]

    top_assignments = list(zip(sort_reduced_rowstr_lbls[:4], sort_new_values))
    if len(sort_new_values) > 3:
        top_assignments += [('other', 1 - sum(sort_new_values[:4]))]
    query_results = {
        'factor_list': factor_list,
        'top_assignments': top_assignments,
        'map_assign': map_assign,
        'method': method,
    }
    print('query_results = %s' % (ut.repr3(query_results, nl=2),))
    return query_results
Code example #54
File: viz_matches.py Project: Erotemic/ibeis
def annotate_matches3(ibs, aid_list, bbox_list, offset_list, name_fm_list,
                      name_fs_list, qreq_=None, **kwargs):
    """
    TODO: use this as the main function.
    """
    # TODO Use this function when you clean show_matches
    in_image    = kwargs.get('in_image', False)
    #show_query  = kwargs.get('show_query', True)
    draw_border = kwargs.get('draw_border', True)
    draw_lbl    = kwargs.get('draw_lbl', True)
    notitle     = kwargs.get('notitle', False)
    # List of annotation scores for each annot in the name

    #printDBG('[viz] annotate_matches3()')
    #truth = ibs.get_match_truth(aid1, aid2)

    #name_equality = (
    #    np.array(ibs.get_annot_nids(aid_list[1:])) == ibs.get_annot_nids(aid_list[0])
    #).tolist()
    #truth = 1 if all(name_equality) else (2 if any(name_equality) else 0)
    #truth_color = vh.get_truth_color(truth)
    ## Build title

    #score         = kwargs.pop('score', None)
    #rawscore      = kwargs.pop('rawscore', None)
    #aid2_raw_rank = kwargs.pop('aid2_raw_rank', None)
    #print(kwargs)
    #title = vh.get_query_text(ibs, None, aid2, truth, qaid=aid1, **kwargs)
    # Build xlbl
    ax = pt.gca()
    ph.set_plotdat(ax, 'viztype', 'multi_match')
    ph.set_plotdat(ax, 'qaid', aid_list[0])
    ph.set_plotdat(ax, 'num_matches', len(aid_list) - 1)
    ph.set_plotdat(ax, 'aid_list', aid_list[1:])
    for count, aid in enumerate(aid_list, start=1):
        ph.set_plotdat(ax, 'aid%d' % (count,), aid)

    #name_equality = (ibs.get_annot_nids(aid_list[0]) ==
    #                 np.array(ibs.get_annot_nids(aid_list[1:])))
    #truth = 1 if np.all(name_equality) else (2 if np.any(name_equality) else 0)
    truth = get_multitruth(ibs, aid_list)
    if any(ibs.is_aid_unknown(aid_list[1:])) or ibs.is_aid_unknown(aid_list[0]):
        truth = ibs.const.TRUTH_UNKNOWN
    truth_color = vh.get_truth_color(truth)

    name_annot_scores = kwargs.get('name_annot_scores', None)
    if len(aid_list) == 2:
        # HACK; generalize to multiple annots
        title = vh.get_query_text(ibs, None, aid_list[1], truth, qaid=aid_list[0], **kwargs)
        if not notitle:
            pt.set_title(title, ax)

    if draw_lbl:
        # Build labels
        nid_list = ibs.get_annot_nids(aid_list, distinguish_unknowns=False)
        name_list = ibs.get_annot_names(aid_list)
        lbls_list = [[] for _ in range(len(aid_list))]
        if kwargs.get('show_name', False):
            for count, (lbls, name) in enumerate(zip(lbls_list, name_list)):
                lbls.append(ut.repr2(name))
        if kwargs.get('show_nid', True):
            for count, (lbls, nid) in enumerate(zip(lbls_list, nid_list)):
                # only label the first two images with nids
                LABEL_ALL_NIDS = False
                if count <= 1 or LABEL_ALL_NIDS:
                    #lbls.append(vh.get_nidstrs(nid))
                    lbls.append(('q' if count == 0 else '') + vh.get_nidstrs(nid))
        if kwargs.get('show_aid', True):
            for count, (lbls, aid) in enumerate(zip(lbls_list, aid_list)):
                lbls.append(('q' if count == 0 else '') + vh.get_aidstrs(aid))
        if (kwargs.get('show_annot_score', True) and
              name_annot_scores is not None):
            max_digits = kwargs.get('score_precision', None)
            for (lbls, score) in zip(lbls_list[1:], name_annot_scores):
                lbls.append(ut.num_fmt(score, max_digits=max_digits))
        lbl_list = [' : '.join(lbls) for lbls in lbls_list]
    else:
        lbl_list = [None] * len(aid_list)
    #pt.set_title(title, ax)
    # Plot annotations over images
    if in_image:
        in_image_bbox_list = vh.get_bboxes(ibs, aid_list, offset_list)
        in_image_theta_list = ibs.get_annot_thetas(aid_list)
        # HACK!
        #if show_query:
        #    pt.draw_bbox(bbox1, bbox_color=pt.ORANGE, lbl=lbl1, theta=theta1)
        bbox_color = truth_color if draw_border else pt.ORANGE
        for bbox, theta, lbl in zip(in_image_bbox_list, in_image_theta_list,
                                    lbl_list):
            pt.draw_bbox(bbox, bbox_color=bbox_color, lbl=lbl, theta=theta)
            pass
    else:
        xy, w, h = pt.get_axis_xy_width_height(ax)
        #theta2 = 0

        #if xywh2 is None:
        #    #xywh2 = (xy[0], xy[1], w, h)
        #    # weird when sidebyside is off y seems to be inverted
        #    xywh2 = (0,  0, w, h)

        #if not show_query and xywh1 is None:
        #    data_config2 = None if qreq_ is None else
        #    qreq_.get_external_data_config2()
        #    kpts2 = ibs.get_annot_kpts([aid2], config2_=data_config2)[0]
        #    #pt.draw_kpts2(kpts2.take(fm.T[1], axis=0))
        #    # Draw any selected matches
        #    #sm_kw = dict(rect=True, colors=pt.BLUE)
        #    pt.plot_fmatch(None, xywh2, None, kpts2, fm, fs=fs, **kwargs)
        #if draw_border:
        #    pt.draw_border(ax, truth_color, 4, offset=offset2)
        if draw_border:
            pt.draw_border(ax, color=truth_color, lw=4)
        if draw_lbl:
            # Custom user lbl for chips 1 and 2
            #if show_query:
            #    (x1, y1, w1, h1) = xywh1
            #    pt.absolute_lbl(x1 + w1, y1, lbl1)
            for bbox, lbl in zip(bbox_list, lbl_list):
                (x, y, w, h) = bbox
                pt.absolute_lbl(x + w, y, lbl)
    # No matches draw a red box
    if True:
        no_matches = name_fm_list is None or all(
            fm is None or len(fm) == 0 for fm in name_fm_list)
        if no_matches:
            xy, w, h = pt.get_axis_xy_width_height(ax)
            #axes_bbox = (xy[0], xy[1], w, h)
            if draw_border:
                pass
Code example #55
 def print_status(self):
     print('is_down = ' + ut.repr2(self.is_down))
     print('is_drag = ' + ut.repr2(self.is_drag))
Code example #56
File: workspace.py Project: Erotemic/ibeis
def segmentation_example():
    import vigra
    import opengm
    import sklearn
    import sklearn.mixture
    import sklearn.decomposition
    import numpy as np
    from vigra import graphs
    import matplotlib as mpl
    import plottool as pt

    pt.ensure_pylab_qt4()

    # load image and convert to LAB
    img_fpath = str(ut.grab_test_imgpath(str('lena.png')))
    img = vigra.impex.readImage(img_fpath)
    imgLab = vigra.colors.transform_RGB2Lab(img)

    superpixelDiameter = 15   # super-pixel size
    slicWeight = 15.0        # SLIC color - spatial weight
    labels, nseg = vigra.analysis.slicSuperpixels(imgLab, slicWeight,
                                                  superpixelDiameter)
    labels = vigra.analysis.labelImage(labels) - 1

    # get 2D grid graph and RAG
    gridGraph = graphs.gridGraph(img.shape[0:2])
    rag = graphs.regionAdjacencyGraph(gridGraph, labels)

    # Node Features
    nodeFeatures = rag.accumulateNodeFeatures(imgLab)
    nodeFeaturesImg = rag.projectNodeFeaturesToGridGraph(nodeFeatures)
    nodeFeaturesImg = vigra.taggedView(nodeFeaturesImg, "xyc")
    nodeFeaturesImgRgb = vigra.colors.transform_Lab2RGB(nodeFeaturesImg)

    nCluster = 5
    g = sklearn.mixture.GMM(n_components=nCluster)
    g.fit(nodeFeatures[:, :])
    clusterProb = g.predict_proba(nodeFeatures)
    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/Irregular%20Factor%20Graphs.ipynb
    # https://github.com/opengm/opengm/blob/master/src/interfaces/python/examples/tutorial/Hard%20and%20Soft%20Constraints.ipynb
    clusterProbImg = rag.projectNodeFeaturesToGridGraph(
        clusterProb.astype(np.float32))
    clusterProbImg = vigra.taggedView(clusterProbImg, "xyc")

    ndim_data = clusterProbImg.reshape((-1, nCluster))
    pca = sklearn.decomposition.PCA(n_components=3)
    print(ndim_data.shape)
    pca.fit(ndim_data)
    print(ut.repr2(pca.explained_variance_ratio_, precision=2))
    oldshape = (clusterProbImg.shape[0:2] + (-1,))
    clusterProgImg3 = pca.transform(ndim_data).reshape(oldshape)
    print(clusterProgImg3.shape)

    # graphical model with as many variables
    # as superpixels, each has 3 states
    gm = opengm.gm(np.ones(rag.nodeNum, dtype=opengm.label_type) * nCluster)
    # convert probabilities to energies
    probs = np.clip(clusterProb, 0.00001, 0.99999)
    costs = -1.0 * np.log(probs)
    # add ALL unaries AT ONCE
    fids = gm.addFunctions(costs)
    gm.addFactors(fids, np.arange(rag.nodeNum))
    # add a potts function
    beta = 40.0  # strength of potts regularizer
    regularizer = opengm.pottsFunction([nCluster] * 2, 0.0, beta)
    fid = gm.addFunction(regularizer)
    # get variable indices of adjacent superpixels
    # - or "u" and "v" node id's for edges
    uvIds = rag.uvIds()
    uvIds = np.sort(uvIds, axis=1)
    # add all second order factors at once
    gm.addFactors(fid, uvIds)

    # run belief propagation on the graphical model
    Inf = opengm.inference.BeliefPropagation
    parameter = opengm.InfParam(steps=10, damping=0.5, convergenceBound=0.001)
    inf = Inf(gm, parameter=parameter)

    class PyCallback(object):

        def __init__(self,):
            self.labels = []

        def begin(self, inference):
            print("begin of inference")

        def end(self, inference):
            self.labels.append(inference.arg())

        def visit(self, inference):
            gm = inference.gm()
            labelVector = inference.arg()
            print("energy  %r" % (gm.evaluate(labelVector),))
            self.labels.append(labelVector)

    callback = PyCallback()
    visitor = inf.pythonVisitor(callback, visitNth=1)

    inf.infer(visitor)

    pt.imshow(clusterProgImg3.swapaxes(0, 1))
    # plot superpixels
    cmap = mpl.colors.ListedColormap(np.random.rand(nseg, 3))
    pt.imshow(labels.swapaxes(0, 1).squeeze(), cmap=cmap)
    pt.imshow(nodeFeaturesImgRgb)

    cmap = mpl.colors.ListedColormap(np.random.rand(nCluster, 3))
    for arg in callback.labels:
        arg = vigra.taggedView(arg, "n")
        argImg = rag.projectNodeFeaturesToGridGraph(arg.astype(np.uint32))
        argImg = vigra.taggedView(argImg, "xy")
        # plot superpixels
        pt.imshow(argImg.swapaxes(0, 1).squeeze(), cmap=cmap)
Code example #57
def execstr_dict(dict_, local_name=None, exclude_list=None, explicit=False):
    """
    returns execable python code that declares variables using keys and values

    execstr_dict

    Args:
        dict_ (dict):
        local_name (str): optional: local name of dictionary. Specifying this
            is much safer
        exclude_list (list):
        explicit (bool): if True, assign literal values instead of
            indexing into the dictionary

    Returns:
        str: execstr --- the executable string that will put keys from dict
            into local vars

    CommandLine:
        python -m utool.util_dbg --test-execstr_dict

    Example:
        >>> # UNSTABLE_DOCTEST
        >>> from utool.util_dbg import *  # NOQA
        >>> my_dictionary = {'a': True, 'b': False}
        >>> execstr = execstr_dict(my_dictionary)
        >>> exec(execstr)
        >>> assert 'a' in vars() and 'b' in vars(), 'execstr failed'
        >>> assert b is False and a is True, 'execstr failed'
        >>> result = execstr
        >>> print(result)
        a = my_dictionary['a']
        b = my_dictionary['b']

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dbg import *  # NOQA
        >>> import utool as ut
        >>> my_dictionary = {'a': True, 'b': False}
        >>> execstr = execstr_dict(my_dictionary)
        >>> locals_ = locals()
        >>> exec(execstr, locals_)
        >>> a, b = ut.dict_take(locals_, ['a', 'b'])
        >>> assert 'a' in locals_ and 'b' in locals_, 'execstr failed'
        >>> assert b is False and a is True, 'execstr failed'
        >>> result = execstr
        >>> print(result)
        a = my_dictionary['a']
        b = my_dictionary['b']

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_dbg import *  # NOQA
        >>> import utool as ut
        >>> my_dictionary = {'a': True, 'b': False}
        >>> execstr = execstr_dict(my_dictionary, explicit=True)
        >>> result = execstr
        >>> print(result)
        a = True
        b = False
    """
    import utool as ut
    if explicit:
        expr_list = []
        for (key, val) in sorted(dict_.items()):
            assert isinstance(key, six.string_types), 'keys must be strings'
            expr_list.append('%s = %s' % (key, ut.repr2(val),))
        execstr = '\n'.join(expr_list)
        #print(execstr)
        return execstr
    else:
        if local_name is None:
            # Magic way of getting the local name of dict_
            local_name = get_varname_from_locals(dict_, get_parent_locals())
        try:
            if exclude_list is None:
                exclude_list = []
            assert isinstance(exclude_list, list)
            exclude_list.append(local_name)
            expr_list = []
            assert isinstance(dict_, dict), 'incorrect type: type(dict_)=%r, dict_=%r' % (type(dict_), dict_)
            for (key, val) in sorted(dict_.items()):
                assert isinstance(key, six.string_types), 'keys must be strings'
                if not is_valid_varname(key):
                    continue
                if not any((fnmatch.fnmatch(key, pat) for pat in exclude_list)):
                    expr = '%s = %s[%s]' % (key, local_name, ut.repr2(key))
                    expr_list.append(expr)
            execstr = '\n'.join(expr_list)
            #print(execstr)
            return execstr
        except Exception as ex:
            locals_ = locals()
            ut.printex(ex, key_list=['locals_'])
            raise
Code example #58
File: util_config.py Project: Erotemic/utool
def write_default_repo_config():
    import utool
    CONFIG_DICT = utool.get_default_repo_config()
    config_str = utool.repr2(CONFIG_DICT, strvals=True, newlines=True,
                             recursive=True)
    print(config_str)
Code example #59
File: zmq_task_queue.py Project: heroinlin/ibeis
 def helloworld(time_=0, *args, **kwargs):
     time.sleep(time_)
     retval = ('HELLO time_=%r ' % (time_,)) + ut.repr2((args, kwargs))
     return retval
Code example #60
File: demobayes.py Project: heroinlin/ibeis
def get_toy_data_1vM(num_annots, num_names=None, **kwargs):
    r"""
    Args:
        num_annots (int):
        num_names (int): (default = None)

    Kwargs:
        initial_aids, initial_nids, nid_sequence, seed

    Returns:
        tuple: (pair_list, feat_list)

    CommandLine:
        python -m ibeis.algo.hots.demobayes --exec-get_toy_data_1vM --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> num_annots = 1000
        >>> num_names = 40
        >>> get_toy_data_1vM(num_annots, num_names)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    import vtool as vt
    tup_ = get_toy_annots(num_annots, num_names, **kwargs)
    aids, nids, aids1, nids1, all_aids, all_nids = tup_
    rng = vt.ensure_rng(None)

    # Test a simple SVM classifier
    nid2_nexemp = ut.dict_hist(nids1)
    aid2_nid = dict(zip(aids, nids))

    ut.fix_embed_globals()

    #def add_to_globals(globals_, subdict):
    #    globals_.update(subdict)

    unique_nids = list(nid2_nexemp.keys())

    def annot_to_class_feats2(aid, aid2_nid, top=None):
        pair_list = []
        score_list = []
        nexemplar_list = []
        for nid in unique_nids:
            label = (aid2_nid[aid] == nid)
            num_exemplars = nid2_nexemp.get(nid, 0)
            if num_exemplars == 0:
                continue
            params = toy_params[label]
            mu, sigma = ut.dict_take(params, ['mu', 'sigma'])
            score_ = rng.normal(mu, sigma, size=num_exemplars).max()
            score = np.clip(score_, 0, np.inf)
            pair_list.append((aid, nid))
            score_list.append(score)
            nexemplar_list.append(num_exemplars)
        rank_list = ut.argsort(score_list, reverse=True)
        feat_list = np.array([score_list, rank_list, nexemplar_list]).T
        sortx = np.argsort(rank_list)
        feat_list = feat_list.take(sortx, axis=0)
        pair_list = np.array(pair_list).take(sortx, axis=0)
        if top is not None:
            feat_list = feat_list[:top]
            pair_list = pair_list[0:top]
        return pair_list, feat_list

    toclass_features = [annot_to_class_feats2(aid, aid2_nid, top=5) for aid in aids]
    aidnid_pairs = np.vstack(ut.get_list_column(toclass_features, 0))
    feat_list = np.vstack(ut.get_list_column(toclass_features, 1))
    score_list = feat_list.T[0:1].T
    lbl_list = [aid2_nid[aid] == nid for aid, nid in aidnid_pairs]

    from sklearn import svm
    #clf1 = svm.LinearSVC()
    print('Learning classifiers')

    clf3 = svm.SVC()
    clf3.fit(feat_list, lbl_list)

    clf1 = svm.LinearSVC()
    clf1.fit(score_list, lbl_list)

    # Score new annots against the training database
    tup_ = get_toy_annots(num_annots * 2, num_names, initial_aids=all_aids, initial_nids=all_nids)
    aids, nids, aids1, nids1, all_aids, all_nids = tup_
    aid2_nid = dict(zip(aids, nids))
    toclass_features = [annot_to_class_feats2(aid, aid2_nid) for aid in aids]
    aidnid_pairs = np.vstack(ut.get_list_column(toclass_features, 0))
    feat_list = np.vstack(ut.get_list_column(toclass_features, 1))
    lbl_list = np.array([aid2_nid[aid] == nid for aid, nid in aidnid_pairs])

    print('Running tests')

    score_list = feat_list.T[0:1].T

    tp_feat_list = feat_list[lbl_list]
    tn_feat_list = feat_list[~lbl_list]
    tp_lbls = lbl_list[lbl_list]
    tn_lbls = lbl_list[~lbl_list]
    print('num tp: %d' % len(tp_lbls))
    print('num tn: %d' % len(tn_lbls))

    tp_score_list = score_list[lbl_list]
    tn_score_list = score_list[~lbl_list]

    print('tp_feat' + ut.repr3(ut.get_stats(tp_feat_list, axis=0), precision=2))
    print('tn_feat' + ut.repr3(ut.get_stats(tn_feat_list, axis=0), precision=2))

    print('tp_score' + ut.repr2(ut.get_stats(tp_score_list), precision=2))
    print('tn_score' + ut.repr2(ut.get_stats(tn_score_list), precision=2))

    tp_pred3 = clf3.predict(tp_feat_list)
    tn_pred3 = clf3.predict(tn_feat_list)
    print((tp_pred3.sum(), tp_pred3.shape))
    print((tn_pred3.sum(), tn_pred3.shape))

    tp_score3 = clf3.score(tp_feat_list, tp_lbls)
    tn_score3 = clf3.score(tn_feat_list, tn_lbls)

    tp_pred1 = clf1.predict(tp_score_list)
    tn_pred1 = clf1.predict(tn_score_list)
    print((tp_pred1.sum(), tp_pred1.shape))
    print((tn_pred1.sum(), tn_pred1.shape))

    tp_score1 = clf1.score(tp_score_list, tp_lbls)
    tn_score1 = clf1.score(tn_score_list, tn_lbls)
    print('tp score with rank    = %r' % (tp_score3,))
    print('tn score with rank    = %r' % (tn_score3,))

    print('tp score without rank = %r' % (tp_score1,))
    print('tn score without rank = %r' % (tn_score1,))
    toy_data = {}

    return toy_data