def hack(ibs):
    """
    Debug helper: for every imageset in ``ibs``, print which name-linked
    imagesets its (known) annotations connect to.

    Args:
        ibs: IBEIS controller (provides imageset/annot/name getters).

    NOTE(review): the outer body uses ``ut`` but only the nested function
    imports it locally -- presumably ``ut`` (utool) is also imported at
    module level; confirm against the full file.
    """
    #ibs.get_imageset_text(imgsetid_list)
    #imgsetid = ibs.get_imageset_imgsetids_from_text("NNP GZC Car '1PURPLE'")

    def get_name_linked_imagesets_by_imgsetid(ibs, imgsetid):
        # Map one imageset to the texts of the imagesets linked to it
        # through the names of its known annotations.
        import utool as ut
        #gid_list = ibs.get_imageset_gids(imgsetid)
        aid_list_ = ibs.get_imageset_aids(imgsetid)
        # Drop annotations whose identity is unknown.
        aid_list = ut.filterfalse_items(aid_list_, ibs.is_aid_unknown(aid_list_))
        #all(ibs.db.check_rowid_exists(const.ANNOTATION_TABLE, aid_list))
        #aids_list2 = ibs.get_image_aids(gid_list)
        #assert ut.flatten(aids_list2) == aids_list1
        nid_list = list(set(ibs.get_annot_nids(aid_list, distinguish_unknowns=False)))  # remove unknown annots
        name_imgsetids = ibs.get_name_imgsetids(nid_list)
        name_imagesettexts = ibs.get_imageset_text(name_imgsetids)
        return name_imagesettexts

    imgsetid_list = ibs.get_valid_imgsetids()
    linked_imagesettexts = [get_name_linked_imagesets_by_imgsetid(ibs, imgsetid)
                            for imgsetid in imgsetid_list]
    imagesettext_list = ibs.get_imageset_text(imgsetid_list)
    # Dump three views: by id, by text, and text->id (sorted).
    print(ut.dict_str(dict(zip(imgsetid_list, linked_imagesettexts))))
    print(ut.align(ut.dict_str(dict(zip(imagesettext_list, linked_imagesettexts))), ':'))
    print(ut.align(ut.dict_str(dict(zip(imagesettext_list, imgsetid_list)), sorted_=True), ':'))
def api_remote_ibeis(remote_ibeis_url, remote_api_func, remote_ibeis_port=5001, **kwargs):
    """
    Call an IBEIS web API function on a remote server.

    The local Flask app's URL map resolves ``remote_api_func`` to its route
    and HTTP method; the same route is then invoked on the remote host and
    the 'response' field of the JSON-wrapped reply is returned.

    Args:
        remote_ibeis_url (str): host name / IP of the remote IBEIS server
        remote_api_func (callable): local API function used to look up the route
        remote_ibeis_port (int): remote port (default = 5001)
        **kwargs: forwarded as request data; sequence values are stringified

    Returns:
        object: the 'response' value parsed from the remote JSON reply

    Raises:
        ValueError: if the Flask app has not been initialized
        KeyError: if the resolved HTTP method is unsupported
        IOError: if the remote server cannot be reached
    """
    import requests
    if GLOBAL_APP_ENABLED and GLOBAL_APP is None:
        raise ValueError('Flask has not been initialized')
    api_name = remote_api_func.__name__
    route_list = list(GLOBAL_APP.url_map.iter_rules(api_name))
    assert len(route_list) == 1, 'More than one route resolved'
    route = route_list[0]
    api_route = route.rule
    assert api_route.startswith('/api/'), 'Must be an API route'
    method_list = sorted(list(route.methods - set(['HEAD', 'OPTIONS'])))
    remote_api_method = method_list[0].upper()
    assert api_route is not None, 'Route could not be found'
    args = (remote_ibeis_url, remote_ibeis_port, api_route)
    remote_api_url = 'http://%s:%s%s' % args
    headers = {
        'Authorization': get_url_authorization(remote_api_url)
    }
    # Sequences cannot be sent directly as form data; stringify them.
    for key in kwargs.keys():
        value = kwargs[key]
        if isinstance(value, (tuple, list, set)):
            value = str(list(value))
        kwargs[key] = value
    print('[REMOTE] %s' % ('-' * 80, ))
    print('[REMOTE] Calling remote IBEIS API: %r' % (remote_api_url, ))
    print('[REMOTE] \tMethod: %r' % (remote_api_method, ))
    print('[REMOTE] \tHeaders: %s' % (ut.dict_str(headers), ))
    print('[REMOTE] \tKWArgs: %s' % (ut.dict_str(kwargs), ))
    # Make request to server
    try:
        if remote_api_method == 'GET':
            req = requests.get(remote_api_url, headers=headers, data=kwargs, verify=False)
        elif remote_api_method == 'POST':
            req = requests.post(remote_api_url, headers=headers, data=kwargs, verify=False)
        elif remote_api_method == 'PUT':
            req = requests.put(remote_api_url, headers=headers, data=kwargs, verify=False)
        elif remote_api_method == 'DELETE':
            req = requests.delete(remote_api_url, headers=headers, data=kwargs, verify=False)
        else:
            message = '_api_result got unsupported method=%r' % (remote_api_method, )
            raise KeyError(message)
    except requests.exceptions.ConnectionError as ex:
        message = '_api_result could not connect to server %s' % (ex, )
        raise IOError(message)
    response = req.text
    # BUG FIX: was ``ut.from_json(value)`` -- that parsed a stale loop
    # variable from the kwargs loop above (and raises NameError when
    # kwargs is empty).  The server's reply text is what must be parsed.
    converted = ut.from_json(response)
    response = converted.get('response', None)
    print('response = %s' % (response,))
    return response
def dictinfo(dict_):
    """
    Build an indented, human-readable summary of a dict: key count, key/value
    types, and (when values are homogeneous) per-type statistics.

    Args:
        dict_ (dict): dict to summarize; any other type returns an error string

    Returns:
        str: indented multi-line report

    NOTE: the report templates are filled via ``.format(**locals())`` at the
    end, so the local variable names here are load-bearing -- do not rename.
    """
    if not isinstance(dict_, dict):
        return 'expected dict got %r' % type(dict_)
    keys = list(dict_.keys())
    vals = list(dict_.values())
    num_keys = len(keys)
    key_types = list(set(map(type, keys)))
    val_types = list(set(map(type, vals)))
    fmtstr_ = '\n' + ut.unindent('''
    * num_keys = {num_keys}
    * key_types = {key_types}
    * val_types = {val_types}
    '''.strip('\n'))
    if len(val_types) == 1:
        if val_types[0] == np.ndarray:
            # each key holds an ndarray
            val_shape_stats = ut.get_stats(set(map(np.shape, vals)), axis=0)
            val_shape_stats_str = ut.dict_str(val_shape_stats, strvals=True, newlines=False)
            val_dtypes = list(set([val.dtype for val in vals]))
            fmtstr_ += ut.unindent('''
            * val_shape_stats = {val_shape_stats_str}
            * val_dtypes = {val_dtypes}
            '''.strip('\n'))
        elif val_types[0] == list:
            # each key holds a list
            val_len_stats = ut.get_stats(set(map(len, vals)))
            val_len_stats_str = ut.dict_str(val_len_stats, strvals=True, newlines=False)
            depth = ut.list_depth(vals)
            deep_val_types = list(set(ut.list_deep_types(vals)))
            fmtstr_ += ut.unindent('''
            * list_depth = {depth}
            * val_len_stats = {val_len_stats_str}
            * deep_types = {deep_val_types}
            '''.strip('\n'))
            if len(deep_val_types) == 1:
                if deep_val_types[0] == np.ndarray:
                    deep_val_dtypes = list(set([val.dtype for val in vals]))
                    fmtstr_ += ut.unindent('''
                    * deep_val_dtypes = {deep_val_dtypes}
                    ''').strip('\n')
        elif val_types[0] in [
            np.uint8, np.int8, np.int32, np.int64, np.float16, np.float32, np.float64
        ]:
            # each key holds a scalar
            val_stats = ut.get_stats(vals)
            fmtstr_ += ut.unindent('''
            * val_stats = {val_stats}
            ''').strip('\n')
    fmtstr = fmtstr_.format(**locals())
    return ut.indent(fmtstr)
def dictinfo(dict_):
    """
    Build an indented, human-readable summary of a dict: key count, key/value
    types, and (when values are homogeneous) per-type statistics.

    Args:
        dict_ (dict): dict to summarize; any other type returns an error string

    Returns:
        str: indented multi-line report

    NOTE(review): near-duplicate of another ``dictinfo`` definition in this
    file -- consider consolidating.  The report templates are filled via
    ``.format(**locals())``, so local variable names are load-bearing.
    """
    if not isinstance(dict_, dict):
        return 'expected dict got %r' % type(dict_)
    keys = list(dict_.keys())
    vals = list(dict_.values())
    num_keys = len(keys)
    key_types = list(set(map(type, keys)))
    val_types = list(set(map(type, vals)))
    fmtstr_ = '\n' + ut.unindent('''
    * num_keys = {num_keys}
    * key_types = {key_types}
    * val_types = {val_types}
    '''.strip('\n'))
    if len(val_types) == 1:
        if val_types[0] == np.ndarray:
            # each key holds an ndarray
            val_shape_stats = ut.get_stats(set(map(np.shape, vals)), axis=0)
            val_shape_stats_str = ut.dict_str(val_shape_stats, strvals=True, newlines=False)
            val_dtypes = list(set([val.dtype for val in vals]))
            fmtstr_ += ut.unindent('''
            * val_shape_stats = {val_shape_stats_str}
            * val_dtypes = {val_dtypes}
            '''.strip('\n'))
        elif val_types[0] == list:
            # each key holds a list
            val_len_stats = ut.get_stats(set(map(len, vals)))
            val_len_stats_str = ut.dict_str(val_len_stats, strvals=True, newlines=False)
            depth = ut.list_depth(vals)
            deep_val_types = list(set(ut.list_deep_types(vals)))
            fmtstr_ += ut.unindent('''
            * list_depth = {depth}
            * val_len_stats = {val_len_stats_str}
            * deep_types = {deep_val_types}
            '''.strip('\n'))
            if len(deep_val_types) == 1:
                if deep_val_types[0] == np.ndarray:
                    deep_val_dtypes = list(set([val.dtype for val in vals]))
                    fmtstr_ += ut.unindent('''
                    * deep_val_dtypes = {deep_val_dtypes}
                    ''').strip('\n')
        elif val_types[0] in [np.uint8, np.int8, np.int32, np.int64,
                              np.float16, np.float32, np.float64]:
            # each key holds a scalar
            val_stats = ut.get_stats(vals)
            fmtstr_ += ut.unindent('''
            * val_stats = {val_stats}
            ''').strip('\n')
    fmtstr = fmtstr_.format(**locals())
    return ut.indent(fmtstr)
def update_query_cfg(query_cfg, **cfgdict):
    """
    Propagate keyword config parameters into ``query_cfg`` and every one of
    its nested sub-configs, then validate the result.

    Args:
        query_cfg: top-level query config object (mutated in place)
        **cfgdict: parameter overrides; 'codename' is applied first

    Raises:
        AssertionError: re-raised from ``make_feasible`` if the resulting
            configuration is inconsistent (the offending cfgdict is printed).
    """
    # Each config paramater should be unique
    # So updating them all should not cause conflicts
    # FIXME: Should be able to infer all the children that need updates
    #
    # apply codename before updating subconfigs
    query_cfg.apply_codename(cfgdict.get('codename', None))
    # update subconfigs
    query_cfg.nn_cfg.update(**cfgdict)
    query_cfg.nnweight_cfg.update(**cfgdict)
    query_cfg.sv_cfg.update(**cfgdict)
    query_cfg.agg_cfg.update(**cfgdict)
    query_cfg.flann_cfg.update(**cfgdict)
    query_cfg.smk_cfg.update(**cfgdict)
    query_cfg.smk_cfg.vocabassign_cfg.update(**cfgdict)
    query_cfg.smk_cfg.vocabtrain_cfg.update(**cfgdict)
    query_cfg.rrvsone_cfg.update(**cfgdict)
    query_cfg._featweight_cfg.update(**cfgdict)
    query_cfg._featweight_cfg._feat_cfg.update(**cfgdict)
    query_cfg._featweight_cfg._feat_cfg._chip_cfg.update(**cfgdict)
    # Update the top level last so it wins any shared keys.
    query_cfg.update(**cfgdict)
    # Ensure feasibility of the configuration
    try:
        query_cfg.make_feasible()
    except AssertionError as ex:
        print(ut.dict_str(cfgdict, sorted_=True))
        ut.printex(ex)
        raise
def load_gztest(ibs):
    r"""
    Load hand-curated GZ test-case tuples from GZ_TESTTUP.txt.

    CommandLine:
        python -m ibeis.algo.hots.special_query --test-load_gztest

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.devcases import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb('GZ_ALL')
    """
    from os.path import join
    from ibeis.algo.hots import match_chips4 as mc4
    dir_ = ut.get_module_dir(mc4)
    eval_text = ut.read_from(join(dir_, 'GZ_TESTTUP.txt'))
    # HACK/SECURITY: eval of file contents -- acceptable only because this is
    # a dev-only helper reading a repo-local file; never point it at
    # untrusted data.
    testcases = eval(eval_text)
    count_dict = ut.count_dict_vals(testcases)
    print(ut.dict_str(count_dict))
    testtup_list = ut.flatten(ut.dict_take_list(testcases, [
        'vsone_wins', 'vsmany_outperformed', 'vsmany_dominates',
        'vsmany_wins']))
    qaid_list = [testtup.qaid_t for testtup in testtup_list]
    visual_uuids = ibs.get_annot_visual_uuids(qaid_list)
    # NOTE(review): bare expression below is a no-op (likely a leftover from
    # interactive debugging); the function returns None.
    visual_uuids
def vector_normal_stats(vectors):
    """Print summary statistics of the row-wise L2 norms of ``vectors``."""
    import numpy.linalg as npl
    row_norms = npl.norm(vectors, axis=1)
    #norm_list2 = np.sqrt((vectors ** 2).sum(axis=1))
    #assert np.all(norm_list == norm_list2)
    stats_dict = ut.get_stats(row_norms)
    print('normal_stats:' + ut.dict_str(stats_dict, newlines=False))
def checkinfo_wrapper(*args, **kwargs):
    """
    Closure wrapping a ``*_version`` probe function (``func``) with version
    checking against ``target`` (both bound in the enclosing scope).

    Returns:
        tuple: (passed, current_version, target, infodict, statustext,
                suggested_fix)
    """
    suggested_fix = ''
    funcname = get_funcname(func)
    packagename = funcname.replace('_version', '')
    # Prefer an explicit pip name when one was supplied.
    pipname_ = pipname if pipname is not None else packagename
    try:
        infodict = func(*args, **kwargs)
    except ImportError as ex:
        # Module missing entirely: suggest installing it.
        infodict = module_stdinfo_dict(None, name=pipname_)
        suggested_fix = 'pip install ' + pipname_
        if not sys.platform.startswith('win32'):
            suggested_fix = 'sudo ' + suggested_fix
        return False, 'None', target, infodict, ut.formatex(ex), suggested_fix
    except Exception as ex:
        infodict = module_stdinfo_dict(None, name=pipname_)
        return False, 'None', target, infodict, ut.formatex(ex), 'Some unknown error in ' + packagename
    current_version = infodict['__version__']
    # Build status text
    msg = ut.dict_str(infodict, strvals=True)
    msg += '\n' + '%s: %r >= (target=%r)?' % (funcname, current_version, target)
    statustext = ut.msgblock(infodict['__name__'], msg)
    # Check if passed
    passed = version_ge_target(current_version, target)
    # Suggest possible fix
    if not passed:
        suggested_fix = 'pip install ' + infodict['__name__'] + ' --upgrade'
        if not sys.platform.startswith('win32'):
            suggested_fix = 'sudo ' + suggested_fix
    return passed, current_version, target, infodict, statustext, suggested_fix
def load_gztest(ibs):
    r"""
    Load hand-curated GZ test-case tuples from GZ_TESTTUP.txt.

    CommandLine:
        python -m ibeis.algo.hots.special_query --test-load_gztest

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.devcases import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb('GZ_ALL')
    """
    from os.path import join
    from ibeis.algo.hots import match_chips4 as mc4
    dir_ = ut.get_module_dir(mc4)
    eval_text = ut.read_from(join(dir_, 'GZ_TESTTUP.txt'))
    # HACK/SECURITY: eval of file contents -- dev-only helper reading a
    # repo-local file; never point at untrusted data.
    testcases = eval(eval_text)
    count_dict = ut.count_dict_vals(testcases)
    print(ut.dict_str(count_dict))
    testtup_list = ut.flatten(
        ut.dict_take_list(testcases, [
            'vsone_wins',
            'vsmany_outperformed',
            'vsmany_dominates',
            'vsmany_wins'
        ]))
    qaid_list = [testtup.qaid_t for testtup in testtup_list]
    visual_uuids = ibs.get_annot_visual_uuids(qaid_list)
    # NOTE(review): bare expression is a no-op leftover; function returns None.
    visual_uuids
def figure_clicked(self, event=None):
    """
    Matplotlib button-press handler: right-clicking a 'chip' axis pops up
    the annotation context menu for the clicked aid.

    Args:
        event: matplotlib MouseEvent.  NOTE(review): the default of None is
            dereferenced immediately (``event.inaxes``) -- presumably mpl
            always supplies the event; confirm before relying on the default.
    """
    from ibeis.viz import viz_helpers as vh
    ax = event.inaxes
    if ih.clicked_inside_axis(event):
        viztype = vh.get_ibsdat(ax, 'viztype')
        if viztype == 'chip':
            aid = vh.get_ibsdat(ax, 'aid')
            print('... aid=%r' % aid)
            if event.button == 3:   # right-click
                from ibeis.viz.interact import interact_chip
                import guitool
                # Matplotlib y is measured from the bottom; Qt from the top.
                height = self.fig.canvas.geometry().height()
                qpoint = guitool.newQPoint(event.x, height - event.y)
                # Pick the config matching whether aid is a query or data annot.
                if self.qreq_ is None:
                    config2_ = None
                else:
                    if aid in self.qreq_.qaids:
                        config2_ = self.qreq_.query_config2_
                    else:
                        config2_ = self.qreq_.data_config2_
                callback_list = interact_chip.build_annot_context_options(
                    self.ibs, aid, refresh_func=self.show_page,
                    config2_=config2_)
                guitool.popup_menu(self.fig.canvas, qpoint, callback_list)
                #interact_chip.show_annot_context_menu(
                #    self.ibs, aid, self.fig.canvas, qpoint, refresh_func=self.show_page)
                #self.show_page()
                #ibs.print_annotation_table()
    print(ut.dict_str(event.__dict__))
def cached_wraper(*args, **kwargs):
    """
    Closure wrapping ``func`` with on-disk caching keyed by a cfgstr built
    from selected args/kwargs (``key_argx``/``key_kwds`` bound in the
    enclosing scope).  Honors a ``use_cache`` kwarg override.
    """
    try:
        # NOTE(review): ``if True`` makes this debug print unconditional;
        # looks like a toggle left switched on.
        if True:
            print('[utool] computing cached function fname_=%s' % (fname_,))
        # Implicitly adds use_cache to kwargs
        cfgstr = get_cfgstr_from_args(func, args, kwargs, key_argx,
                                      key_kwds, kwdefaults, argnames)
        assert cfgstr is not None, 'cfgstr=%r cannot be None' % (cfgstr,)
        if kwargs.get('use_cache', use_cache_):
            # Make cfgstr from specified input
            data = cacher.tryload(cfgstr)
            if data is not None:
                return data
        # Cached missed compute function
        data = func(*args, **kwargs)
        # Cache save
        cacher.save(data, cfgstr)
        return data
    except Exception as ex:
        import utool
        _dbgdict2 = dict(key_argx=key_argx, lenargs=len(args), lenkw=len(kwargs),)
        msg = '\n'.join([
            '+--- UTOOL --- ERROR IN CACHED FUNCTION',
            #'dbgdict = ' + utool.dict_str(_dbgdict),
            'dbgdict2 = ' + utool.dict_str(_dbgdict2),
        ])
        utool.printex(ex, msg)
        raise
def preload_commands(dbdir, **kwargs):
    """
    Preload commands work with command line arguments and global caches.

    Args:
        dbdir (str): database directory the commands operate on
        **kwargs: 'delete_ibsdir' (bool) deletes the database first

    Side effects: reads ``params.args``, may mutate work/log dirs, may
    delete or convert the database, and may terminate the process.
    """
    #print('[main_cmd] preload_commands')
    if params.args.dump_argv:
        print(ut.dict_str(vars(params.args), sorted_=False))
    if params.args.dump_global_cache:
        ut.global_cache_dump()  # debug command, dumps to stdout
    if params.args.set_workdir is not None:
        sysres.set_workdir(params.args.set_workdir)
    if params.args.get_workdir:
        print(' Current work dir = %s' % sysres.get_workdir())
    if params.args.logdir is not None:
        sysres.set_logdir(params.args.logdir)
    if params.args.get_logdir:
        print(' Current log dir = %s' % (sysres.get_logdir(),))
    if params.args.view_logdir:
        ut.view_directory(sysres.get_logdir())
    if ut.get_argflag('--vwd'):
        vwd()
    if ut.get_argflag('--vdq'):
        print('got arg --vdq')
        vdq(dbdir)
    if kwargs.get('delete_ibsdir', False):
        ibsfuncs.delete_ibeis_database(dbdir)
    if params.args.convert:
        preload_convert_hsdb(dbdir)
    if params.args.preload_exit:
        print('[main_cmd] preload exit')
        # BUG FIX: was ``sys.exit(1)`` -- a user-requested preload exit is a
        # normal termination and should report success to the shell
        # (consistent with the sibling version of this function).
        sys.exit(0)
def get_inspect_str(qres):
    """
    Build a human-readable inspection report for a query result.

    Args:
        qres: QueryResult object

    Returns:
        str: '[INSPECT]'-indented report with the top-5 aids/scores/ranks
        and feature-match statistics.
    """
    assert_qres(qres)
    nFeatMatch_list = get_num_feats_in_matches(qres)
    nFeatMatch_stats = utool.mystats(nFeatMatch_list)
    # Row labels for the stacked aids/scores/ranks table below.
    top_lbl = utool.unindent('''
    top aids
    scores
    ranks''').strip()
    top_aids = qres.get_top_aids(num=5)
    top_scores = qres.get_aid_scores(top_aids)
    top_ranks = qres.get_aid_ranks(top_aids)
    top_stack = np.vstack((top_aids, top_scores, top_ranks))
    # Cast to int for compact printing (scores are truncated).
    top_stack = np.array(top_stack, dtype=np.int32)
    top_str = str(top_stack)
    inspect_str = '\n'.join([
        'QueryResult',
        'qaid=%r ' % qres.qaid,
        utool.horiz_string(top_lbl, ' ', top_str),
        'num Feat Matches stats:',
        utool.indent(utool.dict_str(nFeatMatch_stats)),
    ])
    inspect_str = utool.indent(inspect_str, '[INSPECT] ')
    return inspect_str
def _image_view(sel_aids=sel_aids, **_kwargs):
    # Closure: render the current image (``gid``) with the selected annots.
    # ``sel_aids`` is frozen at definition time via the default-arg idiom.
    try:
        viz.show_image(ibs, gid, sel_aids=sel_aids, fnum=self.fnum, **_kwargs)
        df2.set_figtitle('Image View')
    except TypeError as ex:
        # Dump the kwargs so bad-signature errors are debuggable, then re-raise.
        ut.printex(ex, ut.dict_str(_kwargs))
        raise
def find_close_incorrect_match(ibs, qaids):
    """
    Debug helper: run an uncached vsmany query for ``qaids[0]`` and print a
    mapping from its visual uuid to the visual uuid of its closest
    ground-false (different-name) match.

    Args:
        ibs: IBEIS controller
        qaids (list): query annot rowids (only the first is analyzed)
    """
    use_cache = False
    save_qcache = False
    cfgdict_vsmany = dict(
        index_method='single',
        pipeline_root='vsmany',
    )
    qres_vsmany_list, qreq_vsmany_ = ibs.query_chips(
        qaids, ibs.get_valid_aids(), cfgdict=cfgdict_vsmany,
        return_request=True, use_cache=use_cache,
        save_qcache=save_qcache, verbose=True)
    qres_vsmany = qres_vsmany_list[0]
    qres_vsmany.ishow_top(ibs)
    top_aids = qres_vsmany.get_top_aids()
    top_nids = ibs.get_annot_nids(top_aids)
    qaid = qaids[0]
    qnid = ibs.get_annot_nids(qaid)
    # Ground-false = results whose name differs from the query's name.
    is_groundfalse = [nid != qnid for nid in top_nids]
    top_gf_aids = ut.compress(top_aids, is_groundfalse)
    #top_gt_aids = ut.filterfalse_items(top_aids, is_groundfalse)
    top_gf_vuuids = ibs.get_annot_visual_uuids(top_gf_aids)
    qvuuid = ibs.get_annot_visual_uuids(qaid)
    # Keep only the single closest ground-false match.
    gf_mapping = {qvuuid: top_gf_vuuids[0:1]}
    print('gf_mapping = ' + ut.dict_str(gf_mapping))
    pass
def preload_commands(dbdir, **kwargs): """ Preload commands work with command line arguments and global caches """ #print('[main_cmd] preload_commands') if params.args.dump_argv: print(utool.dict_str(vars(params.args))) if params.args.dump_global_cache: utool.global_cache_dump() # debug command, dumps to stdout if params.args.workdir is not None: sysres.set_workdir(params.args.workdir) if params.args.logdir is not None: sysres.set_logdir(params.args.logdir) if utool.get_flag('--vwd'): vwd() if utool.get_flag('--vdq'): print('got arg --vdq') vdq(dbdir) if kwargs.get('delete_ibsdir', False): ibsfuncs.delete_ibeis_database(dbdir) if params.args.convert: preload_convert_hsdb(dbdir) if params.args.merge_species is not None: ibsfuncs.merge_species_databases(params.args.merge_species) if params.args.preload_exit: print('[main_cmd] preload exit') sys.exit(1)
def general_annot_coverage_mask_generator(make_mask_func, qreq_, cm, config, cov_cfg):
    """
    Yeilds:
        daid, weight_mask_m, weight_mask

    CommandLine:
        python -m ibeis.algo.hots.scoring --test-general_annot_coverage_mask_generator --show
        python -m ibeis.algo.hots.scoring --test-general_annot_coverage_mask_generator --show --qaid 18

    Note:
        Evaluate output one at a time or it will get clobbered

    Example0:
        >>> # SLOW_DOCTEST
        >>> # (IMPORTANT)
        >>> from ibeis.algo.hots.scoring import *  # NOQA
        >>> qreq_, cm = plh.testdata_scoring('PZ_MTEST', qaid_list=[18])
        >>> config = qreq_.qparams
        >>> make_mask_func, cov_cfg = get_mask_func(config)
        >>> masks_iter = general_annot_coverage_mask_generator(make_mask_func, qreq_, cm, config, cov_cfg)
        >>> daid_list, score_list, masks_list = evaluate_masks_iter(masks_iter)
        >>> #assert daid_list[idx] ==
        >>> ut.quit_if_noshow()
        >>> idx = score_list.argmax()
        >>> daids = [daid_list[idx]]
        >>> daid, weight_mask_m, weight_mask = masks_list[idx]
        >>> show_single_coverage_mask(qreq_, cm, weight_mask_m, weight_mask, daids)
        >>> ut.show_if_requested()
    """
    if ut.VERYVERBOSE:
        print('[acov] make_mask_func = %r' % (make_mask_func,))
        print('[acov] cov_cfg = %s' % (ut.dict_str(cov_cfg),))
    # Thin wrapper: unpack the ChipMatch and delegate to the shared generator.
    return general_coverage_mask_generator(make_mask_func, qreq_, cm.qaid,
                                           cm.daid_list, cm.fm_list,
                                           cm.fs_list, config, cov_cfg)
def author_hist():
    """Print (the tail of) a histogram of the module-level ``all_authors``."""
    #print(all_authors)
    hist_ = ut.dict_hist(all_authors, ordered=True)
    # Discard the empty-author bucket if it exists.
    hist_.pop('', None)
    print('Author histogram')
    print(ut.dict_str(hist_)[-1000:])
def preload_commands(dbdir, **kwargs): """ Preload commands work with command line arguments and global caches """ #print('[main_cmd] preload_commands') if params.args.dump_argv: print(ut.dict_str(vars(params.args), sorted_=False)) if params.args.dump_global_cache: ut.global_cache_dump() # debug command, dumps to stdout if params.args.set_workdir is not None: sysres.set_workdir(params.args.set_workdir) if params.args.get_workdir: print(' Current work dir = %s' % sysres.get_workdir()) if params.args.logdir is not None: sysres.set_logdir(params.args.logdir) if params.args.get_logdir: print(' Current log dir = %s' % (sysres.get_logdir(), )) if params.args.view_logdir: ut.view_directory(sysres.get_logdir()) if ut.get_argflag('--vwd'): vwd() if ut.get_argflag('--vdq'): print('got arg --vdq') vdq(dbdir) if kwargs.get('delete_ibsdir', False): ibsfuncs.delete_ibeis_database(dbdir) if params.args.convert: preload_convert_hsdb(dbdir) if params.args.preload_exit: print('[main_cmd] preload exit') sys.exit(0)
def unused_important():
    """
    Scan unused bibtex entries (module-level ``unused_keys``/``bibtex_dict``)
    and print any written by an important author or mentioning 'smk',
    with noisy fields stripped.
    """
    important_authors = [
        'hinton', 'chum', 'Jegou', 'zisserman', 'schmid', 'sivic',
        'matas', 'lowe', 'perronnin', 'douze',
    ]
    for key in unused_keys:
        entry = bibtex_dict[key]
        author = entry.get('author', '')
        #authors = author.split(' and ')
        # Case-insensitive match on the author string.
        hasimportant = any(auth in author.lower() for auth in important_authors)
        if hasimportant or 'smk' in str(entry).lower():
            # Strip metadata fields that only add noise to the printout.
            toremove = ['note', 'month', 'type', 'pages', 'urldate',
                        'language', 'volume', 'number', 'publisher']
            entry = ut.delete_dict_keys(entry, toremove)
            print(ut.dict_str(entry, strvals=True,
                              key_order=['title', 'author', 'id']))
def get_buildtime_data(**kwargs):
    """
    Benchmark FLANN index build time against growing dataset sizes.

    Args:
        **kwargs: forwarded to ``vt.get_flann_params``

    Returns:
        tuple: (data_list, flann_params) where data_list holds
        (iteration, num_points, build_seconds) tuples.

    NOTE(review): uses module-level ``pool`` for test data -- confirm its
    interface against the full file.
    """
    flann_params = vt.get_flann_params(**kwargs)
    print('flann_params = %r' % (ut.dict_str(flann_params), ))
    data_list = []
    num = 1000
    print('-----')
    for count in ut.ProgressIter(itertools.count(), nTotal=-1, freq=1,
                                 autoadjust=False):
        # Grow the dataset geometrically (20% per iteration).
        num = int(num * 1.2)
        print('num = %r' % (num, ))
        #if num > 1E6:
        #    break
        data = pool.get_testdata(num)
        print('object size ' + ut.get_object_size_str(data, 'data'))
        flann = pyflann.FLANN(**flann_params)
        with ut.Timer(verbose=False) as t:
            flann.build_index(data)
        print('t.ellapsed = %r' % (t.ellapsed, ))
        # Stop once a single build exceeds 5 seconds (or a safety cap).
        if t.ellapsed > 5 or count > 1000:
            break
        data_list.append((count, num, t.ellapsed))
        print('-----')
    return data_list, flann_params
def testdata_ibeis(**kwargs):
    """
    DEPRICATE
    Step 1

    builds ibs for testing

    Example:
        >>> from ibeis.algo.hots.smk.smk_debug import *  # NOQA
        >>> kwargs = {}
    """
    print(' === Test Data IBEIS ===')
    print('kwargs = ' + ut.dict_str(kwargs))
    print('[smk_debug] testdata_ibeis')
    # kwargs take precedence over command-line flags for every option below.
    db = kwargs.get('db', ut.get_argval('--db', str, 'PZ_MTEST'))
    #with ut.Indenter('ENSURE'):
    if db == 'PZ_MTEST':
        ibeis.ensure_pz_mtest()
    ibs = ibeis.opendb(db=db)
    ibs._default_config()
    aggregate = kwargs.get('aggregate', ut.get_argflag(('--agg', '--aggregate')))
    nWords = kwargs.get(
        'nWords', ut.get_argval(('--nWords', '--nCentroids'), int, default=8E3))
    nAssign = kwargs.get(
        'nAssign', ut.get_argval(('--nAssign', '--K'), int, default=10))
    # Configs: force the SMK pipeline and push the vocab sizes in.
    ibs.cfg.query_cfg.pipeline_root = 'smk'
    ibs.cfg.query_cfg.smk_cfg.aggregate = aggregate
    ibs.cfg.query_cfg.smk_cfg.smk_alpha = 3
    ibs.cfg.query_cfg.smk_cfg.smk_thresh = 0
    ibs.cfg.query_cfg.smk_cfg.vocabtrain_cfg.nWords = nWords
    ibs.cfg.query_cfg.smk_cfg.vocabassign_cfg.nAssign = nAssign
    if ut.VERYVERBOSE:
        ibs.cfg.query_cfg.smk_cfg.printme3()
    return ibs
def get_cfgstr(nnindexer, noquery=False):
    r"""
    returns string which uniquely identified configuration and support data

    Args:
        noquery (bool): if True cfgstr is only relevant to building the index.
            No search params are returned (default = False)

    Returns:
        str: flann_cfgstr

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index --test-get_cfgstr

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index import *  # NOQA
        >>> import ibeis
        >>> cfgdict = dict(fg_on=False)
        >>> qreq_ = ibeis.testdata_qreq_(defaultdb='testdb1', p='default:fg_on=False')
        >>> qreq_.load_indexer()
        >>> nnindexer = qreq_.indexer
        >>> noquery = True
        >>> flann_cfgstr = nnindexer.get_cfgstr(noquery)
        >>> result = ('flann_cfgstr = %s' % (str(flann_cfgstr),))
        >>> print(result)
        flann_cfgstr = _FLANN((algo=kdtree,seed=42,t=8,))_VECS((11260,128)gj5nea@ni0%f3aja)
    """
    flann_cfgstr_list = []
    use_params_hash = True
    use_data_hash = True
    if use_params_hash:
        # Start from the defaults for this algorithm, then overlay the
        # params this indexer actually uses.
        flann_defaults = vt.get_flann_params(nnindexer.flann_params['algorithm'])
        #flann_params_clean = flann_defaults.copy()
        flann_params_clean = ut.sort_dict(flann_defaults)
        ut.updateif_haskey(flann_params_clean, nnindexer.flann_params)
        if noquery:
            # 'checks' only affects search, not index construction.
            ut.delete_dict_keys(flann_params_clean, ['checks'])
        # Abbreviate keys and truncate values to keep the cfgstr short.
        shortnames = dict(algorithm='algo', checks='chks',
                          random_seed='seed', trees='t')
        short_params = ut.odict([
            (shortnames.get(key, key), str(val)[0:7])
            for key, val in six.iteritems(flann_params_clean)
        ])
        flann_valsig_ = ut.dict_str(short_params, nl=False, explicit=True,
                                    strvals=True)
        flann_valsig_ = flann_valsig_.lstrip('dict').replace(' ', '')
        #flann_valsig_ = str(list(flann_params.values()))
        #flann_valsig = ut.remove_chars(flann_valsig_, ', \'[]')
        flann_cfgstr_list.append('_FLANN(' + flann_valsig_ + ')')
    if use_data_hash:
        # Hash the support vectors so the cfgstr changes with the data.
        vecs_hashstr = ut.hashstr_arr(nnindexer.idx2_vec, '_VECS')
        flann_cfgstr_list.append(vecs_hashstr)
    flann_cfgstr = ''.join(flann_cfgstr_list)
    return flann_cfgstr
def write_default_repo_config():
    """Print utool's default repo configuration as a recursive dict string."""
    import utool
    default_config = utool.get_default_repo_config()
    print(utool.dict_str(default_config, strvals=True, newlines=True,
                         recursive=True))
def wrp_cache_invalidator(self, *args, **kwargs):
    """
    Closure wrapping a writer method (``writer_func``): clears the API cache
    entries for ``tblname``/``colnames`` (bound in the enclosing scope)
    BEFORE performing the write, then returns the writer's result.
    """
    # the class must have a table_cache property
    colscache_ = self.table_cache[tblname]
    colnames_ = list(
        six.iterkeys(colscache_)) if colnames is None else colnames
    if DEBUG_API_CACHE:
        indenter = ut.Indenter('[%s]' % (tblname, ))
        indenter.start()
        print('+------')
        print(
            'INVALIDATING tblname=%r, colnames=%r, rowidx=%r, force=%r'
            % (tblname, colnames, rowidx, force))
        print('self = %r' % (self, ))
        print('args = %r' % (args, ))
        print('kwargs = %r' % (kwargs, ))
        print('colscache_ = ' + ut.dict_str(colscache_, truncate=1))
    # Clear the cache of any specified colname
    # when the invalidator is called
    if rowidx is None:
        for colname in colnames_:
            kwargs_cache_ = colscache_[colname]
            # We dont know the rowsids so clear everything
            for cache_ in six.itervalues(kwargs_cache_):
                cache_.clear()
    else:
        # ``rowidx`` names the positional arg holding the affected rowids.
        rowid_list = args[rowidx]
        for colname in colnames_:
            kwargs_cache_ = colscache_[colname]
            # We know the rowids to delete
            # iterate over all getter kwargs values
            for cache_ in six.itervalues(kwargs_cache_):
                ut.delete_dict_keys(cache_, rowid_list)
    # Preform set/delete action
    if DEBUG_API_CACHE:
        print('After:')
        print('colscache_ = ' + ut.dict_str(colscache_, truncate=1))
        print('L__________')
    writer_result = writer_func(self, *args, **kwargs)
    if DEBUG_API_CACHE:
        indenter.stop()
    return writer_result
def sed_projects(regexpr, repl, force=False, recursive=True, user_profile=None, **kwargs): """ Args: regexpr (?): repl (?): force (bool): (default = False) recursive (bool): (default = True) user_profile (None): (default = None) CommandLine: python -m utool.util_project --exec-sed_projects Example: >>> # DISABLE_DOCTEST >>> from utool.util_project import * # NOQA >>> regexpr = ut.get_argval('--find', type_=str, default=sys.argv[-1]) >>> repl = ut.get_argval('--repl', type_=str, default=sys.argv[-2]) >>> force = False >>> recursive = True >>> user_profile = None >>> result = sed_projects(regexpr, repl, force, recursive, user_profile) >>> print(result) Ignore: regexpr = 'annotation match_scores' repl = 'draw_annot_scoresep' """ # FIXME: finishme import utool as ut user_profile = ensure_user_profile(user_profile) sedkw = {} sedkw['exclude_dirs'] = user_profile.project_exclude_dirs sedkw['dpath_list'] = user_profile.project_dpaths sedkw['include_patterns'] = user_profile.project_include_patterns sedkw.update(kwargs) msg_list1 = [] #msg_list2 = [] print_ = msg_list1.append print_('Seding Projects') print(' * regular expression : %r' % (regexpr,)) print(' * replacement : %r' % (repl,)) print_('sedkw = %s' % ut.dict_str(sedkw, nl=True)) print(' * recursive: %r' % (recursive,)) print(' * force: %r' % (force,)) # Walk through each directory recursively for fpath in ut.matching_fpaths(sedkw['dpath_list'], sedkw['include_patterns'], sedkw['exclude_dirs'], recursive=recursive): ut.sedfile(fpath, regexpr, repl, force)
def get_timestats_str(unixtime_list, newlines=False, full=True, isutc=False):
    r"""
    Args:
        unixtime_list (list): epoch timestamps (may contain NaN)
        newlines (bool): pretty-print one stat per line
        full (bool): include extended stats (shape, nMin/nMax, num_nan)
        isutc (bool): interpret timestamps as UTC

    Returns:
        str: timestat_str

    CommandLine:
        python -m utool.util_time --test-get_timestats_str

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_time import *  # NOQA
        >>> import utool as ut
        >>> unixtime_list = [0, 0 + 60 * 60 * 5 , 10 + 60 * 60 * 5, 100 + 60 * 60 * 5, 1000 + 60 * 60 * 5]
        >>> newlines = True
        >>> full = False
        >>> timestat_str = get_timestats_str(unixtime_list, newlines, full=full, isutc=True)
        >>> result = ut.align(str(timestat_str), ':')
        >>> print(result)
        {
            'max'  : '1970/01/01 05:16:40',
            'mean' : '1970/01/01 04:03:42',
            'min'  : '1970/01/01 00:00:00',
            'range': '5:16:40',
            'std'  : '2:02:01',
        }

    Example2:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_time import *  # NOQA
        >>> import utool as ut
        >>> unixtime_list = [0, 0 + 60 * 60 * 5 , 10 + 60 * 60 * 5, 100 + 60 * 60 * 5, 1000 + 60 * 60 * 5, float('nan'), 0]
        >>> newlines = True
        >>> timestat_str = get_timestats_str(unixtime_list, newlines, isutc=True)
        >>> result = ut.align(str(timestat_str), ':')
        >>> print(result)
        {
            'max'    : '1970/01/01 05:16:40',
            'mean'   : '1970/01/01 03:23:05',
            'min'    : '1970/01/01 00:00:00',
            'nMax'   : 1,
            'nMin'   : 2,
            'num_nan': 1,
            'range'  : '5:16:40',
            'shape'  : (7,),
            'std'    : '2:23:43',
        }
    """
    import utool as ut
    # Delegate the statistics; this function only handles formatting.
    datetime_stats = get_timestats_dict(unixtime_list, full=full, isutc=isutc)
    timestat_str = ut.dict_str(datetime_stats, newlines=newlines)
    return timestat_str
def get_timestats_str(unixtime_list, newlines=False, full=True):
    r"""
    Args:
        unixtime_list (list): epoch timestamps (may contain NaN)
        newlines (bool): pretty-print one stat per line
        full (bool): include extended stats (shape, nMin/nMax, num_nan)

    Returns:
        str: timestat_str

    NOTE(review): older variant without the ``isutc`` parameter of the
    sibling definition -- consider consolidating.

    CommandLine:
        python -m utool.util_time --test-get_timestats_str

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_time import *  # NOQA
        >>> import utool as ut
        >>> unixtime_list = [0, 0 + 60*60*5 , 10+ 60*60*5, 100+ 60*60*5, 1000+ 60*60*5]
        >>> newlines = True
        >>> full = False
        >>> timestat_str = get_timestats_str(unixtime_list, newlines, full=full)
        >>> result = ut.align(str(timestat_str), ':')
        >>> print(result)
        {
            'max'  : '1970/01/01 05:16:40',
            'mean' : '1970/01/01 04:03:42',
            'min'  : '1970/01/01 00:00:00',
            'range': '5:16:40',
            'std'  : '2:02:01',
        }

    Example2:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_time import *  # NOQA
        >>> import utool as ut
        >>> unixtime_list = [0, 0 + 60*60*5 , 10+ 60*60*5, 100+ 60*60*5, 1000+ 60*60*5, float('nan'), 0]
        >>> newlines = True
        >>> timestat_str = get_timestats_str(unixtime_list, newlines)
        >>> result = ut.align(str(timestat_str), ':')
        >>> print(result)
        {
            'max'    : '1970/01/01 05:16:40',
            'mean'   : '1970/01/01 03:23:05',
            'min'    : '1970/01/01 00:00:00',
            'nMax'   : 1,
            'nMin'   : 2,
            'num_nan': 1,
            'range'  : '5:16:40',
            'shape'  : (7,),
            'std'    : '2:23:43',
        }
    """
    import utool as ut
    # Delegate the statistics; this function only handles formatting.
    datetime_stats = get_timestats_dict(unixtime_list, full=full)
    timestat_str = ut.dict_str(datetime_stats, newlines=newlines)
    return timestat_str
def chipmatch_view(self, fnum=None, pnum=(1, 1, 1), verbose=None, **kwargs_):
    """
    just visualizes the matches using some type of lines

    Args:
        fnum (int): figure number (defaults to ``self.fnum``)
        pnum (tuple): matplotlib subplot spec
        verbose (bool): defaults to ``ut.VERBOSE``
        **kwargs_: overrides for the show_chipmatch2 kwargs; 'mode'
            selects ellipses (>=1) and/or match lines (==2)
    """
    import plottool as pt
    from plottool import plot_helpers as ph
    if fnum is None:
        fnum = self.fnum
    if verbose is None:
        verbose = ut.VERBOSE
    if verbose:
        print('-- CHIPMATCH VIEW --')
        print('[ichipmatch_view] self.mode = %r' % (self.mode,))
    # mode 1 -> ellipses only; mode 2 -> ellipses + match lines.
    mode = kwargs_.get('mode', self.mode)
    draw_ell = mode >= 1
    draw_lines = mode == 2
    if verbose:
        print('[ichipmatch_view] draw_lines = %r' % (draw_lines,))
        print('[ichipmatch_view] draw_ell = %r' % (draw_ell,))
    #pt.figure(fnum=fnum, docla=True, doclf=True)
    # NOTE: i remove the clf here. might cause issues
    pt.figure(fnum=fnum, docla=True, doclf=False)
    #show_matches_kw = self.__dict__.copy()
    show_matches_kw = dict(
        #fnum=fnum, pnum=pnum,
        draw_lines=draw_lines,
        draw_ell=draw_ell,
        colorbar_=True,
        vert=self.vert)
    show_matches_kw.update(kwargs_)
    if verbose:
        print('self.warp_homog = %r' % (self.warp_homog,))
    if self.warp_homog:
        # Warp both chips into alignment with the stored homographies.
        show_matches_kw['H1'] = self.H1
        show_matches_kw['H2'] = self.H2
    if verbose:
        print('show_matches_kw = %s' % (ut.dict_str(show_matches_kw, truncate=True)))
    #tup = show_matches(fm, fs, **show_matches_kw)
    ax, xywh1, xywh2 = pt.show_chipmatch2(
        self.rchip1, self.rchip2, self.kpts1, self.kpts2, fm=self.fm,
        fs=self.fs, pnum=pnum, **show_matches_kw)
    self.xywh2 = xywh2
    ph.set_plotdat(ax, 'viztype', 'matches')
    if self.truth is not None and self.truth:
        truth_color = pt.TRUE_BLUE  # if else pt.FALSE_RED
        pt.draw_border(ax, color=truth_color, lw=4)
    if self.title is not None:
        pt.set_title(self.title, ax=ax)
def chipmatch_view(self, fnum=None, pnum=(1, 1, 1), verbose=None, **kwargs_):
    """
    just visualizes the matches using some type of lines

    NOTE(review): auto-formatted duplicate of the sibling chipmatch_view
    definition -- consider consolidating.
    """
    import plottool as pt
    from plottool import plot_helpers as ph
    if fnum is None:
        fnum = self.fnum
    if verbose is None:
        verbose = ut.VERBOSE
    if verbose:
        print("-- CHIPMATCH VIEW --")
        print("[ichipmatch_view] self.mode = %r" % (self.mode,))
    # mode 1 -> ellipses only; mode 2 -> ellipses + match lines.
    mode = kwargs_.get("mode", self.mode)
    draw_ell = mode >= 1
    draw_lines = mode == 2
    if verbose:
        print("[ichipmatch_view] draw_lines = %r" % (draw_lines,))
        print("[ichipmatch_view] draw_ell = %r" % (draw_ell,))
    # pt.figure(fnum=fnum, docla=True, doclf=True)
    # NOTE: i remove the clf here. might cause issues
    pt.figure(fnum=fnum, docla=True, doclf=False)
    # show_matches_kw = self.__dict__.copy()
    show_matches_kw = dict(
        # fnum=fnum, pnum=pnum,
        draw_lines=draw_lines,
        draw_ell=draw_ell,
        colorbar_=True,
        vert=self.vert,
    )
    show_matches_kw.update(kwargs_)
    if verbose:
        print("self.warp_homog = %r" % (self.warp_homog,))
    if self.warp_homog:
        # Warp both chips into alignment with the stored homographies.
        show_matches_kw["H1"] = self.H1
        show_matches_kw["H2"] = self.H2
    if verbose:
        print("show_matches_kw = %s" % (ut.dict_str(show_matches_kw, truncate=True)))
    # tup = show_matches(fm, fs, **show_matches_kw)
    ax, xywh1, xywh2 = pt.show_chipmatch2(
        self.rchip1,
        self.rchip2,
        self.kpts1,
        self.kpts2,
        fm=self.fm,
        fs=self.fs,
        pnum=pnum,
        **show_matches_kw
    )
    self.xywh2 = xywh2
    ph.set_plotdat(ax, "viztype", "matches")
    if self.truth is not None and self.truth:
        truth_color = pt.TRUE_BLUE  # if else pt.FALSE_RED
        pt.draw_border(ax, color=truth_color, lw=4)
    if self.title is not None:
        pt.set_title(self.title, ax=ax)
def wrp_cache_invalidator(self, *args, **kwargs):
    """
    Wrapper that invalidates column caches before delegating to the wrapped
    writer.

    NOTE(review): this is a closure — `tblname`, `colnames`, `rowidx`,
    `force`, `writer_func`, and `DEBUG_API_CACHE` come from the enclosing
    decorator scope, which is not visible here.
    """
    # the class must have a table_cache property
    colscache_ = self.table_cache[tblname]
    # None means "invalidate every cached column of this table"
    colnames_ = list(six.iterkeys(colscache_)) if colnames is None else colnames
    if DEBUG_API_CACHE:
        indenter = ut.Indenter("[%s]" % (tblname,))
        indenter.start()
        print("+------")
        print("INVALIDATING tblname=%r, colnames=%r, rowidx=%r, force=%r" % (tblname, colnames, rowidx, force))
        print("self = %r" % (self,))
        print("args = %r" % (args,))
        print("kwargs = %r" % (kwargs,))
        print("colscache_ = " + ut.dict_str(colscache_, truncate=1))
    # Clear the cache of any specified colname
    # when the invalidator is called
    if rowidx is None:
        for colname in colnames_:
            kwargs_cache_ = colscache_[colname]
            # We dont know the rowsids so clear everything
            for cache_ in six.itervalues(kwargs_cache_):
                cache_.clear()
    else:
        # rowidx indexes into *args to find the list of affected rowids
        rowid_list = args[rowidx]
        for colname in colnames_:
            kwargs_cache_ = colscache_[colname]
            # We know the rowids to delete
            # iterate over all getter kwargs values
            for cache_ in six.itervalues(kwargs_cache_):
                ut.delete_dict_keys(cache_, rowid_list)
    # Preform set/delete action
    if DEBUG_API_CACHE:
        print("After:")
        print("colscache_ = " + ut.dict_str(colscache_, truncate=1))
        print("L__________")
    writer_result = writer_func(self, *args, **kwargs)
    if DEBUG_API_CACHE:
        indenter.stop()
    return writer_result
def build_sphinx_apidoc_cmdstr():
    """
    Build the sphinx-apidoc shell command for the single package in the cwd.

    Returns:
        tuple: (apidoc_cmdstr, modpath, outputdir) — the formatted command
            string, the package's parent directory, and the doc output dir.
    """
    print('')
    print('if this fails try: sudo pip install sphinx')
    print('')
    apidoc = 'sphinx-apidoc'
    if ut.WIN32:
        winprefix = 'C:/Python27/Scripts/'
        sphinx_apidoc_exe = winprefix + apidoc + '.exe'
    else:
        sphinx_apidoc_exe = apidoc
    outputdir = '_doc'
    apidoc_argfmt_list = [
        sphinx_apidoc_exe,
        '--force',
        '--full',
        '--maxdepth="{maxdepth}"',
        '--doc-author="{author}"',
        '--doc-version="{doc_version}"',
        '--doc-release="{doc_release}"',
        # FIX: was hardcoded to "_doc"; now templated like the other options
        # so the output directory is defined in exactly one place
        '--output-dir="{outputdir}"',
        #'--separate',  # Put documentation for each module on its own page
        '--private',  # Include "_private" modules
        '{pkgdir}',
    ]
    author = ut.parse_author()
    packages = ut.find_packages(maxdepth=1)
    assert len(packages) != 0, 'directory must contain at least one package'
    # FIX: collapsed redundant `if len(...) > 1: assert len(...) == 1`
    assert len(packages) == 1, (
        'FIXME I dont know what to do with more than one root package: %r'
        % (packages,))
    pkgdir = packages[0]
    version = ut.parse_package_for_version(pkgdir)
    # docs are generated for the package's parent directory
    modpath = dirname(ut.truepath(pkgdir))
    apidoc_fmtdict = {
        'author': author,
        'maxdepth': '8',
        'pkgdir': pkgdir,
        'doc_version': version,
        'doc_release': version,
        'outputdir': outputdir,
    }
    ut.assert_exists('setup.py')
    ut.ensuredir('_doc')
    apidoc_fmtstr = ' '.join(apidoc_argfmt_list)
    apidoc_cmdstr = apidoc_fmtstr.format(**apidoc_fmtdict)
    print('[util_setup] autogenerate sphinx docs for %r' % (pkgdir,))
    if ut.VERBOSE:
        print(ut.dict_str(apidoc_fmtdict))
    return apidoc_cmdstr, modpath, outputdir
def build_sphinx_apidoc_cmdstr():
    """
    Assemble the sphinx-apidoc command line for the package in the cwd.

    Returns:
        tuple: (apidoc_cmdstr, modpath, outputdir)
    """
    print('')
    print('if this fails try: sudo pip install sphinx')
    print('')
    # locate the executable (Windows keeps scripts under the Python prefix)
    apidoc = 'sphinx-apidoc'
    sphinx_apidoc_exe = ('C:/Python27/Scripts/' + apidoc + '.exe'
                         if ut.WIN32 else apidoc)
    apidoc_argfmt_list = [
        sphinx_apidoc_exe,
        '--force',
        '--full',
        '--maxdepth="{maxdepth}"',
        '--doc-author="{author}"',
        '--doc-version="{doc_version}"',
        '--doc-release="{doc_release}"',
        '--output-dir="_doc"',
        #'--separate',  # Put documentation for each module on its own page
        '--private',  # Include "_private" modules
        '{pkgdir}',
    ]
    outputdir = '_doc'
    author = ut.parse_author()
    packages = ut.find_packages(maxdepth=1)
    assert len(packages) != 0, 'directory must contain at least one package'
    if len(packages) > 1:
        assert len(packages) == 1, (
            'FIXME I dont know what to do with more than one root package: %r'
            % (packages,))
    pkgdir = packages[0]
    version = ut.parse_package_for_version(pkgdir)
    modpath = dirname(ut.truepath(pkgdir))
    # values substituted into the {placeholders} above
    apidoc_fmtdict = {
        'author': author,
        'maxdepth': '8',
        'pkgdir': pkgdir,
        'doc_version': version,
        'doc_release': version,
        'outputdir': outputdir,
    }
    ut.assert_exists('setup.py')
    ut.ensuredir('_doc')
    apidoc_cmdstr = ' '.join(apidoc_argfmt_list).format(**apidoc_fmtdict)
    print('[util_setup] autogenerate sphinx docs for %r' % (pkgdir,))
    if ut.VERBOSE:
        print(ut.dict_str(apidoc_fmtdict))
    return apidoc_cmdstr, modpath, outputdir
def get_cfgstr(nnindexer, noquery=False):
    r"""
    returns string which uniquely identified configuration and support data

    Args:
        noquery (bool): if True cfgstr is only relevant to building the index.
            No search params are returned (default = False)

    Returns:
        str: flann_cfgstr

    CommandLine:
        python -m ibeis.algo.hots.neighbor_index --test-get_cfgstr

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.neighbor_index import *  # NOQA
        >>> import ibeis
        >>> cfgdict = dict(fg_on=False)
        >>> qreq_ = ibeis.testdata_qreq_(defaultdb='testdb1', p='default:fg_on=False')
        >>> qreq_.load_indexer()
        >>> nnindexer = qreq_.indexer
        >>> noquery = True
        >>> flann_cfgstr = nnindexer.get_cfgstr(noquery)
        >>> result = ('flann_cfgstr = %s' % (str(flann_cfgstr),))
        >>> print(result)
        flann_cfgstr = _FLANN((algo=kdtree,seed=42,t=8,))_VECS((11260,128)gj5nea@ni0%f3aja)
    """
    flann_cfgstr_list = []
    use_params_hash = True
    use_data_hash = True
    if use_params_hash:
        # start from library defaults, overlay this indexer's params
        flann_defaults = vt.get_flann_params(nnindexer.flann_params['algorithm'])
        #flann_params_clean = flann_defaults.copy()
        flann_params_clean = ut.sort_dict(flann_defaults)
        ut.updateif_haskey(flann_params_clean, nnindexer.flann_params)
        if noquery:
            # 'checks' only affects search, not index construction
            ut.delete_dict_keys(flann_params_clean, ['checks'])
        # abbreviate keys and truncate values to keep the cfgstr short
        shortnames = dict(algorithm='algo', checks='chks', random_seed='seed', trees='t')
        short_params = ut.odict([(shortnames.get(key, key), str(val)[0:7])
                                 for key, val in six.iteritems(flann_params_clean)])
        flann_valsig_ = ut.dict_str(
            short_params, nl=False, explicit=True, strvals=True)
        # NOTE(review): lstrip strips a *character set*, not the prefix string;
        # this works only because the repr starts with 'dict(' — confirm
        flann_valsig_ = flann_valsig_.lstrip('dict').replace(' ', '')
        #flann_valsig_ = str(list(flann_params.values()))
        #flann_valsig = ut.remove_chars(flann_valsig_, ', \'[]')
        flann_cfgstr_list.append('_FLANN(' + flann_valsig_ + ')')
    if use_data_hash:
        # support-data hash ties the cfgstr to the indexed descriptors
        vecs_hashstr = ut.hashstr_arr(nnindexer.idx2_vec, '_VECS')
        flann_cfgstr_list.append(vecs_hashstr)
    flann_cfgstr = ''.join(flann_cfgstr_list)
    return flann_cfgstr
def hack(ibs):
    """
    Debug helper: print the imagesets linked (through shared names) to each
    valid imageset, keyed by id and by text.
    """
    #ibs.get_imageset_text(imgsetid_list)
    #imgsetid = ibs.get_imageset_imgsetids_from_text("NNP GZC Car '1PURPLE'")
    def _linked_imageset_texts(ibs, imgsetid):
        # imageset -> known annots -> names -> name imagesets -> texts
        import utool as ut
        #gid_list = ibs.get_imageset_gids(imgsetid)
        all_aids = ibs.get_imageset_aids(imgsetid)
        known_aids = ut.filterfalse_items(all_aids, ibs.is_aid_unknown(all_aids))
        #all(ibs.db.check_rowid_exists(const.ANNOTATION_TABLE, aid_list))
        #aids_list2 = ibs.get_image_aids(gid_list)
        #assert ut.flatten(aids_list2) == aids_list1
        # remove unknown annots
        nids = list(set(ibs.get_annot_nids(known_aids, distinguish_unknowns=False)))
        return ibs.get_imageset_text(ibs.get_name_imgsetids(nids))
    imgsetid_list = ibs.get_valid_imgsetids()
    linked_imagesettexts = []
    for imgsetid in imgsetid_list:
        linked_imagesettexts.append(_linked_imageset_texts(ibs, imgsetid))
    imagesettext_list = ibs.get_imageset_text(imgsetid_list)
    print(ut.dict_str(dict(zip(imgsetid_list, linked_imagesettexts))))
    print(ut.align(ut.dict_str(dict(zip(imagesettext_list, linked_imagesettexts))), ':'))
    print(ut.align(ut.dict_str(dict(zip(imagesettext_list, imgsetid_list)), sorted_=True), ':'))
def fix_capitalization(match):
    """
    Regex-replacement callback: lowercase every word of a LaTeX section title
    except the first, then restore known proper-noun/acronym capitalization.

    Args:
        match: re.Match with groups 'spaces', 'section_type', 'section_title'

    Returns:
        str: the rebuilt ``\\section{...}``-style command text
    """
    dict_ = match.groupdict()
    section_title = dict_['section_title']
    #if section_title == 'The Great Zebra Count':
    #    return match.string[slice(*match.span())]
    #    #return 'The Great Zebra Count'
    # general logic
    #words = section_title.split(' ')
    # NOTE(review): ''.join below relies on ut.regex_or producing a capturing
    # group so re.split keeps the ' '/'/' separators in `tokens` — confirm
    tokens = re.split(ut.regex_or([' ', '/']), section_title)
    #if 'Coverage' in section_title:
    #    ut.embed()
    #    pass
    #words = [word if count == 0 else word.lower() for count, word in enumerate(words)]
    #new_section_title = ' '.join(words)
    # lowercase everything but the first token (separators pass through)
    tokens = [t if count == 0 else t.lower() for count, t in enumerate(tokens)]
    new_section_title = ''.join(tokens)
    # hacks for caps of expanded titles
    search_repl_list = constants_tex_fixes.CAPITAL_TITLE_LIST
    for repl in search_repl_list:
        new_section_title = re.sub(re.escape(repl), repl, new_section_title,
                                   flags=re.IGNORECASE)
    # hacks fo acronyms
    for full, acro in constants_tex_fixes.ACRONYMN_LIST:
        new_section_title = re.sub(r'\b' + re.escape(acro) + r'\b', acro,
                                   new_section_title, flags=re.IGNORECASE)
    #'the great zebra and giraffe count'
    #new_section_title = section_title.lower()
    new_text = dict_['spaces'] + '\\' + dict_[
        'section_type'] + '{' + new_section_title + '}'
    VERBOSE = 0
    if VERBOSE:
        old_text = match.string[slice(*match.span())]
        if new_text != old_text:
            print(ut.dict_str(dict_))
            print('--- REPL ---')
            print(old_text)
            print(new_text)
    return new_text
def filter_duplicate_acfgs(expanded_aids_list, acfg_list, acfg_name_list,
                           verbose=ut.NOT_QUIET):
    """
    Removes configs with the same expanded aids list

    CommandLine:
        # The following will trigger this function:
        ibeis -e print_acfg -a timectrl timectrl:view=left --db PZ_MTEST
    """
    from ibeis.expt import annotation_configs
    kept_acfgs = []
    kept_aids = []
    seen_ = ut.ddict(list)
    for acfg, (qaids, daids) in zip(acfg_list, expanded_aids_list):
        # identical (qaids, daids) pairs hash to the same key
        key = (ut.hashstr_arr27(qaids, 'qaids'),
               ut.hashstr_arr27(daids, 'daids'))
        is_duplicate = key in seen_
        seen_[key].append(acfg)
        if not is_duplicate:
            # first occurrence wins; later duplicates are only recorded
            kept_aids.append((qaids, daids))
            kept_acfgs.append(acfg)
    if verbose:
        duplicate_configs = {key_: val_ for key_, val_ in seen_.items()
                             if len(val_) > 1}
        if len(duplicate_configs) > 0:
            print('The following configs produced duplicate annnotation configs')
            for key, val in duplicate_configs.items():
                # Print the semantic difference between the duplicate configs
                _tup = annotation_configs.compress_acfg_list_for_printing(val)
                nonvaried_compressed_dict, varied_compressed_dict_list = _tup
                print('+--')
                print('key = %r' % (key,))
                print('duplicate_varied_cfgs = %s' % (
                    ut.list_str(varied_compressed_dict_list),))
                print('duplicate_nonvaried_cfgs = %s' % (
                    ut.dict_str(nonvaried_compressed_dict),))
                print('L__')
        print('[harn.help] parsed %d / %d unique annot configs from: %r' % (
            len(kept_acfgs), len(acfg_list), acfg_name_list))
    return kept_aids, kept_acfgs
def auto_docstr(modname, funcname, verbose=True, moddir=None, **kwargs): r""" called from vim. Uses strings of filename and modnames to build docstr Args: modname (str): name of a python module funcname (str): name of a function in the module Returns: str: docstr CommandLine: python -m utool.util_autogen auto_docstr python -m utool --tf auto_docstr Example: >>> import utool as ut >>> from utool.util_autogen import * # NOQA >>> ut.util_autogen.rrr(verbose=False) >>> #docstr = ut.auto_docstr('ibeis.algo.hots.smk.smk_index', 'compute_negentropy_names') >>> modname = ut.get_argval('--modname', default='utool.util_autogen') >>> funcname = ut.get_argval('--funcname', default='auto_docstr') >>> moddir = ut.get_argval('--moddir', type_=str, default=None) >>> docstr = ut.util_autogen.auto_docstr(modname, funcname) >>> print(docstr) """ #import utool as ut func, module, error_str = load_func_from_module( modname, funcname, verbose=verbose, moddir=moddir) if error_str is None: try: docstr = make_default_docstr(func, **kwargs) except Exception as ex: import utool as ut error_str = ut.formatex(ex, 'Caught Error in parsing docstr', tb=True) #ut.printex(ex) error_str += ( '\n\nReplicateCommand:\n ' 'python -m utool --tf auto_docstr ' '--modname={modname} --funcname={funcname} --moddir={moddir}').format( modname=modname, funcname=funcname, moddir=moddir) error_str += '\n kwargs=' + ut.dict_str(kwargs) return error_str else: docstr = error_str return docstr
def figure_clicked(self, event=None):
    """
    Matplotlib button-press handler: on a right-click inside a chip axis,
    pop up the annotation context menu; always dumps the event for debugging.
    """
    from ibeis.viz import viz_helpers as vh
    ax = event.inaxes
    if ih.clicked_inside_axis(event):
        viztype = vh.get_ibsdat(ax, 'viztype')
        if viztype == 'chip':
            aid = vh.get_ibsdat(ax, 'aid')
            print('... aid=%r' % aid)
            if event.button == 3:   # right-click
                from ibeis.viz.interact import interact_chip
                # convert matplotlib pixel coords (origin bottom-left) to Qt
                # widget coords (origin top-left)
                height = self.fig.canvas.geometry().height()
                qpoint = guitool.newQPoint(event.x, height - event.y)
                interact_chip.show_annot_context_menu(
                    self.ibs, aid, self.fig.canvas, qpoint,
                    refresh_func=self.show_page)
                #self.show_page()
                #ibs.print_annotation_table()
            print(ut.dict_str(event.__dict__))
def fix_paper_types(cleaner):
    """
    Ensure journals and conferences have correct entrytypes.

    Mutates ``cleaner.entry`` in place: entries whose publication venue is a
    known conference become 'inproceedings' (moving 'journal' to 'booktitle'
    when needed); entries in known journals are checked for a 'journal' key.
    """
    # Record info about types of conferneces
    # true_confval = entry[pubkey].replace('{', '').replace('}', '')
    pubval = cleaner.standard_pubval()
    # bibtexparser stores the entry type under the 'ENTRYTYPE' key
    type_key = 'ENTRYTYPE'

    # article = journal
    # inprocedings = converence paper

    # FIX ENTRIES THAT SHOULD BE CONFERENCES
    entry = cleaner.entry
    if pubval in constants_tex_fixes.CONFERENCE_LIST:
        if entry[type_key] == 'inproceedings':
            pass
        elif entry[type_key] == 'article':
            # conference papers use 'booktitle', not 'journal'
            entry['booktitle'] = entry['journal']
            del entry['journal']
        elif entry[type_key] == 'incollection':
            pass
        else:
            raise AssertionError('UNKNOWN TYPE: %r' % (entry[type_key],))

        if 'booktitle' not in entry:
            print('DOES NOT HAVE CORRECT CONFERENCE KEY')
            print(ub.repr2(entry))

        assert 'journal' not in entry, 'should not have journal'

        entry[type_key] = 'inproceedings'

    # FIX ENTRIES THAT SHOULD BE JOURNALS
    if pubval in constants_tex_fixes.JOURNAL_LIST:
        if entry[type_key] == 'article':
            pass
        elif entry[type_key] == 'inproceedings':
            pass
            #print(ut.dict_str(entry))
        elif entry[type_key] == 'incollection':
            pass
        else:
            # BUG FIX: was entry['type'], which raised KeyError (the entry
            # type lives under 'ENTRYTYPE', see type_key above)
            raise AssertionError('UNKNOWN TYPE: %r' % (entry[type_key],))

        if 'journal' not in entry:
            print('DOES NOT HAVE CORRECT CONFERENCE KEY')
            print(ut.dict_str(entry))

        assert 'booktitle' not in entry, 'should not have booktitle'
def test_decorator_module():
    """
    Exercise signature preservation: compare the `decorator` package's
    wrapping against ut.preserve_sig on a stack of utool decorators, printing
    the resulting source code and docstrings side by side.
    """
    import utool as ut
    import decorator
    ut.rrrr()

    def testdecor(func):
        # the decorator stack under test
        @ut.on_exception_report_input
        @ut.accepts_scalar_input2([0])
        @ut.ignores_exc_tb
        def testwrp(*args, **kwargs):
            print('was wrapped')
            return func(*args, **kwargs)
        return testwrp
    preserving_testdecor = decorator.decorator(testdecor)

    def myfunction(self, listinput_, arg1, *args, **kwargs):
        " just a test function "
        return [x + 1 for x in listinput_]

    wrapper = testdecor(myfunction)
    orig_func = myfunction
    # three wrapping strategies to compare
    _wrp_preserve0 = preserving_testdecor(myfunction)
    _wrp_preserve1 = ut.preserve_sig(wrapper, orig_func, True)
    _wrp_preserve2 = ut.preserve_sig(wrapper, orig_func, False)

    # dump generated source for each variant
    print('___')
    print(ut.get_func_sourcecode(_wrp_preserve0))
    print('___')
    print(ut.get_func_sourcecode(_wrp_preserve1))
    print('___')
    print(ut.get_func_sourcecode(_wrp_preserve2))
    print('___')

    # dump docstrings for each variant
    print('---')
    print(ut.get_docstr(_wrp_preserve0))
    print('---')
    print(ut.get_docstr(_wrp_preserve1))
    print('---')
    print(ut.get_docstr(_wrp_preserve2))
    print('---')

    print(ut.dict_str(_wrp_preserve2._utinfo))
def general_coverage_mask_generator(make_mask_func, qreq_, qaid, id_list, fm_list, fs_list, config, cov_cfg):
    """
    agnostic to whether or not the id/fm/fs lists are name or annotation groups

    Yields:
        tuple: (daid, weight_mask_m, weight_mask) — the matched-coverage mask
            and the full (denominator) weight mask for each id in id_list.
            NOTE: weight_mask_m is reused across iterations (see CAREFUL below).
    """
    if ut.VERYVERBOSE:
        print('[acov] make_mask_func = %r' % (make_mask_func,))
        print('[acov] cov_cfg = %s' % (ut.dict_str(cov_cfg),))
    # Distinctivness and foreground weight
    qweights = get_annot_kpts_baseline_weights(qreq_.ibs, [qaid],
                                               config2_=qreq_.get_external_query_config2(),
                                               config=config)[0]
    # Denominator weight mask
    chipsize = qreq_.ibs.get_annot_chip_sizes(qaid,
                                              config2_=qreq_.get_external_query_config2())
    qkpts = qreq_.ibs.get_annot_kpts(qaid,
                                     config2_=qreq_.get_external_query_config2())
    weight_mask = make_mask_func(qkpts, chipsize, qweights, resize=False,
                                 **cov_cfg)
    # Prealloc data for loop
    weight_mask_m = weight_mask.copy()
    # Apply weighted scoring to matches
    for daid, fm, fs in zip(id_list, fm_list, fs_list):
        # CAREFUL weight_mask_m is overriden on every iteration
        weight_mask_m = compute_general_matching_coverage_mask(
            make_mask_func, chipsize, fm, fs, qkpts, qweights, cov_cfg,
            out=weight_mask_m)
        yield daid, weight_mask_m, weight_mask
def filter_duplicate_acfgs(expanded_aids_list, acfg_list, acfg_name_list,
                           verbose=ut.NOT_QUIET):
    """
    Removes configs with the same expanded aids list

    CommandLine:
        # The following will trigger this function:
        ibeis -e print_acfg -a timectrl timectrl:view=left --db PZ_MTEST
    """
    from ibeis.expt import annotation_configs
    acfg_list_ = []
    expanded_aids_list_ = []
    # maps (qaids-hash, daids-hash) -> every acfg that produced it
    seen_ = ut.ddict(list)
    for acfg, (qaids, daids) in zip(acfg_list, expanded_aids_list):
        key = (ut.hashstr_arr27(qaids, 'qaids'),
               ut.hashstr_arr27(daids, 'daids'))
        if key in seen_:
            # duplicate expansion — record the config but drop it from output
            seen_[key].append(acfg)
            continue
        else:
            seen_[key].append(acfg)
            expanded_aids_list_.append((qaids, daids))
            acfg_list_.append(acfg)
    if verbose:
        duplicate_configs = dict(
            [(key_, val_) for key_, val_ in seen_.items() if len(val_) > 1])
        if len(duplicate_configs) > 0:
            print('The following configs produced duplicate annnotation configs')
            for key, val in duplicate_configs.items():
                # Print the semantic difference between the duplicate configs
                _tup = annotation_configs.compress_acfg_list_for_printing(val)
                nonvaried_compressed_dict, varied_compressed_dict_list = _tup
                print('+--')
                print('key = %r' % (key,))
                print('duplicate_varied_cfgs = %s' % (
                    ut.list_str(varied_compressed_dict_list),))
                print('duplicate_nonvaried_cfgs = %s' % (
                    ut.dict_str(nonvaried_compressed_dict),))
                print('L__')
        print('[harn.help] parsed %d / %d unique annot configs from: %r' % (
            len(acfg_list_), len(acfg_list), acfg_name_list))
    return expanded_aids_list_, acfg_list_
def __debug_win_msvcr():
    """
    Debug helper: locate every msvcr*.dll reachable via PATH, group the
    copies by filename, and print each copy's size alongside its path.
    """
    import utool as ut
    from os.path import basename
    # search the PATH environment variable for msvcr dlls
    hits = ut.search_env_paths('msvcr*.dll', ['PATH'])
    dll_fpaths = ut.lmap(ut.ensure_unixslash,
                         ut.unique(ut.flatten(hits.values())))
    # group the full paths under their dll filename
    by_name = dict(ut.group_items(dll_fpaths, [basename(p) for p in dll_fpaths]))
    print(ut.dict_str(by_name, nl=4))
    # pair each copy with its byte size so duplicates can be told apart
    keytoid = {
        name: list(zip(ut.lmap(ut.get_file_nBytes, paths), paths))
        for name, paths in by_name.items()
    }
    ut.print_dict(keytoid, nl=2)
def general_name_coverage_mask_generator(make_mask_func, qreq_, cm, config, cov_cfg):
    """
    Yeilds:
        nid, weight_mask_m, weight_mask

    CommandLine:
        python -m ibeis.algo.hots.scoring --test-general_name_coverage_mask_generator --show
        python -m ibeis.algo.hots.scoring --test-general_name_coverage_mask_generator --show --qaid 18

    Note:
        Evaluate output one at a time or it will get clobbered

    Example0:
        >>> # SLOW_DOCTEST
        >>> # (IMPORTANT)
        >>> from ibeis.algo.hots.scoring import *  # NOQA
        >>> qreq_, cm = plh.testdata_scoring('PZ_MTEST', qaid_list=[18])
        >>> config = qreq_.qparams
        >>> make_mask_func, cov_cfg = get_mask_func(config)
        >>> masks_iter = general_name_coverage_mask_generator(make_mask_func, qreq_, cm, config, cov_cfg)
        >>> dnid_list, score_list, masks_list = evaluate_masks_iter(masks_iter)
        >>> ut.quit_if_noshow()
        >>> nidx = np.where(dnid_list == cm.qnid)[0][0]
        >>> daids = cm.get_groundtruth_daids()
        >>> dnid, weight_mask_m, weight_mask = masks_list[nidx]
        >>> show_single_coverage_mask(qreq_, cm, weight_mask_m, weight_mask, daids)
        >>> ut.show_if_requested()
    """
    if ut.VERYVERBOSE:
        print('[ncov] make_mask_func = %r' % (make_mask_func, ))
        print('[ncov] cov_cfg = %s' % (ut.dict_str(cov_cfg), ))
    assert cm.dnid_list is not None, 'eval nids'
    # group the per-annotation matches by name id
    unique_dnids, groupxs = vt.group_indices(cm.dnid_list)
    fm_groups = vt.apply_grouping_(cm.fm_list, groupxs)
    fs_groups = vt.apply_grouping_(cm.fs_list, groupxs)
    # concatenate each name's matches into a single fm/fs per name
    fs_name_list = [np.hstack(fs_group) for fs_group in fs_groups]
    fm_name_list = [np.vstack(fm_group) for fm_group in fm_groups]
    # delegate to the id-agnostic generator with name-level groups
    return general_coverage_mask_generator(make_mask_func, qreq_, cm.qaid,
                                           unique_dnids, fm_name_list,
                                           fs_name_list, config, cov_cfg)
def on_click_inside(self, event, ax):
    """
    Axis-click dispatcher: chip axes get an annotation context menu;
    match axes get either a context menu (right-click), the spatial
    verification view (ctrl-click), or the match view (left-click).
    """
    ax = event.inaxes
    viztype = ph.get_plotdat(ax, 'viztype', '')
    #if verbose:
    #    print(str(event.__dict__))
    print('viztype=%r' % viztype)
    # Clicked a specific matches
    print('plodat_dict = ' + ut.dict_str(ph.get_plotdat_dict(ax)))
    if viztype.startswith('chip'):
        from ibeis.viz.interact import interact_chip
        options = interact_chip.build_annot_context_options(
            self.ibs, self.cm.qaid, refresh_func=self._analysis_view,
            with_interact_chip=False)
        self.show_popup_menu(options, event)
    if viztype.startswith('matches') or viztype == 'multi_match':  # why startswith?
        aid2 = ph.get_plotdat(ax, 'aid2', None)
        aid_list = ph.get_plotdat(ax, 'aid_list', None)
        if event.button == 3:   # right-click
            # TODO; this functionality should be in viz.interact
            from ibeis.gui import inspect_gui
            print('right click')
            print('qreq_ = %r' % (self.qreq_,))
            options = inspect_gui.get_aidpair_context_menu_options(
                self.ibs, self.cm.qaid, aid2, self.cm,
                qreq_=self.qreq_,
                update_callback=self.show_page,
                backend_callback=None, aid_list=aid_list)
            self.show_popup_menu(options, event)
        else:
            # Ctrl-Click
            key = '' if event.key is None else event.key
            print('key = %r' % key)
            if key.find('control') == 0:
                print('[viz] result control clicked')
                self.show_sver_process_to_aid(aid2)
            # Left-Click
            else:
                print('[viz] result clicked')
                self.show_matches_to_aid(aid2)
    self.draw()
def get_buildtime_data(**kwargs):
    """
    Time FLANN index construction over geometrically growing dataset sizes
    until a build exceeds 5 seconds (or 1000 iterations pass).

    Args:
        **kwargs: forwarded to vt.get_flann_params

    Returns:
        tuple: (data_list, flann_params) where data_list holds
            (count, num, seconds) rows for each completed build
    """
    flann_params = vt.get_flann_params(**kwargs)
    print('flann_params = %r' % (ut.dict_str(flann_params),))
    data_list = []
    num = 1000
    print('-----')
    for count in ut.ProgressIter(itertools.count(), nTotal=-1, freq=1,
                                 autoadjust=False):
        # NOTE(review): num grows *before* the first measurement, so the
        # first timed build uses 1200 points, not 1000 — confirm intended
        num = int(num * 1.2)
        print('num = %r' % (num,))
        #if num > 1E6:
        #    break
        data = pool.get_testdata(num)
        print('object size ' + ut.get_object_size_str(data, 'data'))
        flann = pyflann.FLANN(**flann_params)
        with ut.Timer(verbose=False) as t:
            flann.build_index(data)
        print('t.ellapsed = %r' % (t.ellapsed,))
        # stop once builds get slow; the over-budget build is NOT recorded
        if t.ellapsed > 5 or count > 1000:
            break
        data_list.append((count, num, t.ellapsed))
        print('-----')
    return data_list, flann_params
def general_name_coverage_mask_generator(make_mask_func, qreq_, cm, config, cov_cfg):
    """
    Yeilds:
        nid, weight_mask_m, weight_mask

    CommandLine:
        python -m ibeis.algo.hots.scoring --test-general_name_coverage_mask_generator --show
        python -m ibeis.algo.hots.scoring --test-general_name_coverage_mask_generator --show --qaid 18

    Note:
        Evaluate output one at a time or it will get clobbered

    Example0:
        >>> # SLOW_DOCTEST
        >>> # (IMPORTANT)
        >>> from ibeis.algo.hots.scoring import *  # NOQA
        >>> qreq_, cm = plh.testdata_scoring('PZ_MTEST', qaid_list=[18])
        >>> config = qreq_.qparams
        >>> make_mask_func, cov_cfg = get_mask_func(config)
        >>> masks_iter = general_name_coverage_mask_generator(make_mask_func, qreq_, cm, config, cov_cfg)
        >>> dnid_list, score_list, masks_list = evaluate_masks_iter(masks_iter)
        >>> ut.quit_if_noshow()
        >>> nidx = np.where(dnid_list == cm.qnid)[0][0]
        >>> daids = cm.get_groundtruth_daids()
        >>> dnid, weight_mask_m, weight_mask = masks_list[nidx]
        >>> show_single_coverage_mask(qreq_, cm, weight_mask_m, weight_mask, daids)
        >>> ut.show_if_requested()
    """
    if ut.VERYVERBOSE:
        print('[ncov] make_mask_func = %r' % (make_mask_func,))
        print('[ncov] cov_cfg = %s' % (ut.dict_str(cov_cfg),))
    assert cm.dnid_list is not None, 'eval nids'
    # group the per-annotation matches by name id
    unique_dnids, groupxs = vt.group_indices(cm.dnid_list)
    fm_groups = vt.apply_grouping_(cm.fm_list, groupxs)
    fs_groups = vt.apply_grouping_(cm.fs_list, groupxs)
    # concatenate each name's matches into a single fm/fs per name
    fs_name_list = [np.hstack(fs_group) for fs_group in fs_groups]
    fm_name_list = [np.vstack(fm_group) for fm_group in fm_groups]
    # delegate to the id-agnostic generator with name-level groups
    return general_coverage_mask_generator(make_mask_func, qreq_, cm.qaid,
                                           unique_dnids, fm_name_list,
                                           fs_name_list, config, cov_cfg)
def find_close_incorrect_match(ibs, qaids):
    """
    Debug helper: run a vsmany query and map the query annotation's visual
    uuid to the visual uuid of its closest groundfalse (wrong-name) match.

    Args:
        ibs: IBEIS controller
        qaids (list): query annotation rowids; only qaids[0] is analyzed
    """
    use_cache = False
    save_qcache = False
    cfgdict_vsmany = dict(index_method='single', pipeline_root='vsmany',)
    qres_vsmany_list, qreq_vsmany_ = ibs.query_chips(
        qaids, ibs.get_valid_aids(), cfgdict=cfgdict_vsmany,
        return_request=True, use_cache=use_cache,
        save_qcache=save_qcache, verbose=True)
    qres_vsmany = qres_vsmany_list[0]
    # interactive display of the top results
    qres_vsmany.ishow_top(ibs)
    top_aids = qres_vsmany.get_top_aids()
    top_nids = ibs.get_annot_nids(top_aids)
    qaid = qaids[0]
    qnid = ibs.get_annot_nids(qaid)
    # groundfalse = results whose name differs from the query's name
    is_groundfalse = [nid != qnid for nid in top_nids]
    top_gf_aids = ut.compress(top_aids, is_groundfalse)
    #top_gt_aids = ut.filterfalse_items(top_aids, is_groundfalse)
    top_gf_vuuids = ibs.get_annot_visual_uuids(top_gf_aids)
    qvuuid = ibs.get_annot_visual_uuids(qaid)
    # keep only the single closest incorrect match
    gf_mapping = {qvuuid: top_gf_vuuids[0:1]}
    print('gf_mapping = ' + ut.dict_str(gf_mapping))
    pass
def testdata_ibeis(**kwargs):
    """
    DEPRICATE
    Step 1

    builds ibs for testing

    Args:
        **kwargs: db, aggregate, nWords, nAssign — each falls back to the
            corresponding command-line argument when not given

    Returns:
        ibs: controller configured for the SMK pipeline

    Example:
        >>> from ibeis.algo.hots.smk.smk_debug import *  # NOQA
        >>> kwargs = {}
    """
    print(' === Test Data IBEIS ===')
    print('kwargs = ' + ut.dict_str(kwargs))
    print('[smk_debug] testdata_ibeis')
    db = kwargs.get('db', ut.get_argval('--db', str, 'PZ_MTEST'))
    #with ut.Indenter('ENSURE'):
    if db == 'PZ_MTEST':
        ibeis.ensure_pz_mtest()
    ibs = ibeis.opendb(db=db)
    ibs._default_config()
    # kwargs take precedence over command-line flags
    aggregate = kwargs.get('aggregate', ut.get_argflag(
        ('--agg', '--aggregate')))
    nWords = kwargs.get(
        'nWords', ut.get_argval(('--nWords', '--nCentroids'), int, default=8E3))
    nAssign = kwargs.get(
        'nAssign', ut.get_argval(('--nAssign', '--K'), int, default=10))
    # Configs
    ibs.cfg.query_cfg.pipeline_root = 'smk'
    ibs.cfg.query_cfg.smk_cfg.aggregate = aggregate
    ibs.cfg.query_cfg.smk_cfg.smk_alpha = 3
    ibs.cfg.query_cfg.smk_cfg.smk_thresh = 0
    ibs.cfg.query_cfg.smk_cfg.vocabtrain_cfg.nWords = nWords
    ibs.cfg.query_cfg.smk_cfg.vocabassign_cfg.nAssign = nAssign
    if ut.VERYVERBOSE:
        ibs.cfg.query_cfg.smk_cfg.printme3()
    return ibs
def run_example(ibs):
    """
    Tutorial walkthrough of basic IBEIS controller getters; prints its
    locals and returns them for inspection.
    """
    # Print IBEIS Database info
    print(ibs.get_infostr())
    ibs.print_tables()  # uncomment if you want to see a lot of text
    #
    #
    # Each table in the database is indexed with a unique id (rowid)
    # NOTE: This is differnt than a universal unique id (uuid)
    # rowids are ints and uuids are hex strings. Currently
    # only annotations and images have uuids
    #
    gid_list = ibs.get_valid_gids()    # Valid Image IDS
    aid_list = ibs.get_valid_aids()    # Valid ANNOTATION IDs
    nid_list = ibs.get_valid_nids()    # Valid Name IDs
    imgsetid_list = ibs.get_valid_imgsetids()  # Valid ImageSet IDs
    #
    #
    # IBEIS getter methods primarily deal with lists of rowids as input
    # and return lists of values as output
    #
    name_list = ibs.get_name_texts(nid_list)     # Animal names
    aids_in_gids = ibs.get_image_aids(gid_list)  # Rois in images
    aids_in_nids = ibs.get_name_aids(nid_list)   # Rois in images
    img_uuid_list = ibs.get_image_uuids(gid_list)  # Image uuids
    annotation_uuid_list = ibs.get_annot_uuids(aid_list)  # Roi uuids
    #
    #
    # IBEIS Getter methods can take scalars as input too,
    # in this case the output is also a scalar
    #
    gid = gid_list[0]
    gpath = ibs.get_image_paths(gid)  # Get an image path
    # Print locals to the screen
    print('locals() = ' + utool.dict_str(locals()))
    return locals()
def test_viz_image(img_fpath):
    """
    Smoke test for viz_image2.show_image: draws two dummy bounding boxes
    (one selected, one rotated/offset) on the given image in figure 42.

    Args:
        img_fpath (str): path of the image to load and display
    """
    # Read image
    img = cv2.imread(img_fpath)
    tau = np.pi * 2  # References: tauday.com
    # Fresh figure 42 with a single 1x1 axis
    fig = df2.figure(fnum=42, pnum=(1, 1, 1))
    fig.clf()
    # One full-size dummy box plus one shrunken, offset box
    boxes = [dummy_bbox(img), dummy_bbox(img, (-.25, -.25), .1)]
    showkw = dict(
        title='test axis title',
        # The list of bounding boxes to be drawn on the image
        bbox_list=boxes,
        theta_list=[tau * .7, tau * .9],
        sel_list=[True, False],
        label_list=['test label', 'lbl2'],
    )
    # Print the keyword arguments to illustrate their format
    print('showkw = ' + utool.dict_str(showkw))
    # Display the image in figure-num 42, using a 1x1 axis grid in the first
    # axis. Pass showkw as keyword arguments.
    viz_image2.show_image(img, fnum=42, pnum=(1, 1, 1), **showkw)
    df2.set_figtitle('Test figure title')