def demo_model_idependencies():
    """
    Independencies of the 3 annot 3 name model

    CommandLine:
        python -m ibeis.algo.hots.demobayes --exec-demo_model_idependencies --mode=1 --num-names=2 --show
        python -m ibeis.algo.hots.demobayes --exec-demo_model_idependencies --mode=2

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> result = demo_model_idependencies()
        >>> print(result)
        >>> ut.show_if_requested()
    """
    num_names = ut.get_argval('--num-names', default=3)
    model = test_model(num_annots=num_names, num_names=num_names,
                       score_evidence=[], name_evidence=[])[0]
    # This model has the following independencies
    idens = model.get_independencies()
    iden_strs = [
        ', '.join(sorted(iden.event1)) +
        ' _L ' +
        ','.join(sorted(iden.event2)) +
        ' | ' +
        ', '.join(sorted(iden.event3))
        for iden in idens.independencies
    ]
    print('general independencies')
    print(ut.align(ut.align('\n'.join(sorted(iden_strs)), '_'), '|'))
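# The demo above relies on ut.align to line up the '_L' and '|' columns of the
# printed independency strings. As a rough illustration only (a minimal
# stand-in written here for exposition, not utool's actual implementation),
# aligning on a character means padding each line so that the character lands
# in the same column:
def _align_on_char_sketch(text, ch):
    lines = text.splitlines()
    cols = [line.find(ch) for line in lines]
    target = max(cols)
    padded = [
        line if col < 0 else line[:col] + ' ' * (target - col) + line[col:]
        for line, col in zip(lines, cols)
    ]
    return '\n'.join(padded)

# e.g. _align_on_char_sketch('N0 _L S12 | N1, N2\nN0, N1 _L S02 | N2', '|')
# pads the first line so both '|' characters end up in the same column.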
def hack(ibs):
    #ibs.get_imageset_text(imgsetid_list)
    #imgsetid = ibs.get_imageset_imgsetids_from_text("NNP GZC Car '1PURPLE'")

    def get_name_linked_imagesets_by_imgsetid(ibs, imgsetid):
        import utool as ut
        #gid_list = ibs.get_imageset_gids(imgsetid)
        aid_list_ = ibs.get_imageset_aids(imgsetid)
        aid_list = ut.filterfalse_items(aid_list_, ibs.is_aid_unknown(aid_list_))
        #all(ibs.db.check_rowid_exists(const.ANNOTATION_TABLE, aid_list))
        #aids_list2 = ibs.get_image_aids(gid_list)
        #assert ut.flatten(aids_list2) == aids_list1
        # remove unknown annots
        nid_list = list(set(ibs.get_annot_nids(aid_list, distinguish_unknowns=False)))
        name_imgsetids = ibs.get_name_imgsetids(nid_list)
        name_imagesettexts = ibs.get_imageset_text(name_imgsetids)
        return name_imagesettexts

    imgsetid_list = ibs.get_valid_imgsetids()
    linked_imagesettexts = [get_name_linked_imagesets_by_imgsetid(ibs, imgsetid)
                            for imgsetid in imgsetid_list]
    imagesettext_list = ibs.get_imageset_text(imgsetid_list)
    print(ut.dict_str(dict(zip(imgsetid_list, linked_imagesettexts))))
    print(ut.align(ut.dict_str(dict(zip(imagesettext_list, linked_imagesettexts))), ':'))
    print(ut.align(ut.dict_str(dict(zip(imagesettext_list, imgsetid_list)), sorted_=True), ':'))
def join_tabular(parts, hline=False, align=True):
    top, header, mid, bot = parts

    if hline:
        toprule = midrule = botrule = '\\hline'
    else:
        toprule = '\\toprule'
        midrule = '\\midrule'
        botrule = '\\bottomrule'

    # NOTE: leftover debugging call; its result is unused
    ut.flatten(ut.bzip(['a', 'b', 'c'], ['-']))

    top_parts = [top, toprule, header]

    if mid:
        # join midblocks given as lists of lines instead of strings
        midblocks = []
        for m in mid:
            if isinstance(m, str):
                midblocks.append(m)
            else:
                midblocks.append('\n'.join(m))
        mid_parts = ut.flatten(ut.bzip([midrule], midblocks))
    else:
        mid_parts = []

    # middle_parts = ut.flatten(list(ut.bzip(body_parts, ['\\midrule'])))
    bot_parts = [botrule, bot]
    text = '\n'.join(top_parts + mid_parts + bot_parts)
    if align:
        text = ut.align(text, '&', pos=None)
        # text = ut.align(text, r'\\', pos=None)
    return text
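# Hypothetical usage sketch for join_tabular. The rows, the (top, header, mid,
# bot) tuple shape, and the booktabs-style output are assumed from the code
# above; requires utool to be importable.
def _join_tabular_example():
    top = '\\begin{tabular}{lrr}'
    header = 'method & precision & recall \\\\'
    mid = [
        ['alpha & 0.91 & 0.88 \\\\',
         'beta  & 0.85 & 0.80 \\\\'],
    ]
    bot = '\\end{tabular}'
    # With align=True the '&' separators are padded into a column; with
    # hline=True plain \hline rules replace the booktabs rules.
    return join_tabular((top, header, mid, bot), hline=False, align=True)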
def print_size_info(inva):
    sizes = inva.get_size_info()
    sizes = ut.sort_dict(sizes, 'vals', ut.identity)
    total_nbytes = sum(sizes.values())
    logger.info(
        ut.align(ut.repr3(ut.map_dict_vals(ut.byte_str2, sizes), strvals=True), ':')
    )
    logger.info('total_nbytes = %r' % (ut.byte_str2(total_nbytes),))
def sympy_latex_repr(expr1):
    expr1_repr = sympy.latex(expr1)
    expr1_repr = expr1_repr.replace("\\\\", "\\\\\n")
    expr1_repr = expr1_repr.replace(r"\left[\begin{smallmatrix}{}", "\\MAT{\n")
    expr1_repr = expr1_repr.replace(r"\end{smallmatrix}\right]", "\n}")
    expr1_repr = expr1_repr.replace(r"\left[\begin{matrix}", "\\BIGMAT{\n")
    expr1_repr = expr1_repr.replace(r"\end{matrix}\right]", "\n}")
    expr1_repr = expr1_repr.replace(r"\left (", "(")
    expr1_repr = expr1_repr.replace(r"\right )", ")")
    expr1_repr = expr1_repr.replace(r"\left(", "(")
    expr1_repr = expr1_repr.replace(r"\right)", ")")
    # hack: align on the '&' column separator
    expr1_repr = ut.align(expr1_repr, "&", pos=None)
    return expr1_repr
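# Usage sketch for sympy_latex_repr (assumes sympy and utool are importable,
# and that \MAT / \BIGMAT are macros defined in the surrounding LaTeX preamble;
# the matrix here is made up for illustration):
def _sympy_latex_repr_example():
    import sympy
    x, y = sympy.symbols('x y')
    M = sympy.Matrix([[x, y], [y, x]])
    # Here sympy.latex(M) produces something like
    # \left[\begin{matrix}x & y\\y & x\end{matrix}\right], which the
    # replacements above rewrite into a \BIGMAT{...} block with the '&'
    # separators padded into a column.
    return sympy_latex_repr(M)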
def test_model(num_annots, num_names, score_evidence=[], name_evidence=[],
               other_evidence={}, noquery=False, verbose=None, **kwargs):
    if verbose is None:
        verbose = ut.VERBOSE

    method = kwargs.pop('method', None)
    model = make_name_model(num_annots, num_names, verbose=verbose, **kwargs)

    if verbose:
        model.print_priors(ignore_ttypes=['match', 'score'])

    model, evidence, soft_evidence = update_model_evidence(
        model, name_evidence, score_evidence, other_evidence)

    if verbose and len(soft_evidence) != 0:
        model.print_priors(ignore_ttypes=['match', 'score'],
                           title='Soft Evidence', color='green')

    #if verbose:
    #    ut.colorprint('\n --- Soft Evidence ---', 'white')
    #    for ttype, cpds in model.ttype2_cpds.items():
    #        if ttype != 'match':
    #            for fs_ in ut.ichunks(cpds, 4):
    #                ut.colorprint(ut.hz_str([f._cpdstr('psql') for f in fs_]),
    #                              'green')

    if verbose:
        ut.colorprint('\n --- Inference ---', 'red')

    if (len(evidence) > 0 or len(soft_evidence) > 0) and not noquery:
        evidence = model._ensure_internal_evidence(evidence)
        query_vars = []
        query_vars += ut.list_getattr(model.ttype2_cpds['name'], 'variable')
        #query_vars += ut.list_getattr(model.ttype2_cpds['match'], 'variable')
        query_vars = ut.setdiff(query_vars, evidence.keys())
        #query_vars = ut.setdiff(query_vars, soft_evidence.keys())
        query_results = cluster_query(model, query_vars, evidence,
                                      soft_evidence, method)
    else:
        query_results = {}

    # NOTE: this raises a KeyError when no inference ran (noquery or no
    # evidence), since query_results is empty in that case.
    factor_list = query_results['factor_list']

    if verbose:
        print('+--------')
        semtypes = [model.var2_cpd[f.variables[0]].ttype for f in factor_list]
        for type_, factors in ut.group_items(factor_list, semtypes).items():
            print('Result Factors (%r)' % (type_,))
            factors = ut.sortedby(factors, [f.variables[0] for f in factors])
            for fs_ in ut.ichunks(factors, 4):
                ut.colorprint(ut.hz_str([f._str('phi', 'psql') for f in fs_]),
                              'yellow')
        print('MAP assignments')
        top_assignments = query_results.get('top_assignments', [])
        tmp = []
        for lbl, val in top_assignments:
            tmp.append('%s : %.4f' % (ut.repr2(lbl), val))
        print(ut.align('\n'.join(tmp), ' :'))
        print('L_____\n')

    showkw = dict(evidence=evidence, soft_evidence=soft_evidence, **query_results)
    pgm_viz.show_model(model, **showkw)
    return (model, evidence, query_results)
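# Usage sketch mirroring the call made in demo_model_idependencies above
# (requires the ibeis demobayes machinery; the 3-annot / 3-name sizes are just
# the demo defaults, nothing special):
def _test_model_example():
    model, evidence, query_results = test_model(
        num_annots=3, num_names=3, score_evidence=[], name_evidence=[])
    return model.get_independencies()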
def temp_model(num_annots, num_names, score_evidence=[], name_evidence=[],
               other_evidence={}, noquery=False, verbose=None, **kwargs):
    if verbose is None:
        verbose = ut.VERBOSE

    method = kwargs.pop('method', None)
    model = make_name_model(num_annots, num_names, verbose=verbose, **kwargs)

    if verbose:
        model.print_priors(ignore_ttypes=[MATCH_TTYPE, SCORE_TTYPE])

    model, evidence, soft_evidence = update_model_evidence(
        model, name_evidence, score_evidence, other_evidence)

    if verbose and len(soft_evidence) != 0:
        model.print_priors(ignore_ttypes=[MATCH_TTYPE, SCORE_TTYPE],
                           title='Soft Evidence', color='green')

    # if verbose:
    #     ut.colorprint('\n --- Soft Evidence ---', 'white')
    #     for ttype, cpds in model.ttype2_cpds.items():
    #         if ttype != MATCH_TTYPE:
    #             for fs_ in ut.ichunks(cpds, 4):
    #                 ut.colorprint(ut.hz_str([f._cpdstr('psql') for f in fs_]),
    #                               'green')

    if verbose:
        ut.colorprint('\n --- Inference ---', 'red')

    if (len(evidence) > 0 or len(soft_evidence) > 0) and not noquery:
        evidence = model._ensure_internal_evidence(evidence)
        query_vars = []
        query_vars += ut.list_getattr(model.ttype2_cpds[NAME_TTYPE], 'variable')
        # query_vars += ut.list_getattr(model.ttype2_cpds[MATCH_TTYPE], 'variable')
        query_vars = ut.setdiff(query_vars, evidence.keys())
        # query_vars = ut.setdiff(query_vars, soft_evidence.keys())
        query_results = cluster_query(model, query_vars, evidence,
                                      soft_evidence, method)
    else:
        query_results = {}

    # NOTE: this raises a KeyError when no inference ran (noquery or no
    # evidence), since query_results is empty in that case.
    factor_list = query_results['factor_list']

    if verbose:
        logger.info('+--------')
        semtypes = [model.var2_cpd[f.variables[0]].ttype for f in factor_list]
        for type_, factors in ut.group_items(factor_list, semtypes).items():
            logger.info('Result Factors (%r)' % (type_,))
            factors = ut.sortedby(factors, [f.variables[0] for f in factors])
            for fs_ in ut.ichunks(factors, 4):
                ut.colorprint(ut.hz_str([f._str('phi', 'psql') for f in fs_]),
                              'yellow')
        logger.info('MAP assignments')
        top_assignments = query_results.get('top_assignments', [])
        tmp = []
        for lbl, val in top_assignments:
            tmp.append('%s : %.4f' % (ut.repr2(lbl), val))
        logger.info(ut.align('\n'.join(tmp), ' :'))
        logger.info('L_____\n')

    showkw = dict(evidence=evidence, soft_evidence=soft_evidence, **query_results)

    from wbia.algo.hots import pgm_viz

    pgm_viz.show_model(model, **showkw)
    return (model, evidence, query_results)
def myquery():
    r"""
    BUG::
        THERE IS A BUG SOMEWHERE: HOW IS THIS POSSIBLE?
        If everything is weighted, how did the true positive even get a score
        while the true negative did not?

        qres_copy.filtkey_list = ['ratio', 'fg', 'homogerr', 'distinctiveness']
        CORRECT STATS
        {
            'max'  : [0.832, 0.968, 0.604, 0.000],
            'min'  : [0.376, 0.524, 0.000, 0.000],
            'mean' : [0.561, 0.924, 0.217, 0.000],
            'std'  : [0.114, 0.072, 0.205, 0.000],
            'nMin' : [1, 1, 1, 51],
            'nMax' : [1, 1, 1, 1],
            'shape': (52, 4),
        }
        INCORRECT STATS
        {
            'max'  : [0.759, 0.963, 0.264, 0.000],
            'min'  : [0.379, 0.823, 0.000, 0.000],
            'mean' : [0.506, 0.915, 0.056, 0.000],
            'std'  : [0.125, 0.039, 0.078, 0.000],
            'nMin' : [1, 1, 1, 24],
            'nMax' : [1, 1, 1, 1],
            'shape': (26, 4),
        }

        # score_diff, tp_score, tn_score, p, K, dcvs_clip_max, fg_power, homogerr_power
        0.494, 0.494, 0.000, 73.000, 2, 0.500, 0.100, 10.000

        See how separability changes as we vary things.

    CommandLine:
        python -m ibeis.algo.hots.devcases --test-myquery
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 0
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 1
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 2

    References:
        http://en.wikipedia.org/wiki/Pareto_distribution <- look into

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.all_imports import *  # NOQA
        >>> from ibeis.algo.hots.devcases import *  # NOQA
        >>> ut.dev_ipython_copypaster(myquery) if ut.inIPython() else myquery()
        >>> pt.show_if_requested()
    """
    from ibeis.algo.hots import special_query  # NOQA
    from ibeis.algo.hots import distinctiveness_normalizer  # NOQA
    from ibeis import viz  # NOQA
    import plottool as pt
    index = ut.get_argval('--index', int, 0)
    ibs, aid1, aid2, tn_aid = testdata_my_exmaples(index)
    qaids = [aid1]
    daids = [aid2] + [tn_aid]
    qvuuid = ibs.get_annot_visual_uuids(aid1)

    cfgdict_vsone = dict(
        sv_on=True,
        #sv_on=False,
        #codename='vsone_unnorm_dist_ratio_extern_distinctiveness',
        codename='vsone_unnorm_ratio_extern_distinctiveness',
        sver_output_weighting=True,
    )

    use_cache = False
    save_qcache = False

    qres_list, qreq_ = ibs.query_chips(qaids, daids, cfgdict=cfgdict_vsone,
                                       return_request=True, use_cache=use_cache,
                                       save_qcache=save_qcache, verbose=True)

    qreq_.load_distinctiveness_normalizer()
    qres = qres_list[0]
    top_aids = qres.get_top_aids()  # NOQA
    qres_orig = qres  # NOQA

    def test_config(qreq_, qres_orig, cfgdict):
        """ function to grid search over """
        qres_copy = copy.deepcopy(qres_orig)
        qreq_vsone_ = qreq_
        qres_vsone = qres_copy
        filtkey = hstypes.FiltKeys.DISTINCTIVENESS
        newfsv_list, newscore_aids = special_query.get_extern_distinctiveness(
            qreq_, qres_copy, **cfgdict)
        special_query.apply_new_qres_filter_scores(
            qreq_vsone_, qres_vsone, newfsv_list, newscore_aids, filtkey)
        tp_score = qres_copy.aid2_score[aid2]
        tn_score = qres_copy.aid2_score[tn_aid]
        return qres_copy, tp_score, tn_score

    #[.01, .1, .2, .5, .6, .7, .8, .9, 1.0]),
    #FiltKeys = hstypes.FiltKeys
    # FIXME: Use other way of doing gridsearch
    grid_basis = distinctiveness_normalizer.DCVS_DEFAULT.get_grid_basis()
    gridsearch = ut.GridSearch(grid_basis, label='qvuuid=%r' % (qvuuid,))
    print('Begin Grid Search')
    for cfgdict in ut.ProgressIter(gridsearch, lbl='GridSearch'):
        qres_copy, tp_score, tn_score = test_config(qreq_, qres_orig, cfgdict)
        gridsearch.append_result(tp_score, tn_score)
    print('Finish Grid Search')

    # Get best result
    best_cfgdict = gridsearch.get_rank_cfgdict()
    qres_copy, tp_score, tn_score = test_config(qreq_, qres_orig, best_cfgdict)

    # Examine closely what you can do with scores
    if False:
        qres_copy = copy.deepcopy(qres_orig)
        qreq_vsone_ = qreq_
        filtkey = hstypes.FiltKeys.DISTINCTIVENESS
        newfsv_list, newscore_aids = special_query.get_extern_distinctiveness(
            qreq_, qres_copy, **cfgdict)
        ut.embed()

        def make_cm_very_old_tuple(qres_copy):
            assert ut.listfind(qres_copy.filtkey_list, filtkey) is None
            weight_filters = hstypes.WEIGHT_FILTERS
            weight_filtxs, nonweight_filtxs = special_query.index_partition(
                qres_copy.filtkey_list, weight_filters)

            aid2_fsv = {}
            aid2_fs = {}
            aid2_score = {}

            for new_fsv_vsone, daid in zip(newfsv_list, newscore_aids):
                pass
                break
                # NOTE: the break above makes the rest of this loop body
                # unreachable dead code.
                #scorex_vsone = ut.listfind(qres_copy.filtkey_list, filtkey)
                #if scorex_vsone is None:
                # TODO: add spatial verification as a filter score
                # augment the vsone scores
                # TODO: parameterize
                weighted_ave_score = True
                if weighted_ave_score:
                    # weighted average scoring
                    new_fs_vsone = special_query.weighted_average_scoring(
                        new_fsv_vsone, weight_filtxs, nonweight_filtxs)
                else:
                    # product scoring
                    new_fs_vsone = special_query.product_scoring(new_fsv_vsone)
                new_score_vsone = new_fs_vsone.sum()
                aid2_fsv[daid] = new_fsv_vsone
                aid2_fs[daid] = new_fs_vsone
                aid2_score[daid] = new_score_vsone
            return aid2_fsv, aid2_fs, aid2_score

        # Look at plot of query products
        for new_fsv_vsone, daid in zip(newfsv_list, newscore_aids):
            new_fs_vsone = special_query.product_scoring(new_fsv_vsone)
            scores_list = np.array(new_fs_vsone)[:, None].T
            pt.plot_sorted_scores(scores_list, logscale=False, figtitle=str(daid))
        pt.iup()
        special_query.apply_new_qres_filter_scores(
            qreq_vsone_, qres_copy, newfsv_list, newscore_aids, filtkey)

    # PRINT INFO
    import functools
    #ut.rrrr()
    get_stats_str = functools.partial(ut.get_stats_str, axis=0, newlines=True,
                                      precision=3)
    tp_stats_str = ut.align(get_stats_str(qres_copy.aid2_fsv[aid2]), ':')
    tn_stats_str = ut.align(get_stats_str(qres_copy.aid2_fsv[tn_aid]), ':')
    info_str_list = []
    info_str_list.append('qres_copy.filtkey_list = %r' % (qres_copy.filtkey_list,))
    info_str_list.append('CORRECT STATS')
    info_str_list.append(tp_stats_str)
    info_str_list.append('INCORRECT STATS')
    info_str_list.append(tn_stats_str)
    info_str = '\n'.join(info_str_list)
    print(info_str)

    # SHOW BEST RESULT
    #qres_copy.ishow_top(ibs, fnum=pt.next_fnum())
    #qres_orig.ishow_top(ibs, fnum=pt.next_fnum())

    # Text Information
    param_lbl = 'dcvs_power'
    param_stats_str = gridsearch.get_dimension_stats_str(param_lbl)
    print(param_stats_str)

    csvtext = gridsearch.get_csv_results(10)
    print(csvtext)

    # Parameter visualization
    fnum = pt.next_fnum()

    # plot parameter influence
    param_label_list = gridsearch.get_param_lbls()
    pnum_ = pt.get_pnum_func(2, len(param_label_list))
    for px, param_label in enumerate(param_label_list):
        gridsearch.plot_dimension(param_label, fnum=fnum, pnum=pnum_(px))

    # plot match figure
    pnum2_ = pt.get_pnum_func(2, 2)
    qres_copy.show_matches(ibs, aid2, fnum=fnum, pnum=pnum2_(2))
    qres_copy.show_matches(ibs, tn_aid, fnum=fnum, pnum=pnum2_(3))

    # Add figure labels
    figtitle = 'Effect of parameters on vsone separation for a single case'
    subtitle = 'qvuuid = %r' % (qvuuid)
    figtitle += '\n' + subtitle
    pt.set_figtitle(figtitle)

    # Save Figure
    #fig_fpath = pt.save_figure(usetitle=True)
    #print(fig_fpath)

    # Write CSV Results
    #csv_fpath = fig_fpath + '.csv.txt'
    #ut.write_to(csv_fpath, csvtext)
    #qres_copy.ishow_top(ibs)
    #from matplotlib import pyplot as plt
    #plt.show()
    #print(ut.list_str()))
    # TODO: plot max variation dims
    #import plottool as pt
    #pt.plot(p_list, diff_list)
def align2(str_):
    return ut.align(str_, ':', ' :')
def argparse_dict(default_dict_, lbl=None, verbose=None,
                  only_specified=False, force_keys={}, type_hint=None,
                  alias_dict={}):
    r"""
    Gets values for a dict based on the command line

    Args:
        default_dict_ (?):
        only_specified (bool): if True only returns keys that are specified
            on the commandline. no defaults.

    Returns:
        dict_: dict_ - a dictionary

    CommandLine:
        python -m utool.util_arg --test-argparse_dict
        python -m utool.util_arg --test-argparse_dict --foo=3
        python -m utool.util_arg --test-argparse_dict --flag1
        python -m utool.util_arg --test-argparse_dict --flag2
        python -m utool.util_arg --test-argparse_dict --noflag2
        python -m utool.util_arg --test-argparse_dict --thresh=43
        python -m utool.util_arg --test-argparse_dict --bins=-10
        python -m utool.util_arg --test-argparse_dict --bins=-10 --only-specified --helpx

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_arg import *  # NOQA
        >>> import utool as ut
        >>> # build test data
        >>> default_dict_ = {
        ...     'bins': 8,
        ...     'foo': None,
        ...     'flag1': False,
        ...     'flag2': True,
        ...     'max': 0.2,
        ...     'neg': -5,
        ...     'thresh': -5.333,
        ... }
        >>> # execute function
        >>> only_specified = ut.get_argflag('--only-specified')
        >>> dict_ = argparse_dict(default_dict_, only_specified=only_specified)
        >>> # verify results
        >>> result = ut.dict_str(dict_, sorted_=True)
        >>> print(result)
    """
    if verbose is None:
        verbose = VERBOSE_ARGPARSE

    def make_argstrs(key, prefix_list):
        for prefix in prefix_list:
            yield prefix + key
            yield prefix + key.replace('-', '_')
            yield prefix + key.replace('_', '-')

    def get_dictkey_cmdline_val(key, default, type_hint):
        # see if the user gave a commandline value for this dict key
        defaulttype_ = None if default is None else type(default)
        if type_hint is None:
            type_ = defaulttype_
        elif isinstance(type_hint, dict):
            type_ = type_hint.get(key, defaulttype_)
        elif isinstance(type_hint, type):
            type_ = type_hint
        else:
            raise NotImplementedError('Unknown type of type_hint=%r' % (type_hint,))
        was_specified = False
        if isinstance(default, bool):
            val = default
            if default is True:
                falsekeys = list(set(make_argstrs(key, ['--no', '--no-'])))
                notval, was_specified = get_argflag(falsekeys, return_specified=True)
                val = not notval
                if not was_specified:
                    truekeys = list(set(make_argstrs(key, ['--'])))
                    val_, was_specified = get_argflag(truekeys, return_specified=True)
                    if was_specified:
                        val = val_
            elif default is False:
                truekeys = list(set(make_argstrs(key, ['--'])))
                val, was_specified = get_argflag(truekeys, return_specified=True)
        else:
            argtup = list(set(make_argstrs(key, ['--'])))
            #if key == 'species':
            #    import utool as ut
            #    ut.embed()
            val, was_specified = get_argval(argtup, type_=type_, default=default,
                                            return_specified=True)
        return val, was_specified

    dict_ = {}
    num_specified = 0
    for key, default in six.iteritems(default_dict_):
        val, was_specified = get_dictkey_cmdline_val(key, default, type_hint)
        if not was_specified:
            alias_keys = meta_util_iter.ensure_iterable(alias_dict.get(key, []))
            for alias_key in alias_keys:
                val, was_specified = get_dictkey_cmdline_val(alias_key, default,
                                                             type_hint)
                if was_specified:
                    break
        if VERBOSE_ARGPARSE:
            if was_specified:
                num_specified += 1
                print('[argparse_dict] Specified key=%r, val=%r' % (key, val))
        #if key == 'foo':
        #    import utool as ut
        #    ut.embed()
        if not only_specified or was_specified or key in force_keys:
            dict_[key] = val

    if VERBOSE_ARGPARSE:
        print('[argparse_dict] num_specified = %r' % (num_specified,))
        print('[argparse_dict] force_keys = %r' % (force_keys,))
    #dict_ = {key: get_dictkey_cmdline_val(key, default) for key, default in
    #         six.iteritems(default_dict_)}

    if verbose:
        for key in dict_:
            if dict_[key] != default_dict_[key]:
                print('[argparse_dict] GOT ARGUMENT: cfgdict[%r] = %r' %
                      (key, dict_[key]))

    do_helpx = get_argflag(
        '--helpx',
        help_='Specifies that argparse_dict should print help and quit')
    if get_argflag(('--help', '--help2')) or do_helpx:
        import utool as ut
        print('COMMAND LINE IS ACCEPTING THESE PARAMS WITH DEFAULTS:')
        if lbl is not None:
            print(lbl)
        #print(ut.align(ut.dict_str(dict_, sorted_=True), ':'))
        print(ut.align(ut.dict_str(default_dict_, sorted_=True), ':'))
        if do_helpx:
            sys.exit(1)
    return dict_
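# Hypothetical usage sketch for argparse_dict: the keys below are made up for
# illustration and mirror the doctest above; it assumes the utool.util_arg
# module context (get_argflag, get_argval, etc.) as the doctest does. Running
# the program with `--bins=16 --flag1` would override those two entries while
# the rest keep their defaults; with only_specified=True only the overridden
# keys would be returned.
def _argparse_dict_example():
    default_dict_ = {'bins': 8, 'thresh': -5.333, 'flag1': False}
    return argparse_dict(default_dict_)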