def make_ibeis_cell_list(ibs):
    cell_template_list = get_default_cell_template_list(ibs)
    autogen_str = make_autogen_str()
    dbname = ibs.get_dbname()
    #if ut.get_argflag('--hacktestscore'):
    #    annotconfig_list_body = ut.codeblock(
    #        '''
    #        'timectrl',
    #        '''
    #    )
    #else:
    default_acfgstr = ut.get_argval('-a', type_=str, default='default:is_known=True')
    annotconfig_list_body = ut.codeblock(
        ut.repr2(default_acfgstr) + '\n' +
        ut.codeblock('''
        # See ibeis/expt/annotation_configs.py for names of annot configuration options
        #'default:has_any=(query,),dpername=1,exclude_reference=True',
        #'default:is_known=True',
        #'default:qsame_encounter=True,been_adjusted=True,excluderef=True'
        #'default:qsame_encounter=True,been_adjusted=True,excluderef=True,qsize=10,dsize=20',
        #'default:require_timestamp=True,min_timedelta=3600',
        #'default:species=primary',
        #'timectrl:',
        #'timectrl:been_adjusted=True,dpername=3',
        #'timectrl:qsize=10,dsize=20',
        #'unctrl:been_adjusted=True',
        ''')
    )
    #if ut.get_argflag('--hacktestscore'):
    #    pipeline_list_body = ut.codeblock(
    #        '''
    #        # See ibeis/algo/Config.py for names of pipeline config options
    #        'default:lnbnn_on=True,bar_l2_on=False,normonly_on=False,fg_on=True',
    #        'default:lnbnn_on=False,bar_l2_on=True,normonly_on=False,fg_on=True',
    #        'default:lnbnn_on=False,bar_l2_on=False,normonly_on=True,fg_on=True',
    #        'default:lnbnn_on=True,bar_l2_on=False,normonly_on=False,fg_on=False',
    #        'default:lnbnn_on=False,bar_l2_on=True,normonly_on=False,fg_on=False',
    #        'default:lnbnn_on=False,bar_l2_on=False,normonly_on=True,fg_on=False',
    #        '''
    #    )
    #elif True:
    default_pcfgstr_list = ut.get_argval(('-t', '-p'), type_=list, default='default')
    default_pcfgstr = ut.repr3(default_pcfgstr_list, nobr=True)
    pipeline_list_body = ut.codeblock(
        default_pcfgstr + '\n' +
        ut.codeblock('''
        #'default',
        #'default:K=1',
        #'default:K=1,AI=False',
        #'default:K=1,AI=False,QRH=True',
        #'default:K=1,RI=True,AI=False',
        #'default:K=1,adapteq=True',
        #'default:fg_on=[True,False]',
        ''')
    )
    locals_ = locals()
    _format = partial(format_cells, locals_=locals_)
    cell_list = ut.flatten(map(_format, cell_template_list))
    return cell_list

def testsdata_match_verification(defaultdb='testdb1', aid1=1, aid2=2):
    r"""
    CommandLine:
        main.py --imgsetid 2
        main.py --imgsetid 13 --db PZ_MUGU_19

    CommandLine:
        python -m ibeis.viz.interact.interact_name --test-testsdata_match_verification --show
        python -m ibeis.viz.interact.interact_name --test-testsdata_match_verification --aid1 2 --aid2 3 --show

        # Merge case
        python -m ibeis.viz.interact.interact_name --test-testsdata_match_verification --show --db PZ_MTEST --aid1 1 --aid2 30

        # Split case
        python -m ibeis.viz.interact.interact_name --test-testsdata_match_verification --show --db PZ_MTEST --aid1 30 --aid2 32

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.viz.interact.interact_name import *  # NOQA
        >>> self = testsdata_match_verification()
        >>> # verify results
        >>> ut.quit_if_noshow()
        >>> self.show_page()
        >>> ut.show_if_requested()
    """
    #from ibeis.viz.interact.interact_name import *  # NOQA
    import ibeis
    #ibs = ibeis.opendb(defaultdb='PZ_Master0')
    ibs = ibeis.opendb(defaultdb=defaultdb)
    #aid1 = ut.get_argval('--aid1', int, 14)
    #aid2 = ut.get_argval('--aid2', int, 5545)
    aid1 = ut.get_argval('--aid1', int, aid1)
    aid2 = ut.get_argval('--aid2', int, aid2)
    self = MatchVerificationInteraction(ibs, aid1, aid2, dodraw=False)
    return self

def testdata_ibeis(**kwargs):
    """
    DEPRECATE
    Step 1 builds ibs for testing

    Example:
        >>> from ibeis.algo.hots.smk.smk_debug import *  # NOQA
        >>> kwargs = {}
    """
    print(' === Test Data IBEIS ===')
    print('kwargs = ' + ut.dict_str(kwargs))
    print('[smk_debug] testdata_ibeis')
    db = kwargs.get('db', ut.get_argval('--db', str, 'PZ_MTEST'))
    #with ut.Indenter('ENSURE'):
    if db == 'PZ_MTEST':
        ibeis.ensure_pz_mtest()
    ibs = ibeis.opendb(db=db)
    ibs._default_config()
    aggregate = kwargs.get('aggregate', ut.get_argflag(('--agg', '--aggregate')))
    nWords = kwargs.get(
        'nWords', ut.get_argval(('--nWords', '--nCentroids'), int, default=8E3))
    nAssign = kwargs.get(
        'nAssign', ut.get_argval(('--nAssign', '--K'), int, default=10))
    # Configs
    ibs.cfg.query_cfg.pipeline_root = 'smk'
    ibs.cfg.query_cfg.smk_cfg.aggregate = aggregate
    ibs.cfg.query_cfg.smk_cfg.smk_alpha = 3
    ibs.cfg.query_cfg.smk_cfg.smk_thresh = 0
    ibs.cfg.query_cfg.smk_cfg.vocabtrain_cfg.nWords = nWords
    ibs.cfg.query_cfg.smk_cfg.vocabassign_cfg.nAssign = nAssign
    if ut.VERYVERBOSE:
        ibs.cfg.query_cfg.smk_cfg.printme3()
    return ibs

def testdata_expts(defaultdb='testdb1',
                   default_acfgstr_name_list=['default:qindex=0:10:4,dindex=0:20'],
                   default_test_cfg_name_list=['default'],
                   a=None,
                   t=None,
                   qaid_override=None,
                   daid_override=None,
                   initial_aids=None,
                   ):
    """
    Use this if you want data from an experiment.
    Command line interface to quickly get testdata for test_results.

    Command line flags can be used to specify db, aidcfg, pipecfg, qaid
    override, daid override (and maybe initial aids).
    """
    print('[main_helpers] testdata_expts')
    import ibeis
    from ibeis.expt import experiment_harness
    from ibeis.expt import test_result
    if a is not None:
        default_acfgstr_name_list = a
    if t is not None:
        default_test_cfg_name_list = t

    if isinstance(default_acfgstr_name_list, six.string_types):
        default_acfgstr_name_list = [default_acfgstr_name_list]
    if isinstance(default_test_cfg_name_list, six.string_types):
        default_test_cfg_name_list = [default_test_cfg_name_list]

    #from ibeis.expt import experiment_helpers
    ibs = ibeis.opendb(defaultdb=defaultdb)
    acfg_name_list = ut.get_argval(('--aidcfg', '--acfg', '-a'), type_=list,
                                   default=default_acfgstr_name_list)
    test_cfg_name_list = ut.get_argval(('-t', '-p'), type_=list,
                                       default=default_test_cfg_name_list)
    daid_override = ut.get_argval(('--daid-override', '--daids-override'),
                                  type_=list, default=daid_override)
    qaid_override = ut.get_argval(('--qaid', '--qaids-override', '--qaid-override'),
                                  type_=list, default=qaid_override)

    # Hack a cache here
    use_bigtest_cache3 = not ut.get_argflag(('--nocache', '--nocache-hs'))
    use_bigtest_cache3 &= ut.is_developer()
    use_bigtest_cache3 &= False
    if use_bigtest_cache3:
        from os.path import dirname, join
        cache_dir = ut.ensuredir(join(dirname(ut.get_module_dir(ibeis)),
                                      'BIG_TESTLIST_CACHE3'))
        load_testres = ut.cached_func('testreslist', cache_dir=cache_dir)(
            experiment_harness.run_test_configurations2)
    else:
        load_testres = experiment_harness.run_test_configurations2
    testres_list = load_testres(
        ibs, acfg_name_list, test_cfg_name_list, qaid_override=qaid_override,
        daid_override=daid_override, initial_aids=initial_aids)
    testres = test_result.combine_testres_list(ibs, testres_list)

    print(testres)
    return ibs, testres

def __init__(self):
    import utool as ut
    self.modname = None
    code_dpath = ut.truepath(ut.get_argval('--code-dir', default='~/code'))
    self.code_dpath = ut.unexpanduser(code_dpath)
    self.repo_fname = (ut.get_argval(('--repo', '--repo-name'), type_=str))
    self.repo_dpath = join(code_dpath, self.repo_fname)
    self.modname = ut.get_argval('--modname', default=self.repo_fname)
    self.regenfmt = 'python -m utool SetupRepo.{cmd} --modname={modname} --repo={repo_fname} --codedir={code_dpath}'
    ut.ensuredir(self.repo_dpath, verbose=True)

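# Hedged usage sketch (not in the original source): SetupRepo.__init__ reads
# everything from the command line, so construction is driven by flags. The
# subcommand name 'newrepo' below is hypothetical; only the flags are taken
# from the code above.
#
#   python -m utool SetupRepo.newrepo --repo=myrepo --code-dir=~/code --modname=mymod
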
def testdata_show_qres():
    import ibeis
    cm, qreq_ = ibeis.testdata_cm()
    kwargs = dict(
        top_aids=ut.get_argval('--top-aids', type_=int, default=3),
        sidebyside=not ut.get_argflag('--no-sidebyside'),
        annot_mode=ut.get_argval('--annot_mode', type_=int, default=1),
        viz_name_score=not ut.get_argflag('--no-viz_name_score'),
        max_nCols=ut.get_argval('--max_nCols', type_=int, default=None)
    )
    return qreq_.ibs, cm, qreq_, kwargs

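# Hedged usage sketch (assumes ibeis test data is available; not from the
# original source): every kwarg above maps to a command line flag, so a
# caller can tweak the display without touching code.
if False:  # illustrative only
    ibs, cm, qreq_, kwargs = testdata_show_qres()
    print(kwargs['top_aids'])  # 3 unless --top-aids overrides it
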
def train_featscore_normalizer():
    r"""
    CommandLine:
        python -m ibeis --tf train_featscore_normalizer --show

        # Write Encoder
        python -m ibeis --tf train_featscore_normalizer --db PZ_MTEST -t best -a default --fsvx=0 --threshx=1 --show

        # Visualize encoder score adjustment
        python -m ibeis --tf TestResult.draw_feat_scoresep --db PZ_MTEST -a timectrl -t best:lnbnn_normer=lnbnn_fg_featscore --show --nocache --nocache-hs

        # Compare ranking with encoder vs without
        python -m ibeis --tf draw_rank_cdf --db PZ_MTEST -a timectrl -t best:lnbnn_normer=[None,wulu] --show
        python -m ibeis --tf draw_rank_cdf --db PZ_MTEST -a default -t best:lnbnn_normer=[None,wulu] --show

        # Compare in ipynb
        python -m ibeis --tf autogen_ipynb --ipynb --db PZ_MTEST -a default -t best:lnbnn_normer=[None,lnbnn_fg_0.9__featscore]

        # Big Test
        python -m ibeis --tf draw_rank_cdf --db PZ_Master1 -a timectrl -t best:lnbnn_normer=[None,lovb],lnbnn_norm_thresh=.5 --show
        python -m ibeis --tf draw_rank_cdf --db PZ_Master1 -a timectrl -t best:lnbnn_normer=[None,jypz],lnbnn_norm_thresh=.1 --show
        python -m ibeis --tf draw_rank_cdf --db PZ_Master1 -a timectrl -t best:lnbnn_normer=[None,jypz],lnbnn_norm_thresh=0 --show

        # Big Train
        python -m ibeis --tf learn_featscore_normalizer --db PZ_Master1 -a timectrl -t best:K=1 --fsvx=0 --threshx=1 --show
        python -m ibeis --tf train_featscore_normalizer --db PZ_Master1 -a timectrl:has_none=photobomb -t best:K=1 --fsvx=0 --threshx=1 --show --ainfo
        python -m ibeis --tf train_featscore_normalizer --db PZ_Master1 -a timectrl:has_none=photobomb -t best:K=1 --fsvx=0 --threshx=1 --show
        python -m ibeis --tf train_featscore_normalizer --db PZ_Master1 -a timectrl:has_none=photobomb -t best:K=3 --fsvx=0 --threshx=1 --show

    Example:
        >>> # SCRIPT
        >>> from ibeis.algo.hots.scorenorm import *  # NOQA
        >>> encoder = train_featscore_normalizer()
        >>> encoder.visualize(figtitle=encoder.get_cfgstr())
        >>> ut.show_if_requested()
    """
    import ibeis
    # TODO: training / loading / general external models
    qreq_ = ibeis.testdata_qreq_(
        defaultdb='PZ_MTEST', a=['default'], p=['default'])
    datakw = dict(
        disttypes_=None,
        namemode=ut.get_argval('--namemode', default=True),
        fsvx=ut.get_argval('--fsvx', type_='fuzzy_subset',
                           default=slice(None, None, None)),
        threshx=ut.get_argval('--threshx', type_=int, default=None),
        thresh=ut.get_argval('--thresh', type_=float, default=.9),
    )
    encoder = learn_featscore_normalizer(qreq_, datakw=datakw)
    encoder.save()
    return encoder

def resolve_dbdir2(defaultdb=None, allow_newdir=False, db=None, dbdir=None):
    r"""
    CommandLine:
        python -m ibeis.init.sysres --exec-resolve_dbdir2 --db PZ_MTEST
        python -m ibeis.init.sysres --exec-resolve_dbdir2 --db None
        python -m ibeis.init.sysres --exec-resolve_dbdir2 --dbdir None

    Args:
        defaultdb (None): (default = None)
        allow_newdir (bool): (default = False)
        db (None): (default = None)
        dbdir (None): (default = None)

    CommandLine:
        python -m ibeis.init.sysres --exec-resolve_dbdir2

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.init.sysres import *  # NOQA
        >>> defaultdb = 'cache'
        >>> allow_newdir = False
        >>> dbdir_ = resolve_dbdir2(defaultdb)
        >>> result = ('dbdir_ = %r' % (dbdir_,))
        >>> print(result)
    """
    invalid = ['', ' ', '.', 'None']
    if db in invalid:
        db = None
    if dbdir in invalid:
        dbdir = None
    db, db_specified = ut.get_argval(
        '--db', type_=str, default=db, return_was_specified=True)
    dbdir, dbdir_specified = ut.get_argval(
        '--dbdir', type_=str, default=dbdir, return_was_specified=True)

    dbdir_flag = dbdir_specified or dbdir is not None
    db_flag = db_specified or db is not None

    if dbdir_flag:
        # Priority 1
        dbdir_ = realpath(dbdir)
    elif db_flag:
        # Priority 2
        dbdir_ = db_to_dbdir(db, allow_newdir=allow_newdir)
    else:
        # Priority 3
        if defaultdb == 'cache':
            dbdir_ = get_default_dbdir()
        else:
            dbdir_ = db_to_dbdir(defaultdb, allow_newdir=allow_newdir)
    return dbdir_

def testdata_showname():
    import ibeis
    ibs = ibeis.opendb(defaultdb='testdb1')
    default = None
    if ibs.dbname == 'testdb1':
        default = 'easy'

    name_text = ut.get_argval('--name', type_=str, default=default)
    if name_text is None:
        nid = 1
    else:
        nid = ibs.get_name_rowids_from_text(name_text)
    in_image = not ut.get_argflag('--no-inimage')
    index_list = ut.get_argval('--index_list', type_=list, default=None)
    return ibs, nid, in_image, index_list

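# Hedged usage sketch (assumes testdb1; not from the original source):
# without --name the nid falls back to 1, except on testdb1 where the
# 'easy' name is resolved first.
if False:  # illustrative only
    ibs, nid, in_image, index_list = testdata_showname()
    print(nid, in_image)
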
def testdata_matching_affine_inliers():
    import vtool.tests.dummy as dummy
    import vtool as vt
    scale_thresh = 2.0
    xy_thresh = ut.get_argval('--xy-thresh', type_=float, default=.01)
    dlen_sqrd2 = 447271.015
    ori_thresh = 1.57
    xy_thresh_sqrd = dlen_sqrd2 * xy_thresh
    featkw = ut.argparse_dict(vt.get_extract_features_default_params())
    fname1 = ut.get_argval('--fname1', type_=str, default='easy1.png')
    fname2 = ut.get_argval('--fname2', type_=str, default='easy2.png')
    (kpts1, kpts2, fm, fs, rchip1, rchip2) = dummy.testdata_ratio_matches(fname1, fname2, **featkw)
    aff_inliers, aff_errors, Aff = get_best_affine_inliers_(
        kpts1, kpts2, fm, fs, xy_thresh_sqrd, scale_thresh, ori_thresh)
    return kpts1, kpts2, fm, aff_inliers, rchip1, rchip2, xy_thresh_sqrd

def run_devprecmds():
    """
    Looks for pre-tests specified with the -e flag and runs them
    """
    #input_precmd_list = params.args.tests[:]
    input_precmd_list = ut.get_argval('-e', type_=list, default=[])
    valid_precmd_list = []

    def intest(*args, **kwargs):
        for precmd_name in args:
            valid_precmd_list.append(precmd_name)
            ret = precmd_name in input_precmd_list
            ret2 = precmd_name in params.unknown  # Let unparsed args count towards tests
            if ret or ret2:
                if ret:
                    input_precmd_list.remove(precmd_name)
                else:
                    ret = ret2
                print('+===================')
                print('| running precmd = %s' % (args,))
                return ret
        return False

    ut.start_logging(appname='ibeis')

    # Implicit (decorated) test functions
    for (func_aliases, func) in DEVPRECMD_FUNCTIONS:
        if intest(*func_aliases):
            #with utool.Indenter('[dev.' + get_funcname(func) + ']'):
            func()
            print('Exiting after first precommand')
            sys.exit(1)
    if len(input_precmd_list) > 0:
        raise AssertionError('Unhandled tests: ' + repr(input_precmd_list))

def testdata_pipecfg(p=None, t=None):
    r"""
    Returns:
        dict: pcfgdict

    CommandLine:
        python -m ibeis.init.main_helpers --exec-testdata_pipecfg
        python -m ibeis.init.main_helpers --exec-testdata_pipecfg -t default:AI=False

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.init.main_helpers import *  # NOQA
        >>> pcfgdict = testdata_pipecfg()
        >>> result = ('pcfgdict = %s' % (ut.dict_str(pcfgdict),))
        >>> print(result)
    """
    print('[main_helpers] testdata_pipecfg')
    if t is not None and p is None:
        p = t
    if p is None:
        p = ['default']

    from ibeis.expt import experiment_helpers
    test_cfg_name_list = ut.get_argval(('-t', '-p'), type_=list, default=p)
    pcfgdict_list = experiment_helpers.get_pipecfg_list(test_cfg_name_list)[0]
    assert len(pcfgdict_list) == 1, 'can only specify one pipeline config here'
    pcfgdict = pcfgdict_list[0]
    return pcfgdict

def parse_latex_comments_for_commmands():
    r"""
    CommandLine:
        python -m ibeis.scripts.gen_cand_expts --exec-parse_latex_comments_for_commmands

    Example:
        >>> # SCRIPT
        >>> from ibeis.scripts.gen_cand_expts import *  # NOQA
        >>> parse_latex_comments_for_commmands()
    """
    fname = ut.get_argval('--fname', type_=str, default='figdefexpt.tex')
    text = ut.read_from(ut.truepath('~/latex/crall-candidacy-2015/' + fname))
    #text = ut.read_from(ut.truepath('~/latex/crall-candidacy-2015/figdefindiv.tex'))
    lines = text.split('\n')
    cmd_list = ['']
    in_comment = True
    for line in lines:
        if line.startswith('% ---'):
            # Keep separators
            toadd = line.replace('%', '#')
            if not (len(cmd_list) > 1 and cmd_list[-1].startswith('# ---')):
                cmd_list[-1] += (toadd)
            else:
                cmd_list.append(toadd)
            cmd_list.append('')

        if line.strip().startswith(r'\begin{comment}'):
            in_comment = True
            continue
        if in_comment:
            line = line.strip()
            if line == '' or line.startswith('#') or line.startswith('%'):
                in_comment = False
            else:
                cmd_list[-1] = cmd_list[-1] + line
                if not line.strip().endswith('\\'):
                    cmd_list[-1] = cmd_list[-1] + ' $@'
                    #cmd_list.append('')
                    #cmd_list.append('#--')
                    cmd_list.append('')
                    in_comment = False
                else:
                    cmd_list[-1] = cmd_list[-1] + '\n'

    cmd_list = [cmd.replace('--render', '').replace('--diskshow', '')
                for cmd in cmd_list]

    # formatting
    cmd_list2 = []
    for cmd in cmd_list:
        #cmd = cmd.replace(' -t ', ' \\\n -t ')
        #cmd = cmd.replace('--db', '\\\n --db')
        #cmd = cmd.replace('python -m ibeis.dev', './dev.py')
        cmd = cmd.replace('python -m ibeis.dev -e', 'ibeis -e')
        cmd_list2.append(cmd)
    cmd_list = cmd_list2

    print('cmd_list = %s' % (ut.list_str(cmd_list),))
    from os.path import splitext
    script_fname = 'regen_' + splitext(fname)[0] + '.sh'
    fname, script, line_list = write_script_lines(cmd_list, script_fname)

def get_argv_tail(scriptname, prefer_main=None, argv=None):
    r"""
    gets the rest of the arguments after a script has been invoked hack.
    accounts for python -m scripts.

    Args:
        scriptname (str):

    CommandLine:
        python -m utool.util_arg --test-get_argv_tail

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_arg import *  # NOQA
        >>> import utool as ut
        >>> from os.path import relpath, dirname
        >>> scriptname = 'utool.util_arg'
        >>> prefer_main = False
        >>> argv=['python', '-m', 'utool.util_arg', '--test-get_argv_tail']
        >>> tail = get_argv_tail(scriptname, prefer_main, argv)
        >>> # hack
        >>> tail[0] = ut.ensure_unixslash(relpath(tail[0], dirname(dirname(ut.__file__))))
        >>> result = ut.repr2(tail)
        >>> print(result)
        ['utool/util_arg.py', '--test-get_argv_tail']

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_arg import *  # NOQA
        >>> import utool as ut
        >>> from os.path import relpath, dirname
        >>> scriptname = 'utprof.py'
        >>> prefer_main = True
        >>> argv=['utprof.py', '-m', 'utool', '--tf', 'get_argv_tail']
        >>> tail = get_argv_tail(scriptname, prefer_main, argv)
        >>> # hack
        >>> tail[0] = ut.ensure_unixslash(relpath(tail[0], dirname(dirname(ut.__file__))))
        >>> result = ut.repr2(tail)
        >>> print(result)
        ['utool/__main__.py', '--tf', 'get_argv_tail']
    """
    if argv is None:
        argv = sys.argv
    import utool as ut
    modname = ut.get_argval('-m', help_='specify module name to profile', argv=argv)
    if modname is not None:
        # hack to account for -m scripts
        modpath = ut.get_modpath(modname, prefer_main=prefer_main)
        argvx = argv.index(modname) + 1
        argv_tail = [modpath] + argv[argvx:]
    else:
        try:
            argvx = argv.index(scriptname)
        except ValueError:
            for argvx, arg in enumerate(argv):
                # HACK
                if scriptname in arg:
                    break
        argv_tail = argv[(argvx + 1):]
    return argv_tail

def grab_numpy_testdata(shape=(3e3, 128), dtype=np.uint8):
    ndata = utool.get_argval('--ndata', type_=int, default=2)
    print('[TEST] build ndata=%d numpy arrays with shape=%r' % (ndata, shape))
    print(' * expected_memory(table_list) = %s' %
          utool.byte_str2(ndata * np.product(shape)))
    table_list = [np.empty(shape, dtype=dtype) for i in range(ndata)]
    print(' * memory+overhead(table_list) = %s' %
          utool.byte_str2(utool.get_object_size(table_list)))
    return table_list

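# Hedged usage sketch (illustrative only): because ut.get_argval reads
# sys.argv, injecting --ndata before the call changes how many arrays are
# allocated.
if False:  # example only
    import sys
    sys.argv += ['--ndata', '5']
    table_list = grab_numpy_testdata(shape=(100, 128))
    assert len(table_list) == 5
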
def main():
    r"""
    python win32bootstrap.py --dl numpy --nocache
    python win32bootstrap.py --dl numpy-1.9.2rc1 --force
    python win32bootstrap.py --dl numpy-1.9.2rc1 --run
    python win32bootstrap.py --force
    python win32bootstrap.py --dryrun
    python win32bootstrap.py --dryrun --dl numpy scipy
    python win32bootstrap.py --dl numpy

    C:\Users\jon.crall\AppData\Roaming\utool\numpy-1.9.2rc1+mkl-cp27-none-win32.whl
    pip install C:/Users/jon.crall/AppData/Roaming/utool/numpy-1.9.2rc1+mkl-cp27-none-win32.whl
    """
    # Packages that you are requesting
    pkg_list = []
    if ut.get_argflag('--all'):
        pkg_list = KNOWN_PKG_LIST
    else:
        print('specify --all to download all packages')
        print('or specify --dl pkgname to download that package')
    pkg_list.extend(ut.get_argval('--dl', list, []))
    dryrun = ut.get_argflag('--dryrun')
    pkg_exe_list = bootstrap_sysreq(pkg_list, dryrun=dryrun)
    if ut.get_argflag('--run'):
        for pkg_exe in pkg_exe_list:
            if pkg_exe.endswith('.whl'):
                ut.cmd('pip install ' + pkg_exe)

def demo_model_idependencies():
    """
    Independencies of the 3-annot, 3-name model

    CommandLine:
        python -m ibeis.algo.hots.demobayes --exec-demo_model_idependencies --mode=1 --num-names=2 --show
        python -m ibeis.algo.hots.demobayes --exec-demo_model_idependencies --mode=2

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.demobayes import *  # NOQA
        >>> result = demo_model_idependencies()
        >>> print(result)
        >>> ut.show_if_requested()
    """
    num_names = ut.get_argval('--num-names', default=3)
    model = test_model(num_annots=num_names, num_names=num_names,
                       score_evidence=[], name_evidence=[])[0]
    # This model has the following independencies
    idens = model.get_independencies()
    iden_strs = [', '.join(sorted(iden.event1)) +
                 ' _L ' +
                 ','.join(sorted(iden.event2)) +
                 ' | ' +
                 ', '.join(sorted(iden.event3))
                 for iden in idens.independencies]
    print('general independencies')
    print(ut.align(ut.align('\n'.join(sorted(iden_strs)), '_'), '|'))

def incremental_test(ibs, qaid_list, daid_list=None):
    """
    Adds / queries new images one at a time to a clean test database.
    Tests the complete system.

    Args:
        ibs (IBEISController): IBEISController object
        qaid_list (list): list of annotation-ids to query

    CommandLine:
        python dev.py -t inc --db PZ_MTEST --qaid 1:30:3 --cmd
        python dev.py --db PZ_MTEST --allgt --cmd
        python dev.py --db PZ_MTEST --allgt -t inc
        python dev.py -t inc --db PZ_MTEST --qaid 1:30:3 --cmd
        python dev.py -t inc --db GZ_ALL --ninit 100 --noqcache
        python dev.py -t inc --db PZ_MTEST --noqcache --interactive-after 40
        python dev.py -t inc --db PZ_Master0 --noqcache --interactive-after 10000 --ninit 400

    Example:
        >>> from ibeis.all_imports import *  # NOQA
        >>> ibs = ibeis.opendb('PZ_MTEST')
        >>> qaid_list = ibs.get_valid_aids()
        >>> daid_list = None
    """
    from ibeis.algo.hots import automated_matcher
    ibs1 = ibs
    num_initial = ut.get_argval('--ninit', type_=int, default=0)
    return automated_matcher.incremental_test(ibs1, num_initial)

def draw_tree_model(model, **kwargs):
    import plottool as pt
    import networkx as netx
    if not ut.get_argval('--hackjunc'):
        fnum = pt.ensure_fnum(None)
        fig = pt.figure(fnum=fnum, doclf=True)  # NOQA
        ax = pt.gca()
        #name_nodes = sorted(ut.list_getattr(model.ttype2_cpds['name'], 'variable'))
        netx_graph = model.to_markov_model()
        #pos = netx.pygraphviz_layout(netx_graph)
        #pos = netx.graphviz_layout(netx_graph)
        #pos = get_hacked_pos(netx_graph, name_nodes, prog='neato')
        pos = netx.pydot_layout(netx_graph)
        node_color = [pt.WHITE] * len(pos)
        drawkw = dict(pos=pos, ax=ax, with_labels=True, node_color=node_color,
                      node_size=1100)
        netx.draw(netx_graph, **drawkw)
        if kwargs.get('show_title', True):
            pt.set_figtitle('Markov Model')

    if not ut.get_argval('--hackmarkov'):
        fnum = pt.ensure_fnum(None)
        fig = pt.figure(fnum=fnum, doclf=True)  # NOQA
        ax = pt.gca()
        netx_graph = model.to_junction_tree()

        # prettify nodes
        def fixtupkeys(dict_):
            return {
                ', '.join(k) if isinstance(k, tuple) else k: fixtupkeys(v)
                for k, v in dict_.items()
            }
        n = fixtupkeys(netx_graph.node)
        e = fixtupkeys(netx_graph.edge)
        a = fixtupkeys(netx_graph.adj)
        netx_graph.node = n
        netx_graph.edge = e
        netx_graph.adj = a
        #netx_graph = model.to_markov_model()
        #pos = netx.pygraphviz_layout(netx_graph)
        #pos = netx.graphviz_layout(netx_graph)
        pos = netx.pydot_layout(netx_graph)
        node_color = [pt.WHITE] * len(pos)
        drawkw = dict(pos=pos, ax=ax, with_labels=True, node_color=node_color,
                      node_size=2000)
        netx.draw(netx_graph, **drawkw)
        if kwargs.get('show_title', True):
            pt.set_figtitle('Junction/Clique Tree / Cluster Graph')

def show_hist_submaxima(hist_, edges=None, centers=None,
                        maxima_thresh=.8, pnum=(1, 1, 1)):
    r"""
    For C++ to show data

    Args:
        hist_ (?):
        edges (None):
        centers (None):

    CommandLine:
        python -m vtool.histogram --test-show_hist_submaxima --show
        python -m pyhesaff._pyhesaff --test-test_rot_invar --show
        python -m vtool.histogram --test-show_hist_submaxima --dpath figures --save ~/latex/crall-candidacy-2015/figures/show_hist_submaxima.jpg

    Example:
        >>> # DISABLE_DOCTEST
        >>> import plottool as pt
        >>> from vtool.histogram import *  # NOQA
        >>> # build test data
        >>> hist_ = np.array(list(map(float, ut.get_argval('--hist', type_=list, default=[1, 4, 2, 5, 3, 3]))))
        >>> edges = np.array(list(map(float, ut.get_argval('--edges', type_=list, default=[0, 1, 2, 3, 4, 5, 6]))))
        >>> maxima_thresh = ut.get_argval('--maxima_thresh', type_=float, default=.8)
        >>> centers = None
        >>> # execute function
        >>> show_hist_submaxima(hist_, edges, centers, maxima_thresh)
        >>> pt.show_if_requested()
    """
    #print(repr(hist_))
    #print(repr(hist_.shape))
    #print(repr(edges))
    #print(repr(edges.shape))
    #ut.embed()
    import plottool as pt
    #ut.embed()
    if centers is None:
        centers = hist_edges_to_centers(edges)
    bin_colors = pt.get_orientation_color(centers)
    pt.figure(fnum=pt.next_fnum(), pnum=pnum)
    POLAR = False
    if POLAR:
        pt.df2.plt.subplot(*pnum, polar=True, axisbg='#000000')
    pt.draw_hist_subbin_maxima(hist_, centers, bin_colors=bin_colors,
                               maxima_thresh=maxima_thresh)
    #pt.gca().set_rmax(hist_.max() * 1.1)
    #pt.gca().invert_yaxis()
    #pt.gca().invert_xaxis()
    pt.dark_background()
    #if ut.get_argflag('--legend'):
    #    pt.figure(fnum=pt.next_fnum())
    #    centers_ = np.append(centers, centers[0])
    #    r = np.ones(centers_.shape) * .2
    #    ax = pt.df2.plt.subplot(111, polar=True)
    #    pt.plots.colorline(centers_, r, cmap=pt.df2.plt.get_cmap('hsv'), linewidth=10)
    #    #ax.plot(centers_, r, 'm', color=bin_colors, linewidth=100)
    #    ax.set_rmax(.2)
    #    #ax.grid(True)
    #    #ax.set_title("Angle Colors", va='bottom')
    title = ut.get_argval('--title', default='')
    pt.set_figtitle(title)

def make_ibeis_cell_list(ibs):
    cell_template_list = get_default_cell_template_list(ibs)
    autogen_str = ut.make_autogen_str()
    dbname = ibs.get_dbname()
    default_acfgstr = ut.get_argval('-a', type_=str, default='default:is_known=True')
    asreport = ut.get_argflag('--asreport')
    default_pcfgstr_list = ut.get_argval(('-t', '-p'), type_=list, default='default')
    default_pcfgstr = ut.repr3(default_pcfgstr_list, nobr=True)
    if asreport:
        annotconfig_list_body = ut.codeblock(
            ut.repr2(default_acfgstr)
        )
        pipeline_list_body = ut.codeblock(
            default_pcfgstr
        )
    else:
        annotconfig_list_body = ut.codeblock(
            ut.repr2(default_acfgstr) + '\n' +
            ut.codeblock('''
            # See ibeis/expt/annotation_configs.py for names of annot configuration options
            #'default:has_any=(query,),dpername=1,exclude_reference=True',
            #'default:is_known=True',
            #'default:qsame_imageset=True,been_adjusted=True,excluderef=True,qsize=10,dsize=20',
            #'default:require_timestamp=True,min_timedelta=3600',
            #'default:species=primary',
            #'timectrl:',
            #'unctrl:been_adjusted=True',
            ''')
        )
        pipeline_list_body = ut.codeblock(
            default_pcfgstr + '\n' +
            ut.codeblock('''
            #'default',
            #'default:K=1,AI=False,QRH=True',
            #'default:K=1,RI=True,AI=False',
            #'default:K=1,adapteq=True',
            #'default:fg_on=[True,False]',
            ''')
        )
    locals_ = locals()
    _format = partial(ut.format_cells, locals_=locals_)
    cell_list = ut.flatten(map(_format, cell_template_list))
    return cell_list

def testdata_multichips():
    import ibeis
    ibs = ibeis.opendb(defaultdb='testdb1')
    nid = ut.get_argval('--nid', type_=int, default=None)
    tags = ut.get_argval('--tags', type_=list, default=None)

    if nid is not None:
        aid_list = ibs.get_name_aids(nid)
    elif tags is not None:
        index = ut.get_argval('--index', default=0)
        aid_list = ibs.filter_aidpairs_by_tags(any_tags=tags)[index]
    else:
        #aid_list = ut.get_argval('--aids', type_=list, default=[1, 2, 3])
        aid_list = ibeis.testdata_aids(default_aids=[1, 2, 3], ibs=ibs)

    in_image = not ut.get_argflag('--no-inimage')
    return ibs, aid_list, in_image

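# Hedged usage sketch (assumes testdb1; not from the original source): the
# three selection modes are tried in order: --nid first, then --tags with
# --index picking one pair, then the plain aid default.
if False:  # illustrative only
    ibs, aid_list, in_image = testdata_multichips()
    print(len(aid_list), in_image)
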
def dump_autogen_code(fpath, autogen_text, codetype='python', fullprint=None):
    """
    Helper that writes a file if -w is given on command line, otherwise
    it just prints it out. It has the option of comparing a diff to the file.
    """
    import utool as ut
    dowrite = ut.get_argflag(('-w', '--write'))
    show_diff = ut.get_argflag('--diff')
    num_context_lines = ut.get_argval('--diff', type_=int, default=None)
    show_diff = show_diff or num_context_lines is not None

    if fullprint is None:
        fullprint = True

    if fullprint is False:
        fullprint = ut.get_argflag('--print')

    print('[autogen] Autogenerated %s...\n+---\n' % (fpath,))
    if not dowrite:
        if fullprint:
            ut.print_code(autogen_text, lexer_name=codetype)
            print('\nL___')
        else:
            print('specify --print to write to stdout')
        print('specify -w to write, or --diff to compare')
        print('...would write to: %s' % fpath)
    if show_diff:
        if ut.checkpath(fpath, verbose=True):
            prev_text = ut.read_from(fpath)
            textdiff = ut.get_textdiff(prev_text, autogen_text,
                                       num_context_lines=num_context_lines)
            try:
                ut.print_difftext(textdiff)
            except UnicodeDecodeError:
                import unicodedata
                textdiff = unicodedata.normalize('NFKD', textdiff).encode('ascii', 'ignore')
                ut.print_difftext(textdiff)
        if dowrite:
            print('WARNING: Not writing. Remove --diff from command line')
    elif dowrite:
        ut.write_to(fpath, autogen_text)

def testdata_kpts():
    import utool as ut
    import vtool as vt
    import pyhesaff
    img_fpath = ut.grab_test_imgpath(ut.get_argval('--fname', default='star.png'))
    kwargs = ut.parse_dict_from_argv(pyhesaff.get_hesaff_default_params())
    (kpts, vecs) = pyhesaff.detect_feats(img_fpath, **kwargs)
    imgBGR = vt.imread(img_fpath)
    return kpts, vecs, imgBGR

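# Hedged usage sketch (assumes pyhesaff and its bundled test images are
# installed; not from the original source): any hesaff parameter can be
# overridden on the command line because parse_dict_from_argv mirrors
# get_hesaff_default_params().
if False:  # illustrative only
    kpts, vecs, imgBGR = testdata_kpts()
    print('detected %d keypoints' % (len(kpts),))
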
def parse_img_from_arg(argstr_):
    fpath = ut.get_argval(argstr_, type_=str, default='None')
    if fpath is not None and fpath != 'None':
        img = vt.imread(fpath, grayscale=True)
        print('Reading %s with stats %s' % (fpath, ut.get_stats_str(img, axis=None)))
    else:
        print('Did not read %s' % (fpath,))
        img = None
    return img

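# Hedged usage sketch (illustrative only): the string sentinel 'None' means
# an unspecified flag falls through to the else branch and returns None.
if False:  # example only
    img1 = parse_img_from_arg('--img1')  # path given by a hypothetical --img1 flag
    if img1 is None:
        print('--img1 was not specified')
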
def change_names(ibs, qaid_list):
    """ Test to change names """
    #next_name = utool.get_argval('--name', str, default='<name>_the_<species>')
    next_name = utool.get_argval('--name', str, default='glob')
    for aid in qaid_list:
        ibs.print_name_table()
        #(nid,) = ibs.add_names((next_name,))
        ibs.set_annot_names(aid, next_name)
        ibs.print_name_table()
        ibs.print_annotation_table()

def rsync_ibsdb_main():
    import sys
    default_user = ut.get_user_name()
    default_db = 'MUGU_Master'
    if len(sys.argv) < 2:
        print('Usage: '
              'python -m ibeis.scripts.rsync_ibeisdb '
              '[push, pull] --db <db=%s> --user <user=%s>' %
              (default_db, default_user,))
        sys.exit(1)
    user = ut.get_argval('--user', type_=str, default=default_user)
    port = ut.get_argval('--port', type_=int, default=22)
    dbname = ut.get_argval(('--db', '--dbname'), type_=str, default=default_db)
    workdir = ut.get_argval('--workdir', type_=str, default=None,
                            help_='local work dir override')
    dry_run = ut.get_argflag(('--dryrun', '--dry-run', '--dry'))
    mode = sys.argv[1]

    assert mode in ['push', 'pull'], 'mode=%r must be push or pull' % (mode,)
    remote_key = ut.get_argval('--remote', type_=str, default='hyrule')
    remote_map = {
        'hyrule': 'hyrule.cs.rpi.edu',
        'pachy': 'pachy.cs.uic.edu',
        'lewa': '41.203.223.178',
        'lev': 'lev.cs.rpi.edu',
    }
    remote_workdir_map = {
        'hyrule': '/raid/work',
        'pachy': '/home/shared_ibeis/data/work',
        'lewa': '/data/ibeis',
        'lev': '/media/hdd/work',
    }
    if ':' in remote_key:
        remote_key_, remote_workdir = remote_key.split(':')
    else:
        remote_key_ = remote_key
        remote_workdir = remote_workdir_map.get(remote_key, '')

    remote = remote_map.get(remote_key_, remote_key_)
    remote_uri = user + '@' + remote + ':' + remote_workdir

    ut.change_term_title('RSYNC IBEISDB %r' % (dbname,))
    sync_ibeisdb(remote_uri, dbname, mode, workdir, port, dry_run)

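# Hedged usage sketch (not from the original source; the remote keys are the
# real lookup keys above, the user name is hypothetical): a dry-run pull of
# MUGU_Master from the 'hyrule' remote would look like:
#
#   python -m ibeis.scripts.rsync_ibeisdb pull --db MUGU_Master --user jon --remote hyrule --dry
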
def learn_k():
    r"""
    CommandLine:
        python -m ibeis.other.optimize_k --test-learn_k
        python -m ibeis.other.optimize_k --test-learn_k --show
        python -m ibeis.other.optimize_k --test-learn_k --show --dummy

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.other.optimize_k import *  # NOQA
        >>> import plottool as pt
        >>> # build test data
        >>> # execute function
        >>> known_nd_data, known_target_points, given_data_dims, opt_model_params = learn_k()
        >>> # verify results
        >>> ut.quit_if_noshow()
        >>> plot_search_surface(known_nd_data, known_target_points, given_data_dims, opt_model_params)
        >>> pt.all_figures_bring_to_front()
        >>> pt.show_if_requested()
    """
    # Compute Training Data
    varydict = {
        #'K': [4, 7, 10, 13, 16, 19, 22, 25][:4],
        #'K': [1, 2, 3, 4, 8, 10, 13, 15],
        'K': [1, 2, 4, 8, 16],
        #'nDaids': [20, 100, 250, 500, 750, 1000],
    }
    nDaids_basis = [20, 30, 50, 75, 100, 200, 250, 300, 325, 350, 400, 500,
                    600, 750, 800, 900, 1000, 1500]
    DUMMY = ut.get_argflag('--dummy')
    if DUMMY:
        nDaids_list, K_list, nError_list = test_training_data(varydict, nDaids_basis)
        nError_list = nError_list.astype(np.float32) / nError_list.max()
    else:
        dbname = ut.get_argval('--db', default='PZ_Master0')
        ibs = ibeis.opendb(dbname)
        verbose = False
        qaids, daids_list = collect_ibeis_training_annotations(ibs, nDaids_basis, verbose=verbose)
        nDaids_list, K_list, nError_list = evaluate_training_data(
            ibs, qaids, daids_list, varydict, nDaids_basis, verbose=verbose)
        nError_list = nError_list.astype(np.float32) / len(qaids)
        print('\nFinished Get Training Data')
        print('len(qaids) = %r' % (len(qaids)))
        print(ut.get_stats_str(nError_list))

    #unique_nDaids = np.unique(nDaids_list)

    # Alias to general optimization problem
    known_nd_data = np.vstack([nDaids_list, K_list]).T
    known_target_points = nError_list
    # Mark the data we are given vs what we want to learn
    given_data_dims = [0]
    #learn_data_dims = [1]

    # Minimize K params
    opt_model_params = minimize_compute_K_params(known_nd_data,
                                                 known_target_points,
                                                 given_data_dims)
    return known_nd_data, known_target_points, given_data_dims, opt_model_params

def TEST_QUERY_COMP(ibs):
    r"""
    CommandLine:
        python -m ibeis.tests.test_ibs_query_components --test-TEST_QUERY_COMP

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.tests.test_ibs_query_components import *  # NOQA
        >>> import ibeis
        >>> # build test data
        >>> ibs = ibeis.opendb('testdb1')
        >>> # execute function
        >>> TEST_QUERY_COMP(ibs)
    """
    print('[TEST_QUERY_COMP]')
    aids = ibs.get_valid_aids()
    index = 0
    index = utool.get_argval('--index', type_=int, default=index)
    qaid_list = utool.safe_slice(aids, index, index + 1)
    print('[TEST_QUERY_COMP] len(qaid_list)=%r' % (len(qaid_list),))
    try:
        comp_locals_ = query_helpers.get_query_components(ibs, qaid_list)
        qres_dict = OrderedDict([
            ('ORIG', comp_locals_['qres_ORIG']),
            ('FILT', comp_locals_['qres_FILT']),
            ('SVER', comp_locals_['qres_SVER']),
        ])

        top_aids = qres_dict['SVER'].get_top_aids()
        aid2 = top_aids[0]
    except Exception as ex:
        if 'qres_dict' in vars():
            for name, qres in qres_dict.items():
                print(name)
                print(qres.get_inspect_str())
        utool.printex(ex, keys=['qaid_list'], pad_stdout=True)
        raise

    for px, (lbl, qres) in enumerate(six.iteritems(qres_dict)):
        print(lbl)
        fnum = df2.next_fnum()
        df2.figure(fnum=fnum, doclf=True)
        qres.ishow_top(ibs, fnum=fnum, top_aids=top_aids, ensure=False)
        df2.set_figtitle(lbl)
        df2.adjust_subplots_safe(top=.8)

    fnum = df2.next_fnum()

    qaid2_svtups = comp_locals_['qaid2_svtups']
    qaid2_chipmatch_FILT = comp_locals_['qaid2_chipmatch_FILT']
    aid1 = qaid = comp_locals_['qaid']
    aid2_svtup = qaid2_svtups[aid1]
    chipmatch_FILT = qaid2_chipmatch_FILT[aid1]
    viz.show_sver(ibs, aid1, aid2, chipmatch_FILT, aid2_svtup, fnum=fnum)
    return locals()

def TEST_INTERACT(ibs):
    valid_gids = ibs.get_valid_gids()
    valid_aids = ibs.get_valid_aids()

    print('''
    * len(valid_aids) = %r
    * len(valid_gids) = %r
    ''' % (len(valid_aids), len(valid_gids)))

    assert len(valid_gids) > 0, 'database images cannot be empty for test'
    gindex = int(utool.get_argval('--gx', default=0))
    cindex = int(utool.get_argval('--rx', default=0))
    gid = valid_gids[gindex]
    aid_list = ibs.get_image_aids(gid)
    aid = aid_list[cindex]

    #----------------------
    #print('Show Image')
    aids = aid_list[1:3]
    interact.ishow_image(ibs, gid, aids=aids, fnum=1)

    #----------------------
    #print('Show Chip')
    interact.ishow_chip(ibs, aid, in_image=False, fnum=2)
    #interact.ishow_chip(ibs, aid, in_image=True, fnum=3)

    #----------------------
    #print('Show Query')
    #aid1 = aid
    #qcid2_qres = ibs._query_chips4([qaid1], valid_aids)
    #qres = qcid2_qres.values()[0]
    #top_cids = qres.get_top_cids(ibs)
    #assert len(top_cids) > 0, 'there does not seem to be results'
    #cid2 = top_cids[0]  # 294
    #viz.show_matches(ibs, qres, cid2, fnum=4)
    #viz.show_qres(ibs, qres, fnum=5)
    return locals()

def draw_results(ibs, testres):
    r"""
    Draws results from an experiment harness run.
    Rows store different qaids (query annotation ids)
    Cols store different configurations (algorithm parameters)

    Args:
        testres (TestResult):

    CommandLine:
        python dev.py -t custom:rrvsone_on=True,constrained_coeff=0 custom --qaid 12 --db PZ_MTEST --show --va
        python dev.py -t custom:rrvsone_on=True,constrained_coeff=.3 custom --qaid 12 --db PZ_MTEST --show --va --noqcache
        python dev.py -t custom:rrvsone_on=True custom --qaid 4 --db PZ_MTEST --show --va --noqcache
        python dev.py -t custom:rrvsone_on=True,grid_scale_factor=1 custom --qaid 12 --db PZ_MTEST --show --va --noqcache
        python dev.py -t custom:rrvsone_on=True,grid_scale_factor=1,grid_steps=1 custom --qaid 12 --db PZ_MTEST --show --va --noqcache

    CommandLine:
        python dev.py -t best --db seals2 --allgt --vz --fig-dname query_analysis_easy --show
        python dev.py -t best --db seals2 --allgt --vh --fig-dname query_analysis_hard --show

        python dev.py -t pyrscale --db PZ_MTEST --allgt --vn --fig-dname query_analysis_interesting --show
        python dev.py -t pyrscale --db testdb3 --allgt --vn --fig-dname query_analysis_interesting --vf
        python dev.py -t pyrscale --db testdb3 --allgt --vn --fig-dname query_analysis_interesting --vf --quality

        python -m ibeis.expt.experiment_drawing --test-draw_results --show --vn
        python -m ibeis.expt.experiment_drawing --test-draw_results --show --vn --db PZ_MTEST
        python -m ibeis.expt.old_storage --test-draw_results --show --db PZ_MTEST --gv

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.old_storage import *  # NOQA
        >>> from ibeis.init import main_helpers
        >>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST')
        >>> result = draw_results(ibs, testres)
        >>> # verify results
        >>> print(result)
    """
    print(' --- DRAW RESULTS ---')

    # It is very inefficient to turn off caching when view_all is true
    figdir = ibs.get_fig_dir()
    ut.ensuredir(figdir)

    if ut.get_argflag(('--view-fig-directory', '--vf')):
        ut.view_directory(figdir)

    figdir_suffix = ut.get_argval('--fig-dname', type_=str, default=None)
    from os.path import join
    if figdir_suffix is not None:
        figdir = join(figdir, figdir_suffix)
        ut.ensuredir(figdir)

    #gx2_gt_timedelta
    #  cfgres_info['qx2_gf_timedelta'] = qx2_gf_timedelta
    metadata_fpath = join(figdir, 'result_metadata.shelf')
    metadata = ResultMetadata(metadata_fpath)
    #metadata.rrr()
    metadata.connect()
    metadata.sync_test_results(testres)
    #cfgstr = qreq_.get_cfgstr()
    #cfg_metadata = ensure_item(metadata, cfgstr, {})
    #avuuids = ibs.get_annot_visual_uuids(qaids)
    #avuuid2_ax = ensure_item(cfg_metadata, 'avuuid2_ax', {})
    #cfg_columns = ensure_item(cfg_metadata, 'columns', {})
    #import guitool

    # ut.argv_flag_dec(draw_rank_cmc)(ibs, testres)

    # VIZ_INDIVIDUAL_RESULTS = True
    # if VIZ_INDIVIDUAL_RESULTS:
    #     draw_match_cases(ibs, testres, metadata=metadata)

    metadata.write()
    if ut.get_argflag(('--guiview', '--gv')):
        import guitool
        guitool.ensure_qapp()
        #wgt = make_test_result_custom_api(ibs, testres)
        wgt = make_metadata_custom_api(metadata)
        wgt.show()
        wgt.raise_()
        guitool.qtapp_loop(wgt, frequency=100)
    metadata.close()

    if ut.NOT_QUIET:
        print('[DRAW_RESULT] EXIT EXPERIMENT HARNESS')

def autogenerate_nth_schema_version(schema_spec, n=-1):
    r"""
    dumps, prints, or diffs autogen schema based on command line

    Args:
        n (int):

    CommandLine:
        python -m wbia.control._sql_helpers --test-autogenerate_nth_schema_version

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.control._sql_helpers import *  # NOQA
        >>> from wbia.control import DB_SCHEMA
        >>> # build test data
        >>> schema_spec = DB_SCHEMA
        >>> n = 1
        >>> # execute function
        >>> tablename = autogenerate_nth_schema_version(schema_spec, n)
        >>> # verify results
        >>> result = str(tablename)
        >>> print(result)
    """
    import utool as ut

    logger.info('[_SQL] AUTOGENERATING CURRENT SCHEMA')
    db = get_nth_test_schema_version(schema_spec, n=n)
    # Auto-generate the version skip schema file
    schema_spec_dir, schema_spec_fname = split(schema_spec.__file__)
    schema_spec_fname = splitext(schema_spec_fname)[0]
    # HACK TO GET AUTOGEN COMMAND
    # FIXME: Make this autogen command a bit more sane and not completely
    # coupled with wbia
    autogen_cmd = ut.codeblock(
        """
        python -m wbia.control.{schema_spec_fname} --test-autogen_{funcname} --force-incremental-db-update --write
        python -m wbia.control.{schema_spec_fname} --test-autogen_{funcname} --force-incremental-db-update --diff=1
        python -m wbia.control.{schema_spec_fname} --test-autogen_{funcname} --force-incremental-db-update
        """
    ).format(schema_spec_fname=schema_spec_fname, funcname=schema_spec_fname.lower())
    autogen_text = db.get_schema_current_autogeneration_str(autogen_cmd)

    autogen_fname = '%s_CURRENT.py' % schema_spec_fname
    autogen_fpath = join(schema_spec_dir, autogen_fname)

    dowrite = ut.get_argflag(('-w', '--write', '--dump-autogen-schema'))
    show_diff = ut.get_argflag('--diff')
    num_context_lines = ut.get_argval('--diff', type_=int, default=None)
    show_diff = show_diff or num_context_lines is not None
    dowrite = dowrite and not show_diff

    if dowrite:
        ut.write_to(autogen_fpath, autogen_text)
    else:
        if show_diff:
            if ut.checkpath(autogen_fpath, verbose=True):
                prev_text = ut.read_from(autogen_fpath)
                textdiff = ut.util_str.get_textdiff(
                    prev_text, autogen_text, num_context_lines=num_context_lines)
                ut.print_difftext(textdiff)
        else:
            ut.util_print.print_python_code(autogen_text)
            logger.info('\nL___\n...would write to: %s' % autogen_fpath)
        logger.info(
            ' Run with -n=<index> to get a specific schema version by index. -1 == latest'
        )
        logger.info(' Run with --write to autogenerate latest schema version')
        logger.info(
            ' Run with --diff or --diff=<numcontextlines> to see the difference between current and requested'
        )
    return db

def make_name_model(num_annots, num_names=None, verbose=True, mode=1):
    """
    Defines the general name model

    CommandLine:
        python -m wbia.algo.hots.bayes --exec-make_name_model --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.algo.hots.bayes import *  # NOQA
        >>> defaults = dict(num_annots=2, num_names=2, verbose=True, mode=2)
        >>> kw = ut.argparse_funckw(make_name_model, defaults)
        >>> model = make_name_model(**kw)
        >>> ut.quit_if_noshow()
        >>> show_model(model, show_prior=True)
        >>> ut.show_if_requested()
    """
    # annots = ut.chr_range(num_annots, base='a')
    mode = ut.get_argval('--mode', default=mode)
    annots = ut.chr_range(num_annots, base=ut.get_argval('--base', default='a'))
    # The indexes of match CPDs will not change if another annotation is added
    upper_diag_idxs = ut.colwise_diag_idxs(num_annots, 2)
    if num_names is None:
        num_names = num_annots

    # -- Define CPD Templates
    def match_pmf(match_type, n1, n2):
        if n1 == n2:
            val = 1.0 if match_type == 'same' else 0.0
            # val = .999 if match_type == 'same' else 0.001
        elif n1 != n2:
            # val = 0.01 if match_type == 'same' else .99
            val = 0.0 if match_type == 'same' else 1.0
        return val

    def score_pmf(score_type, match_type):
        score_lookup = {
            'same': {'low': 0.1, 'high': 0.9, 'veryhigh': 0.9},
            'diff': {'low': 0.9, 'high': 0.09, 'veryhigh': 0.01}
            #'same': {'low': .1, 'high': .9},
            #'diff': {'low': .9, 'high': .1}
        }
        val = score_lookup[match_type][score_type]
        return val

    def score_pmf3(score_type, match_type, isdup='False'):
        score_lookup = {
            'False': {
                'same': {'low': 0.1, 'high': 0.5, 'veryhigh': 0.4},
                'diff': {'low': 0.9, 'high': 0.09, 'veryhigh': 0.01},
            },
            'True': {
                'same': {'low': 0.01, 'high': 0.2, 'veryhigh': 0.79},
                'diff': {'low': 0.4, 'high': 0.4, 'veryhigh': 0.2},
            },
        }
        val = score_lookup[isdup][match_type][score_type]
        return val

    def score_pmf2(score_type, n1, n2):
        score_lookup = {
            True: {'low': 0.1, 'high': 0.4, 'veryhigh': 0.5},
            False: {'low': 0.9, 'high': 0.09, 'veryhigh': 0.01},
        }
        val = score_lookup[n1 == n2][score_type]
        return val

    def dup_pmf(dupstate, match_type):
        lookup = {
            'same': {'True': 0.5, 'False': 0.5},
            'diff': {'True': 0.0, 'False': 1.0},
        }
        return lookup[match_type][dupstate]

    def check_pmf(n0, n1, match_type):
        pass

    def trimatch_pmf(match_ab, match_bc, match_ca):
        lookup = {
            'same': {
                'same': {'same': 1, 'diff': 0},
                'diff': {'same': 0, 'diff': 1},
            },
            'diff': {
                'same': {'same': 0, 'diff': 1},
                'diff': {'same': 0.5, 'diff': 0.5},
            },
        }
        return lookup[match_ca][match_bc][match_ab]

    name_cpd_t = pgm_ext.TemplateCPD('name', ('n', num_names), varpref='N',
                                     special_basis_pool=SPECIAL_BASIS_POOL)

    if mode == 1 or mode == 5:
        match_cpd_t = pgm_ext.TemplateCPD(
            'match', ['diff', 'same'],
            varpref='M',
            evidence_ttypes=[name_cpd_t, name_cpd_t],
            pmf_func=match_pmf,
        )
        if mode == 5:
            trimatch_cpd_t = pgm_ext.TemplateCPD(
                'tri_match', ['diff', 'same'],
                varpref='T',
                # evidence_ttypes=[match_cpd_t, match_cpd_t, match_cpd_t],
                evidence_ttypes=[match_cpd_t, match_cpd_t],
                pmf_func=trimatch_pmf,
            )
            score_cpd_t = pgm_ext.TemplateCPD(
                #'score', ['low', 'high', 'veryhigh'],
                'score', ['low', 'high'],
                varpref='S',
                evidence_ttypes=[match_cpd_t],
                pmf_func=score_pmf,
            )
        else:
            score_cpd_t = pgm_ext.TemplateCPD(
                #'score', ['low', 'high', 'veryhigh'],
                'score', ['low', 'high'],
                varpref='S',
                evidence_ttypes=[match_cpd_t],
                pmf_func=score_pmf,
            )
    elif mode == 2:
        name_cpd_t = pgm_ext.TemplateCPD('name', ('n', num_names), varpref='N',
                                         special_basis_pool=SPECIAL_BASIS_POOL)
        score_cpd_t = pgm_ext.TemplateCPD(
            #'score', ['low', 'high', 'veryhigh'],
            'score', ['low', 'high'],
            varpref='S',
            evidence_ttypes=[name_cpd_t, name_cpd_t],
            pmf_func=score_pmf2,
        )
    elif mode == 3 or mode == 4:
        match_cpd_t = pgm_ext.TemplateCPD(
            'match', ['diff', 'same'],
            varpref='M',
            evidence_ttypes=[name_cpd_t, name_cpd_t],
            pmf_func=match_pmf,
        )
        if mode == 3:
            dup_cpd_t = pgm_ext.TemplateCPD(
                'dup', ['False', 'True'],
                varpref='D',
            )
        else:
            dup_cpd_t = pgm_ext.TemplateCPD(
                'dup', ['False', 'True'],
                varpref='D',
                evidence_ttypes=[match_cpd_t],
                pmf_func=dup_pmf,
            )
        score_cpd_t = pgm_ext.TemplateCPD(
            'score', ['low', 'high', 'veryhigh'],
            varpref='S',
            evidence_ttypes=[match_cpd_t, dup_cpd_t],
            pmf_func=score_pmf3,
        )

    # Instantiate templates
    if mode == 1 or mode == 5:
        name_cpds = [name_cpd_t.new_cpd(parents=aid) for aid in annots]
        namepair_cpds = ut.list_unflat_take(name_cpds, upper_diag_idxs)
        match_cpds = [match_cpd_t.new_cpd(parents=cpds)
                      for cpds in namepair_cpds]
        score_cpds = [score_cpd_t.new_cpd(parents=cpds)
                      for cpds in zip(match_cpds)]
        if mode == 5:
            # triple_idxs = ut.colwise_diag_idxs(num_annots, 3)
            tid2_match = {cpd._template_id: cpd for cpd in match_cpds}
            trimatch_cpds = []
            # such hack
            for cpd in match_cpds:
                parents = []
                this_ = list(cpd._template_id)
                for aid in annots:
                    if aid in this_:
                        continue
                    for aid2 in this_:
                        key = aid2 + aid
                        if key not in tid2_match:
                            key = aid + aid2
                        parents += [tid2_match[key]]
                trimatch_cpds += [trimatch_cpd_t.new_cpd(parents=parents)]
            # score_cpds = [score_cpd_t.new_cpd(parents=cpds)
            #               for cpds in zip(trimatch_cpds)]
            cpd_list = name_cpds + score_cpds + match_cpds + trimatch_cpds
        else:
            cpd_list = name_cpds + score_cpds + match_cpds
    elif mode == 2:
        name_cpds = [name_cpd_t.new_cpd(parents=aid) for aid in annots]
        namepair_cpds = ut.list_unflat_take(name_cpds, upper_diag_idxs)
        score_cpds = [score_cpd_t.new_cpd(parents=cpds)
                      for cpds in namepair_cpds]
        cpd_list = name_cpds + score_cpds
    elif mode == 3 or mode == 4:
        name_cpds = [name_cpd_t.new_cpd(parents=aid) for aid in annots]
        namepair_cpds = ut.list_unflat_take(name_cpds, upper_diag_idxs)
        match_cpds = [match_cpd_t.new_cpd(parents=cpds)
                      for cpds in namepair_cpds]
        if mode == 3:
            dup_cpds = [dup_cpd_t.new_cpd(parents=''.join(map(str, aids)))
                        for aids in ut.list_unflat_take(annots, upper_diag_idxs)]
        else:
            dup_cpds = [dup_cpd_t.new_cpd(parents=[mcpds])
                        for mcpds in match_cpds]
        score_cpds = [score_cpd_t.new_cpd(parents=([mcpds] + [dcpd]))
                      for mcpds, dcpd in zip(match_cpds, dup_cpds)]
        cpd_list = name_cpds + score_cpds + match_cpds + dup_cpds

    # logger.info('upper_diag_idxs = %r' % (upper_diag_idxs,))
    logger.info('score_cpds = %r' % (ut.list_getattr(score_cpds, 'variable'),))
    # import sys
    # sys.exit(1)

    # Make Model
    model = pgm_ext.define_model(cpd_list)
    model.num_names = num_names

    if verbose:
        model.print_templates()
        # ut.colorprint('\n --- CPD Templates ---', 'blue')
        # for temp_cpd in templates:
        #     ut.colorprint(temp_cpd._cpdstr('psql'), 'cyan')
    # print_ascii_graph(model)
    return model

import logging
import six
import numpy as np
import math
import utool as ut
from collections import OrderedDict
from os.path import join

(print, rrr, profile) = ut.inject2(__name__)
logger = logging.getLogger('wbia')

CONTAINERIZED = ut.get_argflag('--containerized')
PRODUCTION = ut.get_argflag('--production')
HTTPS = ut.get_argflag('--https')

CONTAINER_NAME = ut.get_argval('--container-name', type_=str, default=ut.get_computer_name())
ENGINE_SLOT = ut.get_argval('--engine-slot', type_=str, default='default')

PI = math.pi
TAU = 2.0 * PI

VIEWTEXT_TO_YAW_RADIANS = OrderedDict([
    ('right', 0.000 * TAU),
    ('frontright', 0.125 * TAU),
    ('front', 0.250 * TAU),
    ('frontleft', 0.375 * TAU),
    ('left', 0.500 * TAU),
    ('backleft', 0.625 * TAU),
    ('back', 0.750 * TAU),
    ('backright', 0.875 * TAU),
])

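# Hedged sanity sketch (illustrative only): the table maps the eight canonical
# view texts onto evenly spaced yaws, so 'left' is exactly half a turn.
if False:  # example only
    assert abs(VIEWTEXT_TO_YAW_RADIANS['left'] - PI) < 1e-9
    assert abs(VIEWTEXT_TO_YAW_RADIANS['frontright'] - 0.125 * TAU) < 1e-9
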
def testdata_expanded_aids(
    defaultdb=None,
    a=None,
    ibs=None,
    default_qaids=None,
    default_daids=None,
    qaid_override=None,
    daid_override=None,
    return_annot_info=False,
    verbose=None,
    use_cache=None,
):
    r"""
    Args:
        default_qaids (list): (default = [1])
        default_daids (str): (default = 'all')
        defaultdb (str): (default = 'testdb1')
        ibs (IBEISController): wbia controller object(default = None)
        verbose (bool): verbosity flag(default = False)
        return_annot_info (bool): (default = False)

    Returns:
        ibs, qaid_list, daid_list, annot_info:

    CommandLine:
        python -m wbia.init.main_helpers testdata_expanded_aids
        python -m wbia.init.main_helpers testdata_expanded_aids --db PZ_MTEST --acfg default:index=0:25 --verbose-testdata
        python -m wbia.init.main_helpers testdata_expanded_aids --db PZ_MTEST --qaid 3
        python -m wbia.init.main_helpers testdata_expanded_aids --db GZ_ALL --acfg ctrl --verbose-testdata

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.init.main_helpers import *  # NOQA
        >>> import wbia
        >>> from wbia.expt import annotation_configs
        >>> ibs, qaid_list, daid_list, aidcfg = testdata_expanded_aids(return_annot_info=True)
        >>> print('Printing annot config')
        >>> annotation_configs.print_acfg(aidcfg)
        >>> print('Printing annotconfig stats')
        >>> ibs.print_annotconfig_stats(qaid_list, daid_list)
        >>> print('Combined annotconfig stats')
        >>> ibs.print_annot_stats(qaid_list + daid_list, viewcode_isect=True)
        >>> print('qaid_list = %r' % (qaid_list,))
    """
    if verbose is None:
        verbose = 1

    if verbose:
        logger.info('[main_helpers] testdata_expanded_aids')

    default_qaids = ut.get_argval(
        ('--qaid', '--qaid-override'), type_=list, default=default_qaids
    )
    if default_qaids is None:
        default_qaids = [1]

    if defaultdb is None:
        defaultdb = 'testdb1'
    import wbia

    if ibs is None:
        ibs = wbia.opendb(defaultdb=defaultdb)

    # TODO: rectify command line with function arguments
    from wbia.expt import experiment_helpers

    _specified2 = True
    if a is None:
        _specified2 = False
        a = ['default']

    if isinstance(a, six.string_types):
        a = [a]
    aidcfg_name_list, _specified = ut.get_argval(
        ('--aidcfg', '--acfg', '-a'), type_=list, default=a, return_specified=True
    )

    if not _specified:
        # Allow a to be specified an explicit default
        if len(a) == 2:
            qaids, daids = a
            if ut.is_int(qaids[0]) and ut.is_int(daids[0]):
                if return_annot_info:
                    return ibs, qaids, daids, None
                else:
                    return ibs, qaids, daids

    acfg_list, expanded_aids_list = experiment_helpers.get_annotcfg_list(
        ibs,
        aidcfg_name_list,
        qaid_override=qaid_override,
        use_cache=use_cache,
        daid_override=daid_override,
        verbose=max(0, verbose - 1),
    )

    # aidcfg = old_main_helpers.get_commandline_aidcfg()
    assert len(acfg_list) == 1, (
        'multiple acfgs specified, but this function '
        'is built to return only 1. len(acfg_list)=%r'
    ) % (len(acfg_list),)
    aidcfg = acfg_list[0]

    qaid_list, daid_list = expanded_aids_list[0]

    if not (_specified or _specified2):
        # hack
        if default_qaids is not None and qaid_override is None:
            qaid_list = default_qaids
        if default_daids is not None and daid_override is None:
            daid_list = default_daids

    if ut.VERYVERBOSE:
        ibs.print_annotconfig_stats(qaid_list, daid_list)
        # wbia.other.dbinfo.print_qd_info(ibs, qaid_list, daid_list, verbose=True)
    if return_annot_info:
        return ibs, qaid_list, daid_list, aidcfg
    else:
        return ibs, qaid_list, daid_list

def testdata_aids(
    defaultdb=None,
    a=None,
    adefault='default',
    ibs=None,
    return_acfg=False,
    verbose=None,
    default_aids=None,
    default_set='qcfg',
):
    r"""
    Grabs default testdata for functions, but is command line overrideable

    CommandLine:
        python -m wbia testdata_aids --verbtd --db PZ_ViewPoints
        python -m wbia testdata_aids --verbtd --db NNP_Master3 -a is_known=True,view_pername='#primary>0&#primary1>=1'
        python -m wbia testdata_aids --verbtd --db PZ_Master1 -a default:is_known=True,view_pername='#primary>0&#primary1>=1'
        python -m wbia testdata_aids --verbtd --db PZ_Master1 -a default:species=primary,minqual=ok --verbtd
        python -m wbia.other.dbinfo --test-latex_dbstats --dblist
        python -m wbia testdata_aids --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.init.main_helpers import *  # NOQA
        >>> from wbia.expt import annotation_configs
        >>> import wbia
        >>> #ibs = wbia.opendb(defaultdb='PZ_ViewPoints')
        >>> ibs = wbia.opendb(defaultdb='testdb1')
        >>> a = None
        >>> adefault = 'default:is_known=True'
        >>> aids, aidcfg = testdata_aids(ibs=ibs, a=a, adefault=adefault, return_acfg=True)
        >>> print('\n RESULT:')
        >>> annotation_configs.print_acfg(aidcfg, aids, ibs, per_name_vpedge=None)
    """
    import wbia
    from wbia.init import filter_annots
    from wbia.expt import annotation_configs
    from wbia.expt import cfghelpers

    if verbose is None or verbose >= 1:
        logger.info('[main_helpers] testdata_aids')
    if a is None:
        a = adefault
    a, _specified_a = ut.get_argval(
        ('--aidcfg', '--acfg', '-a'), type_=str, default=a, return_was_specified=True
    )
    return_ibs = False
    if ibs is None:
        return_ibs = True
        if defaultdb is None:
            defaultdb = 'testdb1'
        ibs = wbia.opendb(defaultdb=defaultdb)
    named_defaults_dict = ut.dict_take(
        annotation_configs.__dict__, annotation_configs.TEST_NAMES
    )
    named_acfg_defaults = dict(
        zip(
            annotation_configs.TEST_NAMES,
            ut.get_list_column(named_defaults_dict, default_set),
        )
    )
    # Allow command line override
    aids, _specified_aids = ut.get_argval(
        ('--aid', '--aids'), type_=list, default=default_aids, return_was_specified=True
    )

    aidcfg = None
    have_aids = aids is not None
    need_expand = (not have_aids) or (_specified_a and not _specified_aids)
    # (not aid) or (sa and (not said))
    if need_expand:
        # base_cfg = annotation_configs.single_default
        aidcfg_combo_list = cfghelpers.parse_cfgstr_list2(
            [a],
            named_acfg_defaults,
            'acfg',
            annotation_configs.ALIAS_KEYS,
            expand_nested=False,
            is_nestedcfgtype=False,
        )
        aidcfg_combo = aidcfg_combo_list[0]
        if len(aidcfg_combo_list) != 1:
            raise AssertionError('Error: combinations not handled for single cfg setting')
        if len(aidcfg_combo) != 1:
            raise AssertionError('Error: combinations not handled for single cfg setting')
        aidcfg = aidcfg_combo[0]
        aids = filter_annots.expand_single_acfg(ibs, aidcfg, verbose=verbose)
    if return_ibs:
        return ibs, aids
    if return_acfg:
        return aids, aidcfg
    else:
        return aids

def in_depth_ellipse(kp):
    """
    Makes sure that I understand how the ellipse is created from a keypoint
    representation. Walks through the steps I took in coming to an
    understanding.

    CommandLine:
        python -m pyhesaff.tests.test_ellipse --test-in_depth_ellipse --show --num-samples=12

    Example:
        >>> # SCRIPT
        >>> from pyhesaff.tests.test_ellipse import *  # NOQA
        >>> import pyhesaff.tests.pyhestest as pyhestest
        >>> test_data = pyhestest.load_test_data(short=True)
        >>> kpts = test_data['kpts']
        >>> kp = kpts[0]
        >>> #kp = np.array([0, 0, 10, 10, 10, 0])
        >>> print('Testing kp=%r' % (kp,))
        >>> test_locals = in_depth_ellipse(kp)
        >>> ut.quit_if_noshow()
        >>> ut.show_if_requested()
    """
    #nSamples = 12
    nSamples = ut.get_argval('--num-samples', type_=int, default=12)
    kp = np.array(kp, dtype=np.float64)
    print('kp = %r' % kp)
    #-----------------------
    # SETUP
    #-----------------------
    np.set_printoptions(precision=3)
    df2.reset()
    df2.figure(9003, docla=True, doclf=True)
    ax = df2.gca()
    ax.invert_yaxis()

    def _plotpts(data, px, color=df2.BLUE, label='', marker='.', **kwargs):
        #df2.figure(9003, docla=True, pnum=(1, 1, px))
        df2.plot2(data.T[0], data.T[1], marker, '', color=color,
                  label=label, **kwargs)
        #df2.update()

    def _plotarrow(x, y, dx, dy, color=df2.BLUE, label=''):
        ax = df2.gca()
        arrowargs = dict(head_width=.5, length_includes_head=True, label=label)
        arrow = mpl.patches.FancyArrow(x, y, dx, dy, **arrowargs)
        arrow.set_edgecolor(color)
        arrow.set_facecolor(color)
        ax.add_patch(arrow)
        #df2.update()

    #-----------------------
    # INPUT
    #-----------------------
    # We will call perdoch's invA = invV
    print('--------------------------------')
    print('Let V = Perdoch.A')
    print('Let Z = Perdoch.E')
    print('--------------------------------')
    print('Input from Perdoch\'s detector: ')

    # We are given the keypoint in invA format
    if len(kp) == 5:
        (ix, iy, iv11, iv21, iv22), iv12 = kp, 0
    elif len(kp) == 6:
        (ix, iy, iv11, iv21, iv22, ori), iv12 = kp, 0
    invV = np.array([[iv11, iv12, ix],
                     [iv21, iv22, iy],
                     [0, 0, 1]])
    V = np.linalg.inv(invV)
    Z = (V.T).dot(V)

    print('invV is a transform from points on a unit-circle to the ellipse')
    ut.horiz_print('invV = ', invV)
    print('--------------------------------')
    print('V is a transformation from points on the ellipse to a unit circle')
    ut.horiz_print('V = ', V)
    print('--------------------------------')
    print('An ellipse is a special case of a conic. For any ellipse:')
    print('Points on the ellipse satisfy (x_ - x_0).T.dot(Z).dot(x_ - x_0) = 1')
    print('where Z = (V.T).dot(V)')
    ut.horiz_print('Z = ', Z)

    # Define points on a unit circle
    theta_list = np.linspace(0, TAU, nSamples)
    cicrle_pts = np.array([(np.cos(t_), np.sin(t_), 1) for t_ in theta_list])

    # Transform those points to the ellipse using invV
    ellipse_pts1 = invV.dot(cicrle_pts.T).T

    # Let's check our assertion: (x_ - x_0).T.dot(Z).dot(x_ - x_0) == 1
    x_0 = np.array([ix, iy, 1])
    checks = [(x_ - x_0).T.dot(Z).dot(x_ - x_0) for x_ in ellipse_pts1]
    try:
        # HELP: The phase is off here. in 3x3 version I'm not sure why
        #assert all([almost_eq(1, check) for check in checks1])
        is_almost_eq_pos1 = [ut.almost_eq(1, check) for check in checks]
        is_almost_eq_neg1 = [ut.almost_eq(-1, check) for check in checks]
        assert all(is_almost_eq_pos1)
    except AssertionError as ex:
        print('circle pts = %r ' % cicrle_pts)
        print(ex)
        print(checks)
        print([ut.almost_eq(-1, check, 1E-9) for check in checks])
        raise
    else:
        #assert all([abs(1 - check) < 1E-11 for check in checks2])
        print('... all of our plotted points satisfy this')

    #=======================
    # THE CONIC SECTION
    #=======================
    # All of this was from the Perdoch paper, now let's move into conic sections
    # We will use the notation from wikipedia
    # References:
    #     http://en.wikipedia.org/wiki/Conic_section
    #     http://en.wikipedia.org/wiki/Matrix_representation_of_conic_sections

    #-----------------------
    # MATRIX REPRESENTATION
    #-----------------------
    # The matrix representation of a conic is:
    #(A,  B2, B2_, C) = Z.flatten()
    #(D, E, F) = (0, 0, 1)
    (A, B2, D2, B2_, C, E2, D2_, E2_, F) = Z.flatten()
    B = B2 * 2
    D = D2 * 2
    E = E2 * 2
    assert B2 == B2_, 'matrix should be symmetric'
    assert D2 == D2_, 'matrix should be symmetric'
    assert E2 == E2_, 'matrix should be symmetric'
    print('--------------------------------')
    print('Now, using wikipedia\'s matrix representation of a conic.')
    con = np.array((('    A', 'B / 2', 'D / 2'),
                    ('B / 2', '    C', 'E / 2'),
                    ('D / 2', 'E / 2', '    F')))
    ut.horiz_print('A matrix A_Q = ', con)

    # A_Q is our conic section (aka ellipse matrix)
    A_Q = np.array(((A, B / 2, D / 2),
                    (B / 2, C, E / 2),
                    (D / 2, E / 2, F)))
    ut.horiz_print('A_Q = ', A_Q)

    #-----------------------
    # DEGENERATE CONICS
    # References:
    #    http://individual.utoronto.ca/somody/quiz.html
    print('----------------------------------')
    print('As long as det(A_Q) != 0, the conic is not degenerate.')
    print('If the conic is not degenerate, we can use the 2x2 minor: A_33')
    print('det(A_Q) = %s' % str(np.linalg.det(A_Q)))
    assert np.linalg.det(A_Q) != 0, 'degenerate conic'
    A_33 = np.array(((A, B / 2),
                     (B / 2, C)))
    ut.horiz_print('A_33 = ', A_33)

    #-----------------------
    # CONIC CLASSIFICATION
    #-----------------------
    print('----------------------------------')
    print('The determinant of the minor classifies the type of conic it is')
    print('(det == 0): parabola, (det < 0): hyperbola, (det > 0): ellipse')
    print('det(A_33) = %s' % str(np.linalg.det(A_33)))
    assert np.linalg.det(A_33) > 0, 'conic is not an ellipse'
    print('... this is indeed an ellipse')

    #-----------------------
    # CONIC CENTER
    #-----------------------
    print('----------------------------------')
    print('the centers of the ellipse are obtained by: ')
    print('x_center = (B * E - (2 * C * D)) / (4 * A * C - B ** 2)')
    print('y_center = (D * B - (2 * A * E)) / (4 * A * C - B ** 2)')
    # Centers are obtained by solving for where the gradient of the quadratic
    # becomes 0. Without going through the derivation the calculation is...
    # These should be 0, 0 if we are at the origin, or our original x, y
    # coordinate specified by the keypoints. I'm doing the calculation just for
    # shits and giggles
    x_center = (B * E - (2 * C * D)) / (4 * A * C - B ** 2)
    y_center = (D * B - (2 * A * E)) / (4 * A * C - B ** 2)
    ut.horiz_print('x_center = ', x_center)
    ut.horiz_print('y_center = ', y_center)

    #-----------------------
    # MAJOR AND MINOR AXES
    #-----------------------
    # Now we are going to determine the major and minor axis
    # of this beast. It is just the center augmented by the eigenvecs
    print('----------------------------------')
    # Plot ellipse axis
    # !HELP! I DO NOT KNOW WHY I HAVE TO DIVIDE, SQUARE ROOT, AND NEGATE!!!
    (evals, evecs) = np.linalg.eig(A_33)
    l1, l2 = evals
    # The major and minor axis lengths
    b = 1 / np.sqrt(l1)
    a = 1 / np.sqrt(l2)
    v1, v2 = evecs
    # Find the transformation to align the axis
    nminor = v1
    nmajor = v2
    dx1, dy1 = (v1 * b)
    dx2, dy2 = (v2 * a)
    minor = np.array([dx1, -dy1])
    major = np.array([dx2, -dy2])
    x_axis = np.array([[1], [0]])
    cosang = (x_axis.T.dot(nmajor)).T
    # Rotation angle
    theta = np.arccos(cosang)
    print('a = ' + str(a))
    print('b = ' + str(b))
    print('theta = ' + str(theta[0] / TAU) + ' * 2pi')
    # The warped eigenvects should have the same magnitude
    # As the axis lengths
    assert ut.almost_eq(a, major.dot(ltool.rotation_mat2x2(theta))[0])
    assert ut.almost_eq(b, minor.dot(ltool.rotation_mat2x2(theta))[1])
    try:
        # HACK
        if len(theta) == 1:
            theta = theta[0]
    except Exception:
        pass

    #-----------------------
    # ECCENTRICITY
    #-----------------------
    print('----------------------------------')
    print('The eccentricity is determined by:')
    print('')
    print('            (2 * np.sqrt((A - C) ** 2 + B ** 2))     ')
    print('ecc = -----------------------------------------------')
    print('      (nu * (A + C) + np.sqrt((A - C) ** 2 + B ** 2))')
    print('')
    print('(nu is always 1 for ellipses)')
    nu = 1
    ecc_numer = (2 * np.sqrt((A - C) ** 2 + B ** 2))
    ecc_denom = (nu * (A + C) + np.sqrt((A - C) ** 2 + B ** 2))
    ecc = np.sqrt(ecc_numer / ecc_denom)
    print('ecc = ' + str(ecc))

    # Eccentricity is a little easier in axis aligned coordinates
    # Make sure they agree
    ecc2 = np.sqrt(1 - (b ** 2) / (a ** 2))
    assert ut.almost_eq(ecc, ecc2)

    #-----------------------
    # APPROXIMATE UNIFORM SAMPLING
    #-----------------------
    # We are given the keypoint in invA format
    print('----------------------------------')
    print('Approximate uniform points on an inscribed polygon boundary')

    #def next_xy(x, y, d):
    #    # References:
    #    # http://gamedev.stackexchange.com/questions/1692/what-is-a-simple-algorithm-for-calculating-evenly-distributed-points-on-an-ellip
    #    num = (b ** 2) * (x ** 2)
    #    den = ((a ** 2) * ((a ** 2) - (x ** 2)))
    #    dxdenom = np.sqrt(1 + (num / den))
    #    deltax = d / dxdenom
    #    x_ = x + deltax
    #    y_ = b * np.sqrt(1 - (x_ ** 2) / (a ** 2))
    #    return x_, y_

    def xy_fn(t):
        return np.array((a * np.cos(t), b * np.sin(t))).T

    #nSamples = 16
    #(ix, iy, iv11, iv21, iv22), iv12 = kp, 0
    #invV = np.array([[iv11, iv12, ix],
    #                 [iv21, iv22, iy],
    #                 [   0,    0,  1]])
    #theta_list = np.linspace(0, TAU, nSamples)
    #cicrle_pts = np.array([(np.cos(t_), np.sin(t_), 1) for t_ in theta_list])
    uneven_points = invV.dot(cicrle_pts.T).T[:, 0:2]

    #uneven_points2 = xy_fn(theta_list)

    def circular_distance(arr):
        dist_most_ = ((arr[0:-1] - arr[1:]) ** 2).sum(1)
        dist_end_ = ((arr[-1] - arr[0]) ** 2).sum()
        return np.sqrt(np.hstack((dist_most_, dist_end_)))

    # Calculate the distance from each point on the ellipse to the next
    dists = circular_distance(uneven_points)
    total_dist = dists.sum()
    # Get an even step size
    multiplier = 1
    step_size = total_dist / (nSamples * multiplier)
    # Walk along edge
    num_steps_list = []
    offset_list = []
    dist_walked = 0
    total_dist = step_size
    for count in range(len(dists)):
        segment_len = dists[count]
        # Find where your starting location is
        offset_list.append(total_dist - dist_walked)
        # How far can you possibly go?
        total_dist += segment_len
        # How many steps can you take?
        num_steps = int((total_dist - dist_walked) // step_size)
        num_steps_list.append(num_steps)
        # Log how much further you've gotten
        dist_walked += (num_steps * step_size)

    #print('step_size = %r' % step_size)
    #print(np.vstack((num_steps_list, dists, offset_list)).T)

    # store the percent location at each line segment where
    # the cut will be made
    cut_list = []
    for num, dist, offset in zip(num_steps_list, dists, offset_list):
        if num == 0:
            cut_list.append([])
            continue
        offset1 = (step_size - offset) / dist
        offset2 = ((num * step_size) - offset) / dist
        cut_locs = (np.linspace(offset1, offset2, num, endpoint=True))
        cut_list.append(cut_locs)
        #print(cut_locs)

    # Cut the segments into new better segments
    approx_pts = []
    nPts = len(uneven_points)
    for count, cut_locs in enumerate(cut_list):
        for loc in cut_locs:
            pt1 = uneven_points[count]
            pt2 = uneven_points[(count + 1) % nPts]
            # Linearly interpolate between points
            new_loc = ((1 - loc) * pt1) + ((loc) * pt2)
            approx_pts.append(new_loc)
    approx_pts = np.array(approx_pts)

    # Warp approx_pts to the unit circle
    print('----------------------------------')
    print('For each approximate point, find the closest point on the ellipse')
    #new_unit = V.dot(approx_pts.T).T
    ones_ = np.ones(len(approx_pts))
    new_hlocs = np.vstack((approx_pts.T, ones_))
    new_unit = V.dot(new_hlocs).T
    # normalize new_unit
    new_mag = np.sqrt((new_unit ** 2).sum(1))
    new_unorm_unit = new_unit / np.vstack([new_mag] * 3).T
    new_norm_unit = new_unorm_unit / np.vstack([new_unorm_unit[:, 2]] * 3).T

    # Get angle (might not be necessary)
    x_axis = np.array([1, 0, 0])
    arccos_list = x_axis.dot(new_norm_unit.T)
    uniform_theta_list = np.arccos(arccos_list)
    # Maybe this?
    uniform_theta_list = np.arctan2(new_norm_unit[:, 1], new_norm_unit[:, 0])
    #
    unevn_cicrle_pts = np.array([(np.cos(t_), np.sin(t_), 1)
                                 for t_ in uniform_theta_list])

    # This is the output. Approximately uniform points sampled along an ellipse
    uniform_ell_pts = invV.dot(unevn_cicrle_pts.T).T
    #uniform_ell_pts = invV.dot(new_norm_unit.T).T

    _plotpts(approx_pts, 0, df2.YELLOW, label='approx points', marker='o-')
    _plotpts(uniform_ell_pts, 0, df2.RED, label='uniform points', marker='o-')

    # Desired number of points
    #ecc = np.sqrt(1 - (b ** 2) / (a ** 2))
    # Total arclength
    #total_arclen = ellipeinc(TAU, ecc)
    #firstquad_arclen = total_arclen / 4
    # Desired arclength between points
    #d = firstquad_arclen / nSamples
    # Initial point
    #x, y = xy_fn(.001)
    #uniform_points = []
    #for count in range(nSamples):
    #    if np.isnan(x_) or np.isnan(y_):
    #        print('nan on count=%r' % count)
    #        break
    #    uniform_points.append((x_, y_))
    # The angle between the major axis and our x axis is:

    #-----------------------
    # DRAWING
    #-----------------------
    print('----------------------------------')
    # Draw the keypoint using the tried and true df2
    # Other things should subsequently align
    #df2.draw_kpts2(np.array([kp]), ell_linewidth=4,
    #               ell_color=df2.DEEP_PINK, ell_alpha=1, arrow=True, rect=True)

    # Plot ellipse points
    _plotpts(ellipse_pts1, 0, df2.LIGHT_BLUE, label='invV.dot(cicrle_pts.T).T',
             marker='o-')

    _plotarrow(x_center, y_center, dx1, -dy1, color=df2.GRAY, label='minor axis')
    _plotarrow(x_center, y_center, dx2, -dy2, color=df2.GRAY, label='major axis')

    # Rotate the ellipse so it is axis aligned and plot that
    rot = ltool.rotation_around_mat3x3(theta, ix, iy)
    ellipse_pts3 = rot.dot(ellipse_pts1.T).T
    #!_plotpts(ellipse_pts3, 0, df2.GREEN, label='axis aligned points')

    # Plot ellipse orientation
    ortho_basis = np.eye(3)[:, 0:2]
    orient_axis = invV.dot(ortho_basis)
    print(orient_axis)
    _dx1, _dx2, _dy1, _dy2, _1, _2 = orient_axis.flatten()
    #!_plotarrow(x_center, y_center, _dx1, _dy1, color=df2.BLUE, label='ellipse rotation')
    #!_plotarrow(x_center, y_center, _dx2, _dy2, color=df2.BLUE)

    #df2.plt.gca().set_xlim(400, 600)
    #df2.plt.gca().set_ylim(300, 500)
    xmin, ymin = ellipse_pts1.min(0)[0:2] - 1
    xmax, ymax = ellipse_pts1.max(0)[0:2] + 1
    df2.plt.gca().set_xlim(xmin, xmax)
    df2.plt.gca().set_ylim(ymin, ymax)

    df2.legend()
    df2.dark_background(doubleit=3)
    df2.gca().invert_yaxis()

    # Hack in another view
    # It seems like the even points are not actually that even.
    # there must be a bug
    df2.figure(fnum=9003 + 1, docla=True, doclf=True, pnum=(1, 3, 1))
    _plotpts(ellipse_pts1, 0, df2.LIGHT_BLUE, label='invV.dot(cicrle_pts.T).T',
             marker='o-', title='even')
    df2.plt.gca().set_xlim(xmin, xmax)
    df2.plt.gca().set_ylim(ymin, ymax)
    df2.dark_background(doubleit=3)
    df2.gca().invert_yaxis()

    df2.figure(fnum=9003 + 1, pnum=(1, 3, 2))
    _plotpts(approx_pts, 0, df2.YELLOW, label='approx points', marker='o-',
             title='approx')
    df2.plt.gca().set_xlim(xmin, xmax)
    df2.plt.gca().set_ylim(ymin, ymax)
    df2.dark_background(doubleit=3)
    df2.gca().invert_yaxis()

    df2.figure(fnum=9003 + 1, pnum=(1, 3, 3))
    _plotpts(uniform_ell_pts, 0, df2.RED, label='uniform points', marker='o-',
             title='uniform')
    df2.plt.gca().set_xlim(xmin, xmax)
    df2.plt.gca().set_ylim(ymin, ymax)
    df2.dark_background(doubleit=3)
    df2.gca().invert_yaxis()

    return locals()
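# A compact, self-contained recap of the conic arithmetic walked through
# above, runnable with numpy alone. The keypoint values here are made up for
# illustration; the formulas follow the wikipedia matrix representation of a
# conic used in in_depth_ellipse.
def _demo_conic_from_invV():
    import numpy as np
    ix, iy, iv11, iv21, iv22 = 10.0, 20.0, 4.0, 1.0, 2.0  # hypothetical kp
    invV = np.array([[iv11, 0.0, ix],
                     [iv21, iv22, iy],
                     [0.0, 0.0, 1.0]])
    V = np.linalg.inv(invV)
    Z = V.T.dot(V)
    (A, B2, D2, _, C, E2, _, _, F) = Z.flatten()
    B, D, E = B2 * 2, D2 * 2, E2 * 2
    A_33 = np.array([[A, B / 2], [B / 2, C]])
    assert np.linalg.det(A_33) > 0, 'not an ellipse'
    # Center: where the gradient of the quadratic vanishes; it must recover
    # the keypoint location (ix, iy)
    x_center = (B * E - 2 * C * D) / (4 * A * C - B ** 2)
    y_center = (D * B - 2 * A * E) / (4 * A * C - B ** 2)
    assert np.allclose([x_center, y_center], [ix, iy])
    # Axis lengths: the larger eigenvalue of A_33 gives the shorter axis
    evals = np.sort(np.linalg.eigvalsh(A_33))
    major, minor = 1 / np.sqrt(evals[0]), 1 / np.sqrt(evals[1])
    ecc = np.sqrt(1 - (minor ** 2) / (major ** 2))
    print('center=(%.1f, %.1f) major=%.3f minor=%.3f ecc=%.3f' %
          (x_center, y_center, major, minor, ecc))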
def postload_commands(ibs, back): """ Postload commands deal with a specific ibeis database ibeis --db PZ_MTEST --occur "*All Images" --query 1 ibeis --db PZ_MTEST --occur "*All Images" --query-intra """ if ut.NOT_QUIET: print('\n[main_cmd] postload_commands') if params.args.view_database_directory: print('got arg --vdd') vdd(ibs) if params.args.set_default_dbdir: sysres.set_default_dbdir(ibs.get_dbdir()) if params.args.update_query_cfg is not None: # Set query parameters from command line using the --cfg flag cfgdict = ut.parse_cfgstr_list(params.args.update_query_cfg) print('Custom cfgdict specified') print(ut.repr2(cfgdict)) ibs.update_query_cfg(**cfgdict) if params.args.edit_notes: ut.editfile(ibs.get_dbnotes_fpath(ensure=True)) if params.args.delete_cache: ibs.delete_cache() if params.args.delete_cache_complete: ibs.delete_cache(delete_imagesets=True) if params.args.delete_query_cache: ibs.delete_qres_cache() if params.args.set_all_species is not None: ibs._overwrite_all_annot_species_to(params.args.set_all_species) if params.args.dump_schema: ibs.db.print_schema() if ut.get_argflag('--ipynb'): back.launch_ipy_notebook() select_imgsetid = ut.get_argval( ('--select-imgsetid', '--imgsetid', '--occur', '--gsid'), None) if select_imgsetid is not None: print('\n+ --- CMD SELECT IMGSETID=%r ---' % (select_imgsetid, )) # Whoa: this doesnt work. weird. #back.select_imgsetid(select_imgsetid) # This might be the root of gui problems #back.front._change_imageset(select_imgsetid) back.front.select_imageset_tab(select_imgsetid) print('L ___ CMD SELECT IMGSETID=%r ___\n' % (select_imgsetid, )) # Send commands to GUIBack if params.args.select_aid is not None: if back is not None: try: ibsfuncs.assert_valid_aids(ibs, (params.args.select_aid, )) except AssertionError: print('Valid RIDs are: %r' % (ibs.get_valid_aids(), )) raise back.select_aid(params.args.select_aid) if params.args.select_gid is not None: back.select_gid(params.args.select_gid) if params.args.select_nid is not None: back.select_nid(params.args.select_nid) select_name = ut.get_argval('--select-name') if select_name is not None: import ibeis.gui.guiheaders as gh back.ibswgt.select_table_indicies_from_text(gh.NAMES_TREE, select_name, allow_table_change=True) if ut.get_argflag( ('--intra-occur-query', '--query-intra-occur', '--query-intra')): back.special_query_funcs['intra_occurrence'](cfgdict={ 'use_k_padding': False }) qaid_list = ut.get_argval(('--query-aid', '--query'), type_=list, default=None) if qaid_list is not None: #qaid_list = params.args.query_aid # fix stride case if len(qaid_list) == 1 and isinstance(qaid_list[0], tuple): qaid_list = list(qaid_list[0]) daids_mode = ut.get_argval('--daids-mode', type_=str, default=const.VS_EXEMPLARS_KEY) back.compute_queries(qaid_list=qaid_list, daids_mode=daids_mode, ranks_top=10) if ut.get_argflag('--inc-query'): back.incremental_query() if ut.get_argflag(('--dbinfo', '--display_dbinfo')): back.display_dbinfo() pass aidcmd = ut.get_argval('--aidcmd', default=None) aid = ut.get_argval('--aid', type_=int, default=1) if aidcmd: #aidcmd = 'Interact image' metadata = ibs.get_annot_lazy_dict(aid) annot_context_options = metadata['annot_context_options'] aidcmd_dict = dict(annot_context_options) print('aidcmd_dict = %s' % (ut.repr3(aidcmd_dict), )) command = aidcmd_dict[aidcmd] command() #import utool #utool.embed() #back.start_web_server_parallel() if ut.get_argflag('--start-web'): back.start_web_server_parallel() if ut.get_argflag('--name-tab'): from ibeis.gui.guiheaders import NAMES_TREE 
back.front.set_table_tab(NAMES_TREE) view = back.front.views[NAMES_TREE] model = view.model() view._set_sort(model.col_name_list.index('nAids'), col_sort_reverse=True) if ut.get_argflag('--graph'): back.make_qt_graph_interface() screengrab_fpath = ut.get_argval('--screengrab') if screengrab_fpath: from guitool_ibeis.__PYQT__.QtGui import QPixmap from PyQt4.QtTest import QTest from PyQt4.QtCore import Qt fpath = ut.truepath(screengrab_fpath) import guitool_ibeis #ut.embed() timer2 = guitool_ibeis.__PYQT__.QtCore.QTimer() done = [1000] def delayed_screenshot_func(): if done[0] == 500: #back.mainwin.menubar.triggered.emit(back.mainwin.menuFile) print('Mouseclick') QTest.mouseClick(back.mainwin.menuFile, Qt.LeftButton) # This works #QTest.mouseClick(back.front.import_button, Qt.LeftButton) if done[0] == 1: timer2.stop() print('screengrab to %r' % (fpath, )) screenimg = QPixmap.grabWindow(back.mainwin.winId()) screenimg.save(fpath, 'jpg') ut.startfile(fpath) print('lub dub2') done[0] -= 1 return None CLICK_FILE_MENU = True if CLICK_FILE_MENU: #ut.embed() #QTest::keyClick(menu, Qt::Key_Down) pass timer2.delayed_screenshot_func = delayed_screenshot_func timer2.timeout.connect(timer2.delayed_screenshot_func) timer2.start(1) back.mainwin.timer2 = timer2 guitool_ibeis.activate_qwindow(back.mainwin) #QPixmap.grabWindow(back.mainwin.winId()).save(fpath, 'jpg') #ut.startfile(fpath) #ut.embed() pass if params.args.postload_exit: print('[main_cmd] postload exit') sys.exit(0)
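# postload_commands normalizes the "stride case" where a comma-separated
# --query-aid value parses as a single tuple rather than a list of ints. A
# standalone illustration of that normalization (the input values are
# hypothetical):
def _demo_fix_stride_case():
    for qaid_list in ([1, 2, 3], [(1, 2, 3)]):
        if len(qaid_list) == 1 and isinstance(qaid_list[0], tuple):
            qaid_list = list(qaid_list[0])
        print(qaid_list)  # both cases end up as [1, 2, 3]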
def start_tornado(ibs, port=None, browser=None, url_suffix=None, start_web_loop=True, fallback=True): """Initialize the web server""" if browser is None: browser = ut.get_argflag('--browser') if url_suffix is None: url_suffix = ut.get_argval('--url', default='') # from wbia import constants as const # ibs.https = const.HTTPS def _start_tornado(ibs_, port_): # Get Flask app app = controller_inject.get_flask_app() app.ibs = ibs_ # Try to ascertain the socket's domain name socket.setdefaulttimeout(0.1) try: app.server_domain = socket.gethostbyname(socket.gethostname()) except socket.gaierror: app.server_domain = '127.0.0.1' socket.setdefaulttimeout(None) app.server_port = port_ # URL for the web instance app.server_url = 'http://%s:%s' % (app.server_domain, app.server_port) logger.info('[web] Tornado server starting at %s' % (app.server_url, )) # Launch the web browser to view the web interface and API if browser: url = app.server_url + url_suffix import webbrowser logger.info('[web] opening browser with url = %r' % (url, )) webbrowser.open(url) if PROMETHEUS: # Add prometheus wsgi middleware to route /metrics requests logger.info('LOADING PROMETHEUS') app_ = DispatcherMiddleware( app, {'/metrics': prometheus_client.make_wsgi_app()}) # Migrate the most essential settings app_.server_port = app.server_port app_.server_url = app.server_url app_.ibs = app.ibs app = app_ else: logger.info('SKIPPING PROMETHEUS') # Start the tornado web handler # WSGI = Web Server Gateway Interface # WSGI is Python standard described in detail in PEP 3333 wsgi_container = TimedWSGIContainer(app) # # Try wrapping with newrelic performance monitoring # try: # import newrelic # wsgi_container = newrelic.agent.WSGIApplicationWrapper(wsgi_container) # except (ImportError, AttributeError): # pass http_server = tornado.httpserver.HTTPServer(wsgi_container) try: http_server.listen(app.server_port) except socket.error: fallback_port = ut.find_open_port(app.server_port) if fallback: logger.info( 'Port %s is unavailable, using fallback_port = %r' % ( port, fallback_port, )) start_tornado( ibs, port=fallback_port, browser=browser, url_suffix=url_suffix, start_web_loop=start_web_loop, fallback=False, ) else: raise RuntimeError( (('The specified IBEIS web port %d is not available, ' 'but %d is') % (app.server_port, fallback_port))) # Add more verbose logging try: utool_logfile_handler = ut.util_logging.__UTOOL_ROOT_LOGGER__ except Exception: utool_logfile_handler = None if utool_logfile_handler is not None: logger_list = [] try: logger_list += [ app.logger, ] except AttributeError: pass try: logger_list += [ app.app.logger, ] except AttributeError: pass logger_list += [ logging.getLogger('concurrent'), logging.getLogger('concurrent.futures'), logging.getLogger('flask_cors.core'), logging.getLogger('flask_cors'), logging.getLogger('flask_cors.decorator'), logging.getLogger('flask_cors.extension'), logging.getLogger('urllib3'), logging.getLogger('requests'), logging.getLogger('tornado'), logging.getLogger('tornado.access'), logging.getLogger('tornado.application'), logging.getLogger('tornado.general'), logging.getLogger('websocket'), ] for logger_ in logger_list: logger_.setLevel(logging.INFO) logger_.addHandler(utool_logfile_handler) if start_web_loop: tornado.ioloop.IOLoop.instance().start() # Get the port if unspecified if port is None: port = appf.DEFAULT_WEB_API_PORT # Launch the web handler _start_tornado(ibs, port)
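# The port fallback above relies on ut.find_open_port. A stdlib-only sketch
# of the same idea, not the utool implementation: binding port 0 asks the OS
# for any free ephemeral port.
def _demo_find_open_port(preferred_port):
    import socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(('', preferred_port))
        port = preferred_port
    except OSError:
        s.close()
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('', 0))  # let the OS pick a free ephemeral port
        port = s.getsockname()[1]
    s.close()
    return port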
def make_name_model(num_annots, num_names=None, verbose=True, mode=1, num_scores=2, p_score_given_same=None, hack_score_only=False, score_basis=None, special_names=None): r""" CommandLine: python -m ibeis.algo.hots.bayes --exec-make_name_model --no-cnn python -m ibeis.algo.hots.bayes --exec-make_name_model --show --no-cnn python -m ibeis.algo.hots.bayes --exec-make_name_model --num-annots=3 Example: >>> # DISABLE_DOCTEST >>> from ibeis.algo.hots.bayes import * # NOQA >>> defaults = dict(num_annots=2, num_names=2, verbose=True) >>> modeltype = ut.get_argval('--modeltype', default='bayes') >>> kw = ut.argparse_funckw(make_name_model, defaults) >>> model = make_name_model(**kw) >>> ut.quit_if_noshow() >>> model.show_model(show_prior=False, show_title=False, modeltype=modeltype) >>> ut.show_if_requested() """ if special_names is None: special_names = SPECIAL_BASIS_POOL assert mode == 1, 'only can do mode 1' base = ut.get_argval('--base', type_=str, default='a') annots = ut.chr_range(num_annots, base=base) # The indexes of match CPDs will not change if another annotation is added upper_diag_idxs = ut.colwise_diag_idxs(num_annots, 2) if hack_score_only: upper_diag_idxs = upper_diag_idxs[-hack_score_only:] if num_names is None: num_names = num_annots # +--- Define CPD Templates and Instantiation --- cpd_list = [] # Name Factor name_cpd_t = pgm_ext.TemplateCPD( NAME_TTYPE, ('n', num_names), special_basis_pool=special_names) name_cpds = [name_cpd_t.new_cpd(parents=aid) for aid in annots] #name_cpds = [name_cpd_t.new_cpd(parents=aid, constrain_state=count) # for count, aid in enumerate(annots, start=1)] cpd_list.extend(name_cpds) # Match Factor def match_pmf(match_type, n1, n2): return { True: {'same': 1.0, 'diff': 0.0}, False: {'same': 0.0, 'diff': 1.0}, }[n1 == n2][match_type] match_states = ['diff', 'same'] match_cpd_t = pgm_ext.TemplateCPD( MATCH_TTYPE, match_states, evidence_ttypes=[name_cpd_t, name_cpd_t], pmf_func=match_pmf) #match_cpd_t.varpref = 'S' namepair_cpds = ut.list_unflat_take(name_cpds, upper_diag_idxs) match_cpds = [match_cpd_t.new_cpd(parents=cpds) for cpds in namepair_cpds] cpd_list.extend(match_cpds) # Score Factor score_states = list(range(num_scores)) if score_basis is not None: score_states = ['%.2f' % (s,) for s in score_basis] if p_score_given_same is None: tmp = np.arange(num_scores + 1)[1:] tmp = np.cumsum(tmp) tmp = (tmp / tmp.sum()) p_score_given_same = tmp def score_pmf(score_type, match_type): if isinstance(score_type, six.string_types): score_type = score_states.index(score_type) if match_type == 'same': return p_score_given_same[score_type] else: return p_score_given_same[-(score_type + 1)] score_cpd_t = pgm_ext.TemplateCPD( SCORE_TTYPE, score_states, evidence_ttypes=[match_cpd_t], pmf_func=score_pmf) #match_cpd_t.varpref = 'P' score_cpds = [score_cpd_t.new_cpd(parents=cpds) for cpds in zip(match_cpds)] cpd_list.extend(score_cpds) with_humans = False if with_humans: human_states = ['diff', 'same'] human_cpd_t = pgm_ext.TemplateCPD( 'human', human_states, evidence_ttypes=[match_cpd_t], pmf_func=[[.9, .1], [.1, .9]]) human_cpds = [human_cpd_t.new_cpd(parents=cpds) for cpds in zip(match_cpds)] cpd_list.extend(human_cpds) with_rank = False # Rank depends on dependant scores if with_rank: rank_states = ['0', '1', '2', '3'] rank_cpd_t = pgm_ext.TemplateCPD( 'rank', rank_states, evidence_ttypes=[match_cpd_t], pmf_func=None) rank_cpds = [rank_cpd_t.new_cpd(parents=cpds) for cpds in zip(match_cpds)] cpd_list.extend(rank_cpds) # L___ End CPD Definitions ___ print('score_cpds 
= %r' % (ut.list_getattr(score_cpds, 'variable'),)) # Make Model model = pgm_ext.define_model(cpd_list) model.num_names = num_names if verbose: model.print_templates(ignore_ttypes=[MATCH_TTYPE]) return model
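# How the default score CPD above distributes probability mass: same-name
# pairs put increasing weight on higher score bins (the cumsum-normalized
# vector) and different-name pairs use the mirrored vector, matching the
# p_score_given_same[-(score + 1)] lookup in score_pmf. A standalone numpy
# check of that arithmetic:
def _demo_default_score_pmf(num_scores=2):
    import numpy as np
    tmp = np.cumsum(np.arange(1, num_scores + 1))
    p_score_given_same = tmp / tmp.sum()           # e.g. [0.25, 0.75]
    p_score_given_diff = p_score_given_same[::-1]  # mirrored: [0.75, 0.25]
    assert np.isclose(p_score_given_same.sum(), 1.0)
    print('P(score | same) =', p_score_given_same)
    print('P(score | diff) =', p_score_given_diff)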
def name_model_mode1(num_annots, num_names=None, verbose=True): r""" spaghettii CommandLine: python -m wbia.algo.hots.bayes --exec-name_model_mode1 --show python -m wbia.algo.hots.bayes --exec-name_model_mode1 python -m wbia.algo.hots.bayes --exec-name_model_mode1 --num-annots=3 Example: >>> # DISABLE_DOCTEST >>> from wbia.algo.hots.bayes import * # NOQA >>> defaults = dict(num_annots=2, num_names=2, verbose=True) >>> kw = ut.argparse_funckw(name_model_mode1, defaults) >>> model = name_model_mode1(**kw) >>> ut.quit_if_noshow() >>> show_model(model, show_prior=False, show_title=False) >>> ut.show_if_requested() Ignore: import nx2tikz logger.info(nx2tikz.dumps_tikz(model, layout='layered', use_label=True)) """ annots = ut.chr_range(num_annots, base=ut.get_argval('--base', default='a')) # The indexes of match CPDs will not change if another annotation is added upper_diag_idxs = ut.colwise_diag_idxs(num_annots, 2) if num_names is None: num_names = num_annots # +--- Define CPD Templates --- # +-- Name Factor --- name_cpd_t = pgm_ext.TemplateCPD('name', ('n', num_names), varpref='N', special_basis_pool=SPECIAL_BASIS_POOL) name_cpds = [name_cpd_t.new_cpd(parents=aid) for aid in annots] # +-- Match Factor --- def match_pmf(match_type, n1, n2): return { True: { 'same': 1.0, 'diff': 0.0 }, False: { 'same': 0.0, 'diff': 1.0 }, }[n1 == n2][match_type] match_cpd_t = pgm_ext.TemplateCPD( 'match', ['diff', 'same'], varpref='M', evidence_ttypes=[name_cpd_t, name_cpd_t], pmf_func=match_pmf, ) namepair_cpds = ut.list_unflat_take(name_cpds, upper_diag_idxs) match_cpds = [match_cpd_t.new_cpd(parents=cpds) for cpds in namepair_cpds] # +-- Score Factor --- def score_pmf(score_type, match_type): score_lookup = { 'same': { 'low': 0.1, 'high': 0.9, 'veryhigh': 0.9 }, 'diff': { 'low': 0.9, 'high': 0.09, 'veryhigh': 0.01 }, } val = score_lookup[match_type][score_type] return val score_cpd_t = pgm_ext.TemplateCPD( 'score', ['low', 'high'], varpref='S', evidence_ttypes=[match_cpd_t], pmf_func=score_pmf, ) score_cpds = [ score_cpd_t.new_cpd(parents=cpds) for cpds in zip(match_cpds) ] # L___ End CPD Definitions ___ cpd_list = name_cpds + score_cpds + match_cpds logger.info('score_cpds = %r' % (ut.list_getattr(score_cpds, 'variable'), )) # Make Model model = pgm_ext.define_model(cpd_list) model.num_names = num_names if verbose: model.print_templates() return model
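# Both model builders above create one match CPD per unordered annotation
# pair. itertools.combinations yields the same pair set as
# ut.colwise_diag_idxs, though in row-major rather than column-wise order
# (the column-wise order is what keeps existing match indexes stable when an
# annotation is appended). A concrete look at the pair indexing:
def _demo_annot_pair_idxs(num_annots=4):
    import itertools
    pair_idxs = list(itertools.combinations(range(num_annots), 2))
    print(pair_idxs)  # [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
    # n * (n - 1) / 2 match variables for n annotations
    assert len(pair_idxs) == num_annots * (num_annots - 1) // 2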
def rsync_ibsdb_main(): import sys default_user = ut.get_user_name() # default_db = 'MUGU_Master' default_db = None # Get positional commandline arguments cmdline_varags = ut.get_cmdline_varargs() if len(cmdline_varags) > 0 and cmdline_varags[0] == 'rsync': # ignore rsync as first command (b/c we are calling from # wbia.__main__) cmdline_varags = cmdline_varags[1:] valid_modes = ['push', 'pull', 'list'] if len(cmdline_varags) < 1: logger.info('Usage: ' # 'python -m wbia.scripts.rsync_wbiadb' 'python -m wbia rsync' '%s --db <db=%s> --user <user=%s>' % (valid_modes, default_db, default_user)) sys.exit(1) varargs_dict = dict(enumerate(cmdline_varags)) mode = varargs_dict.get(0, None) default_db = varargs_dict.get(1, None) user = ut.get_argval('--user', type_=str, default=default_user) port = ut.get_argval('--port', type_=int, default=22) dbnames = ut.get_argval(('--db', '--dbs', '--dbname'), type_=str, default=default_db) dbnames = ut.smart_cast(dbnames, list) workdir = ut.get_argval(('--workdir'), type_=str, default=None, help_='local work dir override') dry_run = ut.get_argflag(('--dryrun', '--dry-run', '--dry')) assert mode in valid_modes, 'mode=%r must be in %r' % (mode, valid_modes) remote_key = ut.get_argval('--remote', type_=str, default='hyrule') remote_map = { 'hyrule': 'hyrule.cs.rpi.edu', 'pachy': 'pachy.cs.uic.edu', 'lewa': '41.203.223.178', } remote_workdir_map = { 'hyrule': '/raid/work', 'pachy': '/home/shared_wbia/data/work', 'lewa': '/data/wbia', } if ':' in remote_key: remote_key_, remote_workdir = remote_key.split(':') else: remote_key_ = remote_key if remote_key not in remote_workdir_map: import warnings warnings.warn('Workdir not specified for remote') remote_workdir = remote_workdir_map.get(remote_key, '') remote = remote_map.get(remote_key_, remote_key_) remote_uri = user + '@' + remote + ':' + remote_workdir if mode == 'list': logger.info('remote = %r' % (remote, )) logger.info('need to list') remote_paths = ut.list_remote(remote_uri) logger.info('REMOTE LS -- TODO need to get only wbia dirs') logger.info('\n'.join(remote_paths)) elif mode in ['push', 'pull']: logger.info('dbnames = {!r}'.format(dbnames)) for dbname in ut.ProgIter(dbnames, label='sync db'): ut.change_term_title('RSYNC IBEISDB %r' % (dbname, )) sync_wbiadb(remote_uri, dbname, mode, workdir, port, dry_run)
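# The push/pull work above is delegated to sync_wbiadb. For orientation, a
# sketch of the kind of rsync invocation involved; the flags and path
# joining here are illustrative assumptions, not the exact command
# sync_wbiadb builds.
def _demo_build_rsync_cmd(dbname, remote_uri, workdir, port=22, dry_run=True):
    src = workdir.rstrip('/') + '/' + dbname
    dst = remote_uri.rstrip('/') + '/'
    cmd = ['rsync', '-avhzP', '-e', 'ssh -p %d' % port, src, dst]
    if dry_run:
        cmd.insert(1, '--dry-run')
    return ' '.join(cmd)

# e.g. _demo_build_rsync_cmd('PZ_MTEST', 'user@hyrule.cs.rpi.edu:/raid/work',
#                            '/raid/work')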
def flann_add_time_experiment():
    """
    Builds a plot of number of annotations vs indexer build time.

    TODO: time experiment

    CommandLine:
        python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --show
        python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_Master0 --show
        utprof.py -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --show

        valgrind --tool=memcheck --suppressions=valgrind-python.supp python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --no-with-reindex

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots._neighbor_experiment import *  # NOQA
        >>> import ibeis
        >>> #ibs = ibeis.opendb('PZ_MTEST')
        >>> result = flann_add_time_experiment()
        >>> # verify results
        >>> print(result)
        >>> ut.show_if_requested()
    """
    import ibeis
    import utool as ut
    import numpy as np
    import plottool as pt

    def make_flann_index(vecs, flann_params):
        flann = pyflann.FLANN()
        flann.build_index(vecs, **flann_params)
        return flann

    db = ut.get_argval('--db')
    ibs = ibeis.opendb(db=db)

    # Input
    if ibs.get_dbname() == 'PZ_MTEST':
        initial = 1
        reindex_stride = 16
        addition_stride = 4
        max_ceiling = 120
    elif ibs.get_dbname() == 'PZ_Master0':
        #ibs = ibeis.opendb(db='GZ_ALL')
        initial = 32
        reindex_stride = 32
        addition_stride = 16
        max_ceiling = 300001
    else:
        assert False
    #max_ceiling = 32
    all_daids = ibs.get_valid_aids()
    max_num = min(max_ceiling, len(all_daids))
    flann_params = vt.get_flann_params()

    # Output
    count_list, time_list_reindex = [], []
    count_list2, time_list_addition = [], []

    # Setup
    #all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:])
    all_randomize_daids_ = all_daids
    # ensure all features are computed
    ibs.get_annot_vecs(all_randomize_daids_)

    def reindex_step(count, count_list, time_list_reindex):
        daids = all_randomize_daids_[0:count]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann = make_flann_index(vecs, flann_params)  # NOQA
        count_list.append(count)
        time_list_reindex.append(t.ellapsed)

    def addition_step(count, flann, count_list2, time_list_addition):
        daids = all_randomize_daids_[count:count + 1]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        with ut.Timer(verbose=False) as t:
            flann.add_points(vecs)
        count_list2.append(count)
        time_list_addition.append(t.ellapsed)

    def make_initial_index(initial):
        daids = all_randomize_daids_[0:initial + 1]
        vecs = np.vstack(ibs.get_annot_vecs(daids))
        flann = make_flann_index(vecs, flann_params)
        return flann

    WITH_REINDEX = not ut.get_argflag('--no-with-reindex')
    if WITH_REINDEX:
        # Reindex Part
        reindex_lbl = 'Reindexing'
        _reindex_iter = range(1, max_num, reindex_stride)
        reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_lbl, freq=1)
        for count in reindex_iter:
            reindex_step(count, count_list, time_list_reindex)

    # Add Part
    flann = make_initial_index(initial)
    addition_lbl = 'Addition'
    _addition_iter = range(initial + 1, max_num, addition_stride)
    addition_iter = ut.ProgressIter(_addition_iter, lbl=addition_lbl)
    for count in addition_iter:
        addition_step(count, flann, count_list2, time_list_addition)

    print('---')
    print('Reindexing took %.2f seconds total' % sum(time_list_reindex))
    print('Addition took %.2f seconds total' % sum(time_list_addition))
    print('---')
    statskw = dict(precision=2, newlines=True)
    print('Reindex stats ' + ut.get_stats_str(time_list_reindex, **statskw))
    print('Addition stats ' + ut.get_stats_str(time_list_addition, **statskw))

    print('Plotting')

    #with pt.FigureContext:
    next_fnum = iter(range(0, 2)).__next__  # py2 used .next
    pt.figure(fnum=next_fnum())
    if WITH_REINDEX:
        pt.plot2(count_list, time_list_reindex, marker='-o',
                 equal_aspect=False, x_label='num_annotations',
                 label=reindex_lbl + ' Time', dark=False)

    #pt.figure(fnum=next_fnum())
    pt.plot2(count_list2, time_list_addition, marker='-o',
             equal_aspect=False, x_label='num_annotations',
             label=addition_lbl + ' Time')

    pt.legend()
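# The experiment above contrasts rebuilding an index from scratch with
# incrementally adding points. A dependency-free skeleton of that
# measurement pattern (a plain python list stands in for the FLANN index,
# purely for illustration):
def _demo_rebuild_vs_add_timing(max_num=2000, stride=500):
    import time
    rebuild_times, add_times = [], []
    index = []
    for count in range(stride, max_num + 1, stride):
        t0 = time.perf_counter()
        rebuilt = list(range(count))            # stand-in for a full reindex
        rebuild_times.append(time.perf_counter() - t0)
        t0 = time.perf_counter()
        index.extend(range(len(index), count))  # stand-in for add_points
        add_times.append(time.perf_counter() - t0)
    assert rebuilt == index
    print('rebuild: %r' % (rebuild_times,))
    print('add:     %r' % (add_times,))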
def run_ibeis(): r""" CommandLine: python -m ibeis python -m ibeis find_installed_tomcat python -m ibeis get_annot_groundtruth:1 """ import ibeis # NOQA #ut.set_process_title('IBEIS_main') #main_locals = ibeis.main() #ibeis.main_loop(main_locals) #ut.set_process_title('IBEIS_main') cmdline_varags = ut.get_cmdline_varargs() if len(cmdline_varags) > 0 and cmdline_varags[0] == 'rsync': from ibeis.scripts import rsync_ibeisdb rsync_ibeisdb.rsync_ibsdb_main() sys.exit(0) if ub.argflag('--devcmd'): # Hack to let devs mess around when using an installer version # TODO: add more hacks #import utool.tests.run_tests #utool.tests.run_tests.run_tests() ut.embed() # Run the tests of other modules elif ub.argflag('--run-utool-tests'): raise Exception('Deprecated functionality') elif ub.argflag('--run-vtool_ibeis-tests'): raise Exception('Deprecated functionality') elif ub.argflag(('--run-ibeis-tests', '--run-tests')): raise Exception('Deprecated functionality') if ub.argflag('-e'): """ ibeis -e print -a default -t default """ # Run dev script if -e given import ibeis.dev # NOQA ibeis.dev.devmain() print('... exiting') sys.exit(0) # Attempt to run a test using the funciton name alone # with the --tf flag # if False: # import ibeis.tests.run_tests # import ibeis.tests.reset_testdbs # import ibeis.scripts.thesis # ignore_prefix = [ # #'ibeis.tests', # 'ibeis.control.__SQLITE3__', # '_autogen_explicit_controller'] # ignore_suffix = ['_grave'] # func_to_module_dict = { # 'demo_bayesnet': 'ibeis.unstable.demobayes', # } # ut.main_function_tester('ibeis', ignore_prefix, ignore_suffix, # func_to_module_dict=func_to_module_dict) #if ub.argflag('-e'): # import ibeis # expt_kw = ut.get_arg_dict(ut.get_func_kwargs(ibeis.run_experiment), # prefix_list=['--', '-']) # ibeis.run_experiment(**expt_kw) # sys.exit(0) doctest_modname = ut.get_argval( ('--doctest-module', '--tmod', '-tm', '--testmod'), type_=str, default=None, help_='specify a module to doctest') if doctest_modname is not None: """ Allow any doctest to be run the main ibeis script python -m ibeis --tmod utool.util_str --test-align:0 python -m ibeis --tmod ibeis.algo.hots.pipeline --test-request_ibeis_query_L0:0 --show python -m ibeis --tf request_ibeis_query_L0:0 --show ./dist/ibeis/IBEISApp --tmod ibeis.algo.hots.pipeline --test-request_ibeis_query_L0:0 --show # NOQA ./dist/ibeis/IBEISApp --tmod utool.util_str --test-align:0 ./dist/IBEIS.app/Contents/MacOS/IBEISApp --tmod utool.util_str --test-align:0 ./dist/IBEIS.app/Contents/MacOS/IBEISApp --run-utool-tests ./dist/IBEIS.app/Contents/MacOS/IBEISApp --run-vtool_ibeis-tests """ print('[ibeis] Testing module') mod_alias_list = { 'exptdraw': 'ibeis.expt.experiment_drawing' } doctest_modname = mod_alias_list.get(doctest_modname, doctest_modname) module = ut.import_modname(doctest_modname) (nPass, nTotal, failed_list, error_report_list) = ut.doctest_funcs(module=module) retcode = 1 - (len(failed_list) == 0) #print(module) sys.exit(retcode) import ibeis main_locals = ibeis.main() execstr = ibeis.main_loop(main_locals) # <DEBUG CODE> if 'back' in main_locals and CMD: back = main_locals['back'] front = getattr(back, 'front', None) # NOQA #front = back.front #ui = front.ui ibs = main_locals['ibs'] # NOQA print('-- EXECSTR --') print(ub.codeblock(execstr)) print('-- /EXECSTR --') exec(execstr)
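# The --tmod path above resolves a module by name and runs its doctests via
# ut.doctest_funcs. A stdlib-only equivalent, shown for orientation (the
# exit-code convention matches run_ibeis; the module name is just an
# example):
def _demo_doctest_module(modname='collections.abc'):
    import importlib
    import doctest
    module = importlib.import_module(modname)
    results = doctest.testmod(module)
    retcode = 1 - (results.failed == 0)
    return retcode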
def show_multiple_chips(ibs, aid_list, in_image=True, fnum=0, sel_aids=[], subtitle='', annote=False, **kwargs): """ CommandLine: python -m ibeis.viz.viz_name --test-show_multiple_chips --show --no-inimage python -m ibeis.viz.viz_name --test-show_multiple_chips --show --db NNP_Master3 --aids=6435,9861,137,6563,9167,12547,9332,12598,13285 --no-inimage --notitle python -m ibeis.viz.viz_name --test-show_multiple_chips --show --db NNP_Master3 --aids=137,6563,12547,9332,12598,13285 --no-inimage --notitle --adjust=.05 python -m ibeis.viz.viz_name --test-show_multiple_chips --show --db NNP_Master3 --aids=6563,9332,13285,12598 --no-inimage --notitle --adjust=.05 --rc=1,4 python -m ibeis.viz.viz_name --test-show_multiple_chips --show --db PZ_Master0 --aids=1288 --no-inimage --notitle --adjust=.05 python -m ibeis.viz.viz_name --test-show_multiple_chips --show --db PZ_Master0 --aids=4020,4839 --no-inimage --notitle --adjust=.05 python -m ibeis.viz.viz_name --test-show_multiple_chips --db NNP_Master3 --aids=6524,6540,6571,6751 --no-inimage --notitle --adjust=.05 --diskshow python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST -a default:index=0:4 --show --aids=1 --doboth --show --no-inimage python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST --aids=1 --doboth --show --no-inimage python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST --aids=1 --doboth --rc=2,1 --show --no-inimage python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST --aids=1 --doboth --rc=2,1 --show --notitle --trydrawline --no-draw_lbls python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST --aids=1,2 --doboth --show --notitle --trydrawline python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST --aids=1,2,3,4,5 --doboth --rc=2,5 --show --chrlbl --trydrawline --qualtitle --no-figtitle --notitle --doboth --doboth --show python -m ibeis.viz.viz_name --test-show_multiple_chips --db NNP_Master3 --aids=15419 --doboth --rc=2,1 --show --notitle --trydrawline --no-draw_lbls Example: >>> # DISABLE_DOCTEST >>> from ibeis.viz.viz_name import * # NOQA >>> import ibeis >>> ibs, aid_list, in_image = testdata_multichips() >>> if True: >>> import matplotlib as mpl >>> from ibeis.scripts.thesis import TMP_RC >>> mpl.rcParams.update(TMP_RC) >>> fnum = 0 >>> sel_aids = [] >>> subtitle = '' >>> annote = False >>> fig = show_multiple_chips(ibs, aid_list, in_image, fnum, sel_aids, subtitle, annote) >>> ut.quit_if_noshow() >>> fig.canvas.draw() >>> ut.show_if_requested() """ fnum = pt.ensure_fnum(fnum) nAids = len(aid_list) if nAids == 0: fig = df2.figure(fnum=fnum, pnum=(1, 1, 1), **kwargs) df2.imshow_null(fnum=fnum, **kwargs) return fig # Trigger computation of all chips in parallel ibsfuncs.ensure_annotation_data(ibs, aid_list, chips=(not in_image or annote), feats=annote) print('[viz_name] * annot_vuuid=%r' % ((ibs.get_annot_visual_uuids(aid_list), ))) print('[viz_name] * aid_list=%r' % ((aid_list, ))) DOBOTH = ut.get_argflag('--doboth') rc = ut.get_argval('--rc', type_=list, default=None) if rc is None: nRows, nCols = ph.get_square_row_cols(nAids * (2 if DOBOTH else 1)) else: nRows, nCols = rc notitle = ut.get_argflag('--notitle') draw_lbls = not ut.get_argflag('--no-draw_lbls') show_chip_kw = dict(annote=annote, in_image=in_image, notitle=notitle, draw_lbls=draw_lbls) #print('[viz_name] * r=%r, c=%r' % (nRows, nCols)) #gs2 = gridspec.GridSpec(nRows, nCols) pnum_ = df2.get_pnum_func(nRows, nCols) fig = df2.figure(fnum=fnum, pnum=pnum_(0), **kwargs) fig.clf() 
ax_list1 = [] for px, aid in enumerate(aid_list): print('px = %r' % (px, )) _fig, _ax1 = viz_chip.show_chip(ibs, aid=aid, pnum=pnum_(px), **show_chip_kw) print('other_aids = %r' % (ibs.get_annot_contact_aids(aid), )) ax = df2.gca() ax_list1.append(_ax1) if aid in sel_aids: df2.draw_border(ax, df2.GREEN, 4) if ut.get_argflag('--chrlbl') and not DOBOTH: ax.set_xlabel('(' + chr(ord('a') - 1 + px) + ')') elif ut.get_argflag('--numlbl') and not DOBOTH: ax.set_xlabel('(' + str(px + 1) + ')') #plot_aid3(ibs, aid) # HACK to show in image and not in image if DOBOTH: #ut.embed() #ph.get_plotdat_dict(ax_list1[1]) #ph.get_plotdat_dict(ax_list2[1]) ax_list2 = [] show_chip_kw['in_image'] = not show_chip_kw['in_image'] start = px + 1 for px, aid in enumerate(aid_list, start=start): _fig, _ax2 = viz_chip.show_chip(ibs, aid=aid, pnum=pnum_(px), **show_chip_kw) ax = df2.gca() ax_list2.append(_ax2) if ut.get_argflag('--chrlbl'): ax.set_xlabel('(' + chr(ord('a') - start + px) + ')') elif ut.get_argflag('--numlbl'): ax.set_xlabel('(' + str(px - start + 1) + ')') if ut.get_argflag('--qualtitle'): qualtext = ibs.get_annot_quality_texts(aid) ax.set_title(qualtext) if aid in sel_aids: df2.draw_border(ax, df2.GREEN, 4) if in_image: ax_list1, ax_list2 = ax_list2, ax_list1 if ut.get_argflag('--trydrawline'): # Unfinished #ut.embed() # Draw lines between corresponding axes # References: # http://stackoverflow.com/questions/17543359/drawing-lines-between-two-plots-in-matplotlib import matplotlib as mpl import vtool_ibeis as vt # !!! #http://matplotlib.org/users/transforms_tutorial.html #invTransFigure_fn1 = fig.transFigure.inverted().transform #invTransFigure_fn2 = fig.transFigure.inverted().transform #print(ax_list1) #print(ax_list2) assert len(ax_list1) == len(ax_list2) for ax1, ax2 in zip(ax_list1, ax_list2): #_ = ax1.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) #bbox1 = (0, 0, _.width * fig.dpi, _.height * fig.dpi) # returns in figure coordinates #bbox1 = df2.get_axis_bbox(ax=ax1) #if bbox1[-1] < 0: # # Weird bug # bbox1 = bbox1[1] print('--') print('ax1 = %r' % (ax1, )) print('ax2 = %r' % (ax2, )) chipshape = ph.get_plotdat(ax1, 'chipshape') #_bbox1 = ax1.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) #bbox1 = (0, 0, _bbox1.width * fig.dpi, _bbox1.height * fig.dpi) bbox1 = (0, 0, chipshape[1], chipshape[0]) aid_ = ph.get_plotdat(ax2, 'aid') aid_list_ = ph.get_plotdat(ax2, 'aid_list') index = aid_list_.index(aid_) annotation_bbox_list = ph.get_plotdat(ax2, 'annotation_bbox_list') bbox2 = annotation_bbox_list[index] print('bbox1 = %r' % (bbox1, )) print('bbox2 = %r' % (bbox2, )) vert_list1 = np.array(vt.verts_from_bbox(bbox1)) vert_list2 = np.array(vt.verts_from_bbox(bbox2)) print('vert_list1 = %r' % (vert_list1, )) print('vert_list2 = %r' % (vert_list2, )) #for vx in [0, 1, 2, 3]: for vx in [0, 1]: vert1 = vert_list1[vx].tolist() vert2 = vert_list2[vx].tolist() print(' ***') print(' * vert1 = %r' % (vert1, )) print(' * vert2 = %r' % (vert2, )) coordsA = coordsB = 'data' #coords = 'axes points' #'axes fraction' #'axes pixels' #coordsA = 'axes pixels' #coordsB = 'data' #'figure fraction' #'figure pixels' #'figure pixels' #'figure points' #'polar' #'offset points' con = mpl.patches.ConnectionPatch(xyA=vert1, xyB=vert2, coordsA=coordsA, coordsB=coordsB, axesA=ax1, axesB=ax2, linewidth=1, color='k') #, arrowstyle="-") #ut.embed() #con.set_zorder(None) ax1.add_artist(con) #ax2.add_artist(con) #ut.embed() #verts2.T[1] -= bbox2[-1] #bottom_left1, bottom_right1 = verts1[1:3].tolist() 
#bottom_left2, bottom_right2 = verts2[1:3].tolist() ##transAxes1 = ax1.transData.inverted() #transAxes1_fn = ax1.transData.transform #transAxes2_fn = ax2.transData.transform #transAxes1_fn = ut.identity #transAxes2_fn = ut.identity #coord_bl1 = transFigure.transform(transAxes1.transform(bottom_left1)) #coord_br1 = transFigure.transform(transAxes1.transform(bottom_right1)) #coord_bl1 = invTransFigure_fn1(transAxes1_fn(bottom_left1)) #print('bottom_left2 = %r' % (bottom_left2,)) #coord_bl1 = (5, 5) #coord_bl2 = invTransFigure_fn2(transAxes2_fn(bottom_left2)) #print('coord_bl2 = %r' % (coord_bl2,)) #coord_br1 = invTransFigure_fn1(transAxes1_fn(bottom_right1)) #coord_br2 = invTransFigure_fn2(transAxes2_fn(bottom_right2)) ##print('coord_bl1 = %r' % (coord_bl1,)) #line_coords1 = np.vstack([coord_bl1, coord_bl2]) #line_coords2 = np.vstack([coord_br1, coord_br2]) #print('line_coords1 = %r' % (line_coords1,)) #line1 = mpl.lines.Line2D((line_coords1[0]), (line_coords1[1]), transform=fig.transFigure) #line2 = mpl.lines.Line2D((line_coords2[0]), (line_coords2[1]), transform=fig.transFigure) #xs1, ys1 = line_coords1.T #xs2, ys2 = line_coords2.T #linekw = dict(transform=fig.transFigure) #linekw = dict() #print('xs1 = %r' % (xs1,)) #print('ys1 = %r' % (ys1,)) #line1 = mpl.lines.Line2D(xs1, ys1, **linekw) #line2 = mpl.lines.Line2D(xs2, ys2, **linekw) # NOQA #shrinkA=5, shrinkB=5, mutation_scale=20, fc="w") #ax2.add_artist(con) #fig.lines.append(line1) #fig.lines.append(line2) pass return fig
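# The unfinished --trydrawline block above draws lines between points in two
# different axes. The minimal working pattern with matplotlib's
# ConnectionPatch, independent of ibeis (coordinates are arbitrary):
def _demo_connection_patch():
    import matplotlib.pyplot as plt
    from matplotlib.patches import ConnectionPatch
    fig, (ax1, ax2) = plt.subplots(1, 2)
    xyA, xyB = (0.8, 0.5), (0.2, 0.5)  # data coords in ax1 and ax2
    con = ConnectionPatch(xyA=xyA, xyB=xyB, coordsA='data', coordsB='data',
                          axesA=ax1, axesB=ax2, color='k', linewidth=1)
    # Add the artist to one axes; it is drawn in figure space across both
    ax1.add_artist(con)
    return fig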
def myquery():
    r"""
    BUG::
        THERE IS A BUG SOMEWHERE: HOW IS THIS POSSIBLE?
        if everything is weighted, how did the true positive even get a score
        while the true negative did not
        qres_copy.filtkey_list = ['ratio', 'fg', 'homogerr', 'distinctiveness']
        CORRECT STATS
        {
            'max'  : [0.832, 0.968, 0.604, 0.000],
            'min'  : [0.376, 0.524, 0.000, 0.000],
            'mean' : [0.561, 0.924, 0.217, 0.000],
            'std'  : [0.114, 0.072, 0.205, 0.000],
            'nMin' : [1, 1, 1, 51],
            'nMax' : [1, 1, 1, 1],
            'shape': (52, 4),
        }
        INCORRECT STATS
        {
            'max'  : [0.759, 0.963, 0.264, 0.000],
            'min'  : [0.379, 0.823, 0.000, 0.000],
            'mean' : [0.506, 0.915, 0.056, 0.000],
            'std'  : [0.125, 0.039, 0.078, 0.000],
            'nMin' : [1, 1, 1, 24],
            'nMax' : [1, 1, 1, 1],
            'shape': (26, 4),
        }

        # score_diff, tp_score, tn_score, p, K, dcvs_clip_max, fg_power, homogerr_power
        0.494, 0.494, 0.000, 73.000, 2, 0.500, 0.100, 10.000

        see how separability changes as we vary things

    CommandLine:
        python -m ibeis.algo.hots.devcases --test-myquery
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 0
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 1
        python -m ibeis.algo.hots.devcases --test-myquery --show --index 2

    References:
        http://en.wikipedia.org/wiki/Pareto_distribution <- look into

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.hots.devcases import *  # NOQA
        >>> ut.dev_ipython_copypaster(myquery) if ut.inIPython() else myquery()
        >>> pt.show_if_requested()
    """
    from ibeis.algo.hots import special_query  # NOQA
    from ibeis.algo.hots import distinctiveness_normalizer  # NOQA
    from ibeis import viz  # NOQA
    import plottool as pt
    index = ut.get_argval('--index', int, 0)
    ibs, aid1, aid2, tn_aid = testdata_my_exmaples(index)
    qaids = [aid1]
    daids = [aid2] + [tn_aid]
    qvuuid = ibs.get_annot_visual_uuids(aid1)

    cfgdict_vsone = dict(
        sv_on=True,
        #sv_on=False,
        #codename='vsone_unnorm_dist_ratio_extern_distinctiveness',
        codename='vsone_unnorm_ratio_extern_distinctiveness',
        sver_output_weighting=True,
    )

    use_cache = False
    save_qcache = False

    qres_list, qreq_ = ibs.query_chips(qaids, daids, cfgdict=cfgdict_vsone,
                                       return_request=True,
                                       use_cache=use_cache,
                                       save_qcache=save_qcache, verbose=True)

    qreq_.load_distinctiveness_normalizer()
    qres = qres_list[0]
    top_aids = qres.get_top_aids()  # NOQA
    qres_orig = qres  # NOQA

    def try_config(qreq_, qres_orig, cfgdict):
        """ function to grid search over """
        qres_copy = copy.deepcopy(qres_orig)
        qreq_vsone_ = qreq_
        qres_vsone = qres_copy
        filtkey = hstypes.FiltKeys.DISTINCTIVENESS
        newfsv_list, newscore_aids = special_query.get_extern_distinctiveness(
            qreq_, qres_copy, **cfgdict)
        special_query.apply_new_qres_filter_scores(
            qreq_vsone_, qres_vsone, newfsv_list, newscore_aids, filtkey)
        tp_score = qres_copy.aid2_score[aid2]
        tn_score = qres_copy.aid2_score[tn_aid]
        return qres_copy, tp_score, tn_score

    #[.01, .1, .2, .5, .6, .7, .8, .9, 1.0]),
    #FiltKeys = hstypes.FiltKeys
    # FIXME: Use other way of doing gridsearch
    grid_basis = distinctiveness_normalizer.DCVS_DEFAULT.get_grid_basis()
    gridsearch = ut.GridSearch(grid_basis, label='qvuuid=%r' % (qvuuid,))
    print('Begin Grid Search')
    for cfgdict in ut.ProgressIter(gridsearch, lbl='GridSearch'):
        qres_copy, tp_score, tn_score = try_config(qreq_, qres_orig, cfgdict)
        gridsearch.append_result(tp_score, tn_score)
    print('Finish Grid Search')

    # Get best result
    best_cfgdict = gridsearch.get_rank_cfgdict()
    qres_copy, tp_score, tn_score = try_config(qreq_, qres_orig, best_cfgdict)

    # Examine closely what you can do with scores
    if False:
        qres_copy = copy.deepcopy(qres_orig)
        qreq_vsone_ = qreq_
        filtkey = hstypes.FiltKeys.DISTINCTIVENESS
        newfsv_list, newscore_aids = special_query.get_extern_distinctiveness(
            qreq_, qres_copy, **cfgdict)
        ut.embed()

        def make_cm_very_old_tuple(qres_copy):
            assert ut.listfind(qres_copy.filtkey_list, filtkey) is None
            weight_filters = hstypes.WEIGHT_FILTERS
            weight_filtxs, nonweight_filtxs = special_query.index_partition(
                qres_copy.filtkey_list, weight_filters)

            aid2_fsv = {}
            aid2_fs = {}
            aid2_score = {}

            for new_fsv_vsone, daid in zip(newfsv_list, newscore_aids):
                pass
                break
                #scorex_vsone = ut.listfind(qres_copy.filtkey_list, filtkey)
                #if scorex_vsone is None:
                # TODO: add spatial verification as a filter score
                # augment the vsone scores
                # TODO: parameterize
                weighted_ave_score = True
                if weighted_ave_score:
                    # weighted average scoring
                    new_fs_vsone = special_query.weighted_average_scoring(
                        new_fsv_vsone, weight_filtxs, nonweight_filtxs)
                else:
                    # product scoring
                    new_fs_vsone = special_query.product_scoring(new_fsv_vsone)
                new_score_vsone = new_fs_vsone.sum()
                aid2_fsv[daid] = new_fsv_vsone
                aid2_fs[daid] = new_fs_vsone
                aid2_score[daid] = new_score_vsone
            return aid2_fsv, aid2_fs, aid2_score

        # Look at plot of query products
        for new_fsv_vsone, daid in zip(newfsv_list, newscore_aids):
            new_fs_vsone = special_query.product_scoring(new_fsv_vsone)
            scores_list = np.array(new_fs_vsone)[:, None].T
            pt.plot_sorted_scores(scores_list, logscale=False,
                                  figtitle=str(daid))
        pt.iup()
        special_query.apply_new_qres_filter_scores(
            qreq_vsone_, qres_copy, newfsv_list, newscore_aids, filtkey)

    # PRINT INFO
    import functools
    #ut.rrrr()
    get_stats_str = functools.partial(ut.get_stats_str, axis=0, newlines=True,
                                      precision=3)
    tp_stats_str = ut.align(get_stats_str(qres_copy.aid2_fsv[aid2]), ':')
    tn_stats_str = ut.align(get_stats_str(qres_copy.aid2_fsv[tn_aid]), ':')
    info_str_list = []
    info_str_list.append('qres_copy.filtkey_list = %r' %
                         (qres_copy.filtkey_list,))
    info_str_list.append('CORRECT STATS')
    info_str_list.append(tp_stats_str)
    info_str_list.append('INCORRECT STATS')
    info_str_list.append(tn_stats_str)
    info_str = '\n'.join(info_str_list)
    print(info_str)

    # SHOW BEST RESULT
    #qres_copy.ishow_top(ibs, fnum=pt.next_fnum())
    #qres_orig.ishow_top(ibs, fnum=pt.next_fnum())

    # Text Information
    param_lbl = 'dcvs_power'
    param_stats_str = gridsearch.get_dimension_stats_str(param_lbl)
    print(param_stats_str)

    csvtext = gridsearch.get_csv_results(10)
    print(csvtext)

    # Parameter visualization
    fnum = pt.next_fnum()
    # plot parameter influence
    param_label_list = gridsearch.get_param_lbls()
    pnum_ = pt.get_pnum_func(2, len(param_label_list))
    for px, param_label in enumerate(param_label_list):
        gridsearch.plot_dimension(param_label, fnum=fnum, pnum=pnum_(px))
    # plot match figure
    pnum2_ = pt.get_pnum_func(2, 2)
    qres_copy.show_matches(ibs, aid2, fnum=fnum, pnum=pnum2_(2))
    qres_copy.show_matches(ibs, tn_aid, fnum=fnum, pnum=pnum2_(3))
    # Add figure labels
    figtitle = 'Effect of parameters on vsone separation for a single case'
    subtitle = 'qvuuid = %r' % (qvuuid)
    figtitle += '\n' + subtitle
    pt.set_figtitle(figtitle)
    # Save Figure
    #fig_fpath = pt.save_figure(usetitle=True)
    #print(fig_fpath)
    # Write CSV Results
    #csv_fpath = fig_fpath + '.csv.txt'
    #ut.write_to(csv_fpath, csvtext)
    #qres_copy.ishow_top(ibs)
    #from matplotlib import pyplot as plt
    #plt.show()
    #print(ut.repr2())
    # TODO: plot max variation dims
    #import plottool as pt
    #pt.plot(p_list, diff_list)
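# ut.GridSearch drives the loop above. The underlying pattern, rewritten
# with itertools for clarity, tracks the config that best separates the
# true-positive score from the true-negative score; the scoring arithmetic
# here is a made-up stand-in for try_config.
def _demo_grid_search_separation():
    import itertools
    grid_basis = {'dcvs_power': [0.5, 1.0, 2.0], 'fg_power': [0.1, 1.0]}
    keys = sorted(grid_basis.keys())
    best = None
    for values in itertools.product(*(grid_basis[k] for k in keys)):
        cfgdict = dict(zip(keys, values))
        # stand-in for: qres_copy, tp_score, tn_score = try_config(...)
        tp_score = cfgdict['dcvs_power'] * 0.6 + cfgdict['fg_power'] * 0.1
        tn_score = cfgdict['dcvs_power'] * 0.2
        sep = tp_score - tn_score
        if best is None or sep > best[0]:
            best = (sep, cfgdict)
    print('best separation=%.3f with cfg=%r' % best)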
IMAGE_BARE_THUMB_SUFFIX = '_thumb_bare.jpg' CHIP_THUMB_SUFFIX = '_chip_thumb.jpg' VS_EXEMPLARS_KEY = 'vs_exemplars' INTRA_OCCUR_KEY = 'intra_occurrence' HARD_NOTE_TAG = '<HARDCASE>' # HACK if ut.get_computer_name() == 'ibeis.cs.uic.edu': #_DEFAULT_WILDBOOK_TARGET = 'prod' _DEFAULT_WILDBOOK_TARGET = 'lewa2' else: _DEFAULT_WILDBOOK_TARGET = 'ibeis' WILDBOOK_TARGET = ut.get_argval('--wildbook-target', type_=str, default=_DEFAULT_WILDBOOK_TARGET, help_='specify the Wildbook target deployment') class ZIPPED_URLS(object): PZ_MTEST = 'https://lev.cs.rpi.edu/public/databases/PZ_MTEST.zip' NAUTS = 'https://lev.cs.rpi.edu/public/databases/NAUT_test.zip' WDS = 'https://lev.cs.rpi.edu/public/databases/wd_peter2.zip' PZ_DISTINCTIVE = 'https://lev.cs.rpi.edu/public/models/distinctivness_zebra_plains.zip' GZ_DISTINCTIVE = 'https://lev.cs.rpi.edu/public/models/distinctivness_zebra_grevys.zip' if six.PY2: __STR__ = unicode # change to str if needed else: __STR__ = str
def show_model(model, evidence={}, soft_evidence={}, **kwargs): """ References: http://stackoverflow.com/questions/22207802/pygraphviz-networkx-set-node-level-or-layer Ignore: pkg-config --libs-only-L libcgraph sudo apt-get install libgraphviz-dev -y sudo apt-get install libgraphviz4 -y # sudo apt-get install pkg-config sudo apt-get install libgraphviz-dev # pip install git+git://github.com/pygraphviz/pygraphviz.git pip install pygraphviz python -c "import pygraphviz; print(pygraphviz.__file__)" sudo pip3 install pygraphviz --install-option="--include-path=/usr/include/graphviz" --install-option="--library-path=/usr/lib/graphviz/" python3 -c "import pygraphviz; print(pygraphviz.__file__)" """ if ut.get_argval('--hackmarkov') or ut.get_argval('--hackjunc'): draw_tree_model(model, **kwargs) return import wbia.plottool as pt import networkx as netx import matplotlib as mpl fnum = pt.ensure_fnum(None) fig = pt.figure(fnum=fnum, pnum=(3, 1, (slice(0, 2), 0)), doclf=True) # NOQA # fig = pt.figure(fnum=fnum, pnum=(3, 2, (1, slice(1, 2))), doclf=True) # NOQA ax = pt.gca() var2_post = {f.variables[0]: f for f in kwargs.get('factor_list', [])} netx_graph = model # netx_graph.graph.setdefault('graph', {})['size'] = '"10,5"' # netx_graph.graph.setdefault('graph', {})['rankdir'] = 'LR' pos = get_hacked_pos(netx_graph) # netx.nx_agraph.pygraphviz_layout(netx_graph) # pos = netx.nx_agraph.pydot_layout(netx_graph, prog='dot') # pos = netx.nx_agraph.graphviz_layout(netx_graph) drawkw = dict(pos=pos, ax=ax, with_labels=True, node_size=1500) if evidence is not None: node_colors = [ # (pt.TRUE_BLUE (pt.WHITE if node not in soft_evidence else pt.LIGHT_PINK) if node not in evidence else pt.FALSE_RED for node in netx_graph.nodes() ] for node in netx_graph.nodes(): cpd = model.var2_cpd[node] if cpd.ttype == 'score': pass drawkw['node_color'] = node_colors netx.draw(netx_graph, **drawkw) show_probs = True if show_probs: textprops = { 'family': 'monospace', 'horizontalalignment': 'left', #'horizontalalignment': 'center', #'size': 12, 'size': 8, } textkw = dict( xycoords='data', boxcoords='offset points', pad=0.25, framewidth=True, arrowprops=dict(arrowstyle='->'), # bboxprops=dict(fc=node_attr['fillcolor']), ) netx_nodes = model.nodes(data=True) node_key_list = ut.get_list_column(netx_nodes, 0) pos_list = ut.dict_take(pos, node_key_list) artist_list = [] offset_box_list = [] for pos_, node in zip(pos_list, netx_nodes): x, y = pos_ variable = node[0] cpd = model.var2_cpd[variable] prior_marg = (cpd if cpd.evidence is None else cpd.marginalize( cpd.evidence, inplace=False)) prior_text = None text = None if variable in evidence: text = cpd.variable_statenames[evidence[variable]] elif variable in var2_post: post_marg = var2_post[variable] text = pgm_ext.make_factor_text(post_marg, 'post') prior_text = pgm_ext.make_factor_text(prior_marg, 'prior') else: if len(evidence) == 0 and len(soft_evidence) == 0: prior_text = pgm_ext.make_factor_text(prior_marg, 'prior') show_post = kwargs.get('show_post', False) show_prior = kwargs.get('show_prior', False) show_prior = True show_post = True show_ev = evidence is not None and variable in evidence if (show_post or show_ev) and text is not None: offset_box = mpl.offsetbox.TextArea(text, textprops) artist = mpl.offsetbox.AnnotationBbox( # offset_box, (x + 5, y), xybox=(20., 5.), offset_box, (x, y + 5), xybox=(4.0, 20.0), # box_alignment=(0, 0), box_alignment=(0.5, 0), **textkw) offset_box_list.append(offset_box) artist_list.append(artist) if show_prior and prior_text is not None: 
offset_box2 = mpl.offsetbox.TextArea(prior_text, textprops) artist2 = mpl.offsetbox.AnnotationBbox( # offset_box2, (x - 5, y), xybox=(-20., -15.), # offset_box2, (x, y - 5), xybox=(-15., -20.), offset_box2, (x, y - 5), xybox=(-4, -20.0), # box_alignment=(1, 1), box_alignment=(0.5, 1), **textkw) offset_box_list.append(offset_box2) artist_list.append(artist2) for artist in artist_list: ax.add_artist(artist) xmin, ymin = np.array(pos_list).min(axis=0) xmax, ymax = np.array(pos_list).max(axis=0) num_annots = len(model.ttype2_cpds['name']) if num_annots > 4: ax.set_xlim((xmin - 40, xmax + 40)) ax.set_ylim((ymin - 50, ymax + 50)) fig.set_size_inches(30, 7) else: ax.set_xlim((xmin - 42, xmax + 42)) ax.set_ylim((ymin - 50, ymax + 50)) fig.set_size_inches(23, 7) fig = pt.gcf() title = 'num_names=%r, num_annots=%r' % ( model.num_names, num_annots, ) map_assign = kwargs.get('map_assign', None) # max_marginal_list = [] # for name, marginal in marginalized_joints.items(): # states = list(ut.iprod(*marginal.statenames)) # vals = marginal.values.ravel() # x = vals.argmax() # max_marginal_list += ['P(' + ', '.join(states[x]) + ') = ' + str(vals[x])] # title += str(marginal) top_assignments = kwargs.get('top_assignments', None) if top_assignments is not None: map_assign, map_prob = top_assignments[0] if map_assign is not None: # title += '\nMAP=' + ut.repr2(map_assign, strvals=True) title += '\nMAP: ' + map_assign + ' @' + '%.2f%%' % ( 100 * map_prob, ) if kwargs.get('show_title', True): pt.set_figtitle(title, size=14) # pt.set_xlabel() def hack_fix_centeralign(): if textprops['horizontalalignment'] == 'center': logger.info('Fixing centeralign') fig = pt.gcf() fig.canvas.draw() # Superhack for centered text. Fix bug in # /usr/local/lib/python2.7/dist-packages/matplotlib/offsetbox.py # /usr/local/lib/python2.7/dist-packages/matplotlib/text.py for offset_box in offset_box_list: offset_box.set_offset z = offset_box._text.get_window_extent() (z.x1 - z.x0) / 2 offset_box._text T = offset_box._text.get_transform() A = mpl.transforms.Affine2D() A.clear() A.translate((z.x1 - z.x0) / 2, 0) offset_box._text.set_transform(T + A) hack_fix_centeralign() top_assignments = kwargs.get('top_assignments', None) if top_assignments is not None: bin_labels = ut.get_list_column(top_assignments, 0) bin_vals = ut.get_list_column(top_assignments, 1) # bin_labels = ['\n'.join(ut.textwrap.wrap(_lbl, width=30)) for _lbl in bin_labels] pt.draw_histogram( bin_labels, bin_vals, fnum=fnum, pnum=(3, 8, (2, slice(4, None))), transpose=True, use_darkbackground=False, # xtick_rotation=-10, ylabel='Prob', xlabel='assignment', ) pt.set_title('Assignment probabilities')
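# The prior/posterior labels above are TextArea boxes attached with
# AnnotationBbox. Stripped of the pgm specifics, the pattern reduces to this
# standalone matplotlib snippet (text and coordinates are arbitrary):
def _demo_annotation_bbox():
    import matplotlib.pyplot as plt
    from matplotlib.offsetbox import AnnotationBbox, TextArea
    fig, ax = plt.subplots()
    offset_box = TextArea('P(N=n0) = 0.5', {'family': 'monospace', 'size': 8})
    artist = AnnotationBbox(
        offset_box, (0.5, 0.5),   # anchor point in data coords
        xybox=(4.0, 20.0),        # offset of the box, in points
        xycoords='data', boxcoords='offset points',
        box_alignment=(0.5, 0), arrowprops=dict(arrowstyle='->'))
    ax.add_artist(artist)
    return fig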
def _init_sqldbcore(ibs, request_dbversion=None): """ Example: >>> # DISABLE_DOCTEST >>> from ibeis.control.IBEISControl import * # NOQA >>> import ibeis # NOQA >>> #ibs = ibeis.opendb('PZ_MTEST') >>> #ibs = ibeis.opendb('PZ_Master0') >>> ibs = ibeis.opendb('testdb1') >>> #ibs = ibeis.opendb('PZ_Master0') Ignore: aid_list = ibs.get_valid_aids() #ibs.update_annot_visual_uuids(aid_list) vuuid_list = ibs.get_annot_visual_uuids(aid_list) aid_list2 = ibs.get_annot_aids_from_visual_uuid(vuuid_list) assert aid_list2 == aid_list # v1.3.0 testdb1:264us, PZ_MTEST:3.93ms, PZ_Master0:11.6s %timeit ibs.get_annot_aids_from_visual_uuid(vuuid_list) # v1.3.1 testdb1:236us, PZ_MTEST:1.83ms, PZ_Master0:140ms ibs.print_imageset_table(exclude_columns=['imageset_uuid']) """ from ibeis.control import _sql_helpers from ibeis.control import DB_SCHEMA # Before load, ensure database has been backed up for the day backup_idx = ut.get_argval('--loadbackup', type_=int, default=None) sqldb_fpath = None if backup_idx is not None: backups = _sql_helpers.get_backup_fpaths(ibs) print('backups = %r' % (backups,)) sqldb_fpath = backups[backup_idx] print('CHOSE BACKUP sqldb_fpath = %r' % (sqldb_fpath,)) if backup_idx is None and not ut.get_argflag('--nobackup'): try: _sql_helpers.ensure_daily_database_backup(ibs.get_ibsdir(), ibs.sqldb_fname, ibs.backupdir) except IOError as ex: ut.printex(ex, ( 'Failed making daily backup. ' 'Run with --nobackup to disable')) raise # IBEIS SQL State Database #ibs.db_version_expected = '1.1.1' if request_dbversion is None: ibs.db_version_expected = '1.5.3' else: ibs.db_version_expected = request_dbversion # TODO: add this functionality to SQLController if backup_idx is None: new_version, new_fname = dtool.sql_control.dev_test_new_schema_version( ibs.get_dbname(), ibs.get_ibsdir(), ibs.sqldb_fname, ibs.db_version_expected, version_next='1.5.3') ibs.db_version_expected = new_version ibs.sqldb_fname = new_fname if sqldb_fpath is None: assert backup_idx is None sqldb_fpath = join(ibs.get_ibsdir(), ibs.sqldb_fname) readonly = None else: readonly = True ibs.db = dtool.SQLDatabaseController( fpath=sqldb_fpath, text_factory=const.__STR__, inmemory=False, readonly=readonly) ibs.readonly = ibs.db.readonly if backup_idx is None: # Ensure correct schema versions _sql_helpers.ensure_correct_version( ibs, ibs.db, ibs.db_version_expected, DB_SCHEMA, verbose=ut.VERBOSE, )
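# Usage sketch for the backup machinery in _init_sqldbcore (the invocations are
# illustrative; the flags themselves are exactly the ones parsed above):
#   python main.py --db PZ_MTEST                  # normal load; ensures a daily backup
#   python main.py --db PZ_MTEST --nobackup       # skip the daily backup
#   python main.py --db PZ_MTEST --loadbackup=0   # open backups[0] read-only
# Loading a backup sets readonly=True on the SQLDatabaseController, so nothing
# can write through to the backed-up sqlite file.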
def execute_commands(tpl_rman, wbia_rman):
    import utool as ut

    GET_ARGVAL = ut.get_argval
    # GET_ARGFLAG is used throughout but was never bound (NameError); define it
    GET_ARGFLAG = ut.get_argflag

    ut.init_catch_ctrl_c()

    if 0:
        print('Version Check Source:')
        for repo in tpl_rman.repos:
            print('python -c "import {0}; print({0}.__file__)"'.format(repo.modname))
            print('python -c "import {0}; print({0}.__version__)"'.format(repo.modname))

    # -----------
    # Execute Commands on Core Repos
    # -----------

    CODE_DIR, pythoncmd, WIN32, PY2, PY3 = get_sysinfo()

    print('wbia_rman = %r' % (wbia_rman,))

    wildme_ssh_flags = GET_ARGFLAG('--move-wildme') or GET_ARGFLAG('--move-wildme-ssh')
    wildme_https_flags = GET_ARGFLAG('--move-wildme-https') or GET_ARGFLAG('--move-wildme-http')
    if wildme_ssh_flags or wildme_https_flags:
        fmt = 'ssh' if wildme_ssh_flags else 'https'
        move_wildme(wbia_rman, fmt)

    # Commands on global git repos
    if GET_ARGFLAG('--status'):
        wbia_rman.issue('git status')
        sys.exit(0)

    wbia_rman.ensure()

    if GET_ARGFLAG('--dump') or GET_ARGFLAG('--dump-scripts'):
        dpath = '_super_scripts/' + 'scripts' + get_plat_specifier()
        ut.ensuredir(dpath)
        dumps = [
            (tpl_rman, 'cv2', 'build'),
            (tpl_rman, 'cv2', 'install'),
            (wbia_rman, 'flann', 'build'),
            (wbia_rman, 'flann', 'install'),
            (wbia_rman, 'hesaff', 'build'),
            (tpl_rman, 'PyQt', 'system_to_venv'),
            (tpl_rman, 'libgpuarray', 'build'),
        ]
        for rman, mod, sname in dumps:
            from os.path import join
            # if mod not in rman:
            #     print('mod=%r not available in rman=%r' % (mod, rman))
            #     continue
            script = rman[mod].get_script(sname).text
            suffix = get_plat_specifier()
            sh_fpath = join(dpath, mod + '_' + sname + suffix + '.sh')
            ut.write_to(sh_fpath, script)

    if GET_ARGFLAG('--requirements'):
        ut.cmd('pip install -r requirements.txt')

    # HACKED IN SCRIPTS WHILE I'M STILL FIGURING OUT TPL DEPS
    if GET_ARGFLAG('--opencv'):
        # There is now a pypi for opencv! Yay
        # ut.cmd('pip install opencv-python')
        # Bummer, but we need opencv source for pyhessaff
        # we should just make a wheel for pyhessaff
        cv_repo = tpl_rman['cv2']
        cv_repo.clone()
        script = cv_repo.get_script('build')
        script.exec_()
        cv_repo = tpl_rman['cv2']
        script = cv_repo.get_script('install')
        script.exec_()

    if GET_ARGFLAG('--flann'):
        script = wbia_rman['flann'].get_script('build')
        script.exec_()
        script = wbia_rman['flann'].get_script('install')
        script.exec_()

    if GET_ARGFLAG('--pyqt'):
        script = tpl_rman['PyQt'].get_script('system_to_venv')
        script.exec_()

    if GET_ARGFLAG('--hesaff'):
        script = wbia_rman['hesaff'].get_script('build')
        script.exec_()

    if GET_ARGFLAG('--pydarknet'):
        script = wbia_rman['pydarknet'].get_script('build')
        script.exec_()

    if GET_ARGFLAG('--pyrf'):
        script = wbia_rman['pyrf'].get_script('build')
        script.exec_()

    if GET_ARGFLAG('--torch'):
        # Theano and lasagne code should be moved to pytorch
        tpl_rman['pytorch'].clone(recursive=True)
        tpl_rman['pytorch'].issue('git submodule update --init')
        tpl_rman['pytorch'].issue('python setup.py install')
        tpl_rman['pytorch'].issue('pip install torchvision')
        # tpl_rman['pytorch'].issue('NO_CUDNN=TRUE && python setup.py install')
        # tpl_rman['pytorch'].issue('pip install -e .')

    if GET_ARGFLAG('--libgpuarray') or GET_ARGFLAG('--dcnn'):
        tpl_rman['libgpuarray'].clone()
        script = tpl_rman['libgpuarray'].get_script('build')
        script.exec_()

    if GET_ARGFLAG('--dcnn'):
        tpl_rman['theano'].clone()
        # tpl_rman['pylearn2'].clone()
        tpl_rman['lasagne'].clone()
        tpl_rman['theano'].issue('pip install -e .')
        # tpl_rman['pylearn2'].issue('pip install -e .')
        tpl_rman['lasagne'].issue('pip install -e .')
        # tpl_rman['pylearn2'].python_develop()
        # tpl_rman['theano'].python_develop()
        # tpl_rman['lasagne'].python_develop()

    # _===

    if GET_ARGFLAG('--fix') or GET_ARGFLAG('--check'):
        missing_dynlib = tpl_rman.check_cpp_build()
        missing_dynlib += wbia_rman.check_cpp_build()

        missing_install = tpl_rman.check_installed()
        missing_install += wbia_rman.check_installed()

        problems = []
        problems += wbia_rman.check_importable()
        problems += tpl_rman.check_importable()

    if GET_ARGFLAG('--fix'):
        print('Trying to fix problems')

        for repo in missing_dynlib:
            repo.custom_build()

        for repo, recommended_fix in problems:
            print('Trying to fix repo = %r' % (repo,))
            print(' * recommended_fix = %r' % (recommended_fix,))
            if recommended_fix == 'rebuild':
                repo.custom_build()
                print('Can currently only fix one module at a time. Please re-run')
                sys.exit(1)
            else:
                print('Not sure how to fix %r' % (repo,))

    if GET_ARGFLAG('--pull'):
        wbia_rman.issue('git pull')

    if GET_ARGFLAG('--build'):
        # Build tpl repos
        # tpl_rman.custom_build()
        # wbia_rman.custom_build()
        # Build only IBEIS repos with setup.py
        _rman = wbia_rman.only_with_pysetup()
        _rman.issue('{pythoncmd} setup.py build'.format(pythoncmd=pythoncmd))

    # Like install, but better if you are developing
    if GET_ARGFLAG('--develop'):
        _rman = wbia_rman.only_with_pysetup()
        # _rman.issue('{pythoncmd} setup.py develop'.format(pythoncmd=pythoncmd),
        #             sudo=not ut.in_virtual_env())
        _rman.issue(
            '{pythoncmd} -m pip install -e .'.format(pythoncmd=pythoncmd),
            sudo=not ut.in_virtual_env(),
        )

    if GET_ARGFLAG('--clean'):
        _rman = wbia_rman.only_with_pysetup()
        _rman.issue('{pythoncmd} setup.py clean'.format(pythoncmd=pythoncmd))

    if GET_ARGFLAG('--install'):
        print(
            "WARNING: Don't use install if you are a developer. Use develop instead."
        )
        _rman = wbia_rman.only_with_pysetup()
        _rman.issue('{pythoncmd} setup.py install'.format(pythoncmd=pythoncmd))

    if GET_ARGFLAG('--push'):
        wbia_rman.issue('git push')

    if GET_ARGFLAG('--branch'):
        wbia_rman.issue('git branch')
        sys.exit(0)

    if GET_ARGFLAG('--tag-status'):
        wbia_rman.issue('git tag')

    # Tag everything
    tag_name = GET_ARGVAL('--newtag', type_=str, default=None)
    if tag_name is not None:
        wbia_rman.issue(
            'git tag -a "{tag_name}" -m "super_setup autotag {tag_name}"'.format(**locals()))
        wbia_rman.issue('git push --tags')

    if GET_ARGFLAG('--bext'):
        wbia_rman.issue('{pythoncmd} setup.py build_ext --inplace'.format(
            pythoncmd=pythoncmd))

    commit_msg = GET_ARGVAL('--commit', type_=str, default=None)
    if commit_msg is not None:
        wbia_rman.issue('git commit -am "{commit_msg}"'.format(**locals()))

    # Change Branch
    branch_name = GET_ARGVAL('--checkout', type_=str, default=None)
    if branch_name is not None:
        try:
            wbia_rman.issue('git checkout "{branch_name}"'.format(**locals()))
        except Exception:
            print('ERROR: Could not checkout branch: %r' % (branch_name,))

    # Creates new branches (and pushes them upstream)
    newbranch_name = GET_ARGVAL('--newbranch', type_=str, default=None)
    if newbranch_name is not None:
        # rman.issue('git stash"'.format(**locals()))
        wbia_rman.issue('git checkout -b "{newbranch_name}"'.format(**locals()))
        wbia_rman.issue(
            'git push --set-upstream origin {newbranch_name}'.format(**locals()))
        # rman.issue('git stash pop"'.format(**locals()))

    # Creates new local branches (nothing is pushed upstream)
    newlocalbranch_name = GET_ARGVAL('--newlocalbranch', type_=str, default=None)
    if newlocalbranch_name is not None:
        # rman.issue('git stash"'.format(**locals()))
        wbia_rman.issue('git checkout -b "{newlocalbranch_name}"'.format(**locals()))
        # rman.issue('git push --set-upstream origin {newlocalbranch_name}'.format(**locals()))
        # rman.issue('git stash pop"'.format(**locals()))

    # Merges a branch into the current one
    mergebranch_name = GET_ARGVAL('--merge', type_=str, default=None)
    if mergebranch_name is not None:
        wbia_rman.issue('git merge "{mergebranch_name}"'.format(**locals()))

    # Change ownership
    if GET_ARGFLAG('--serverchmod'):
        wbia_rman.issue('chmod -R 755 *')

    if GET_ARGFLAG('--chown'):
        # Fixes problems where repos are checked out as root
        username = os.environ.get('USERNAME', ut.get_argval('--username'))
        if username is None:
            username = os.environ.get('USER', None)
        if username is None:
            raise AssertionError('cannot find username in commandline or environment vars')
        usergroup = username
        wbia_rman.issue('chown -R {username}:{usergroup} *'.format(**locals()),
                        sudo=True)

    upstream_branch = GET_ARGVAL('--set-upstream', type_=str, default=None)
    if upstream_branch is not None:
        # git 2.0
        wbia_rman.issue(
            'git branch --set-upstream-to=origin/{upstream_branch} {upstream_branch}'
            .format(**locals()))

    upstream_push = GET_ARGVAL('--upstream-push', type_=str, default=None)
    if upstream_push is not None:
        wbia_rman.issue(
            'git push --set-upstream origin {upstream_push}'.format(**locals()))

    if GET_ARGFLAG('--test'):
        failures = []
        for repo_dpath in wbia_rman.repo_dirs:
            # ut.getp_
            mod_dpaths = ut.get_submodules_from_dpath(repo_dpath, recursive=False,
                                                      only_packages=True)
            modname_list = ut.lmap(ut.get_modname_from_modpath, mod_dpaths)
            print('Checking modules = %r' % (modname_list,))

            for modname in modname_list:
                try:
                    ut.import_modname(modname)
                    print(modname + ' success')
                except ImportError:
                    failures += [modname]
                    print(modname + ' failure')

        print('failures = %s' % (ut.repr3(failures),))

    if False:
        try:
            from six.moves import input
        except ImportError:
            input = raw_input  # NOQA

    # General global git command
    gg_cmd = GET_ARGVAL('--gg', None)  # global command
    if gg_cmd is not None:
        ans = ('yes' if GET_ARGFLAG('-y') else
               input('Are you sure you want to run: %r on all directories? ' % (gg_cmd,)))
        if ans == 'yes':
            wbia_rman.issue(gg_cmd)
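# Typical invocations handled by execute_commands (assuming, as the autotag
# message above suggests, that this is the super_setup.py entry point):
#   python super_setup.py --status                  # git status across all repos
#   python super_setup.py --pull --build --develop  # update, build, pip install -e
#   python super_setup.py --newbranch my-feature    # branch + push upstream everywhere
#   python super_setup.py --gg 'git log -n 1' -y    # arbitrary git command, no prompt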
def test_draw_keypoint_main():
    r"""
    CommandLine:
        python -m pyhesaff.tests.test_draw_keypoint --test-test_draw_keypoint_main --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from pyhesaff.tests.test_draw_keypoint import *  # NOQA
        >>> test_draw_keypoint_main()
        >>> ut.show_if_requested()
    """
    from plottool import draw_func2 as df2
    from plottool import mpl_keypoint
    import vtool.keypoint as ktool  # NOQA

    # TODO: Gui tests yield:
    # Jul 13 13:14:53 www.longerdog.com Python[23974] <Error>: This user is not allowed access to the window system right now.
    # don't do window access without --show

    TAU = 2 * np.pi
    # Hack these directions to be relative to gravity
    #RIGHT = ((0 * TAU / 4) - ktool.GRAVITY_THETA) % TAU
    DOWN = ((1 * TAU / 4) - ktool.GRAVITY_THETA) % TAU
    #LEFT = ((2 * TAU / 4) - ktool.GRAVITY_THETA) % TAU
    #UP = ((3 * TAU / 4) - ktool.GRAVITY_THETA) % TAU

    def test_keypoint(xscale=1, yscale=1, ori=DOWN, skew=0):
        # Test Keypoint
        x, y = 0, 0
        iv11, iv21, iv22 = xscale, skew, yscale
        kp = np.array([x, y, iv11, iv21, iv22, ori])

        # Test SIFT descriptor
        sift = np.zeros(128)
        sift[0:8] = 1
        sift[8:16] = .5
        sift[16:24] = .0
        sift[24:32] = .5
        sift[32:40] = .8
        sift[40:48] = .8
        sift[48:56] = .1
        sift[56:64] = .2
        sift[64:72] = .3
        sift[72:80] = .4
        sift[80:88] = .5
        sift[88:96] = .6
        sift[96:104] = .7
        sift[104:112] = .8
        sift[112:120] = .9
        sift[120:128] = 1
        sift = sift / np.sqrt((sift ** 2).sum())
        sift = np.round(sift * 255)

        kpts = np.array([kp])
        sifts = np.array([sift])
        return kpts, sifts

    def square_axis(ax, s=3):
        ax.set_xlim(-s, s)
        ax.set_ylim(-s, s)
        ax.set_aspect('equal')
        ax.invert_yaxis()
        df2.set_xticks([])
        df2.set_yticks([])

    def test_shape(ori=0, skew=0, xscale=1, yscale=1, pnum=(1, 1, 1), fnum=1):
        df2.figure(fnum=fnum, pnum=pnum)
        kpts, sifts = test_keypoint(ori=ori, skew=skew, xscale=xscale,
                                    yscale=yscale)
        ax = df2.gca()
        square_axis(ax)
        mpl_keypoint.draw_keypoints(ax, kpts, sifts=sifts,
                                    ell_color=df2.ORANGE,
                                    ori=True,
                                    rect_color=df2.DARK_RED,
                                    ori_color=df2.DEEP_PINK,
                                    eig_color=df2.PINK,
                                    rect=True,
                                    eig=True,
                                    bin_color=df2.RED,
                                    arm1_color=df2.YELLOW,
                                    arm2_color=df2.BLACK)

        kptsstr = '\n'.join(ktool.get_kpts_strs(kpts))
        #print(kptsstr)
        df2.upperleft_text(kptsstr)

        title = 'xyscale=(%.1f, %.1f),\n skew=%.1f, ori=%.2ftau' % (
            xscale, yscale, skew, ori / TAU)
        df2.set_title(title)
        df2.dark_background()
        return kpts, sifts

    np.set_printoptions(precision=3)

    #THETA1 = DOWN
    #THETA2 = (DOWN + DOWN + RIGHT) / 3
    #THETA3 = (DOWN + RIGHT) / 2
    #THETA4 = (DOWN + RIGHT + RIGHT) / 3
    #THETA5 = RIGHT

    nRows = 2
    nCols = 4

    import plottool as pt
    #pnum_ = pt.pnum_generator(nRows, nCols).next
    pnum_ = pt.pnum_generator(nRows, nCols)

    #def pnum_(px=None):
    #    global px_
    #    if px is None:
    #        px_ += 1
    #        px = px_
    #    return (nRows, nCols, px)

    MIN_ORI = ut.get_argval('--min-ori', float, DOWN)
    MAX_ORI = ut.get_argval('--max-ori', float, DOWN + TAU - .2)

    MIN_X = .5
    MAX_X = 2

    MIN_SKEW = ut.get_argval('--min-skew', float, 0)
    MAX_SKEW = ut.get_argval('--max-skew', float, 1)

    MIN_Y = .5
    MAX_Y = 2

    #kp_list = []
    for row, col in iprod(range(nRows), range(nCols)):
        #print((row, col))
        alpha = col / (nCols)
        beta = row / (nRows)
        xsca = (MIN_X * (1 - alpha)) + (MAX_X * (alpha))
        ori = (MIN_ORI * (1 - alpha)) + (MAX_ORI * (alpha))
        skew = (MIN_SKEW * (1 - beta)) + (MAX_SKEW * (beta))
        ysca = (MIN_Y * (1 - beta)) + (MAX_Y * (beta))

        kpts, sifts = test_shape(pnum=six.next(pnum_),
                                 ori=ori,
                                 skew=skew,
                                 xscale=xsca,
                                 yscale=ysca)
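# The keypoints above are 6-vectors (x, y, iv11, iv21, iv22, ori), where the
# iv?? entries fill a lower-triangular matrix.  As I read the vtool convention,
# that matrix maps the unit circle onto the keypoint ellipse; a numpy-only
# sketch under that assumption:
def _demo_keypoint_ellipse(x=0.0, y=0.0, iv11=2.0, iv21=0.5, iv22=1.0):
    import numpy as np
    V = np.array([[iv11, 0.0],
                  [iv21, iv22]])
    theta = np.linspace(0, 2 * np.pi, 64)
    circle = np.vstack([np.cos(theta), np.sin(theta)])  # unit circle samples
    ellipse = V.dot(circle) + np.array([[x], [y]])      # keypoint outline
    return ellipse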
from __future__ import absolute_import, division, print_function import numpy as np import plottool.draw_func2 as df2 from plottool import fig_presenter #from plottool import custom_figure #from plottool import custom_constants #from os.path import join import utool as ut ut.noinject(__name__, '[plot_helpers]') #(print, print_, printDBG, rrr, profile) = ut.inject(__name__, '[plot_helpers]', DEBUG=False) SIFT_OR_VECFIELD = ut.get_argval('--vecfield', type_=bool) def draw(): df2.adjust_subplots_safe() fig_presenter.draw() #def dump_figure(dumpdir, subdir=None, quality=False, overwrite=False, verbose=2, # reset=True): # """ Dumps figure to disk based on the figurename """ # if quality is True: # custom_constants.FIGSIZE = custom_constants.golden_wh2(14) # #custom_constants.DPI = 120 # custom_constants.DPI = 120 # #custom_constants.FIGSIZE = custom_constants.golden_wh2(12) # #custom_constants.DPI = 120 # custom_constants.FONTS.figtitle = custom_constants.FONTS.small # elif quality is False: # #custom_constants.FIGSIZE = custom_constants.golden_wh2(8)
def demo2():
    """
    CommandLine:
        python -m ibeis.algo.graph.demo demo2 --viz
        python -m ibeis.algo.graph.demo demo2

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.graph.demo import *  # NOQA
        >>> result = demo2()
        >>> print(result)
    """
    import plottool as pt

    from ibeis.scripts.thesis import TMP_RC
    import matplotlib as mpl
    mpl.rcParams.update(TMP_RC)

    # ---- Synthetic data params
    params = {
        'redun.pos': 2,
        'redun.neg': 2,
    }
    # oracle_accuracy = .98
    # oracle_accuracy = .90
    # oracle_accuracy = (.8, 1.0)
    oracle_accuracy = (.85, 1.0)
    # oracle_accuracy = 1.0

    # --- draw params
    VISUALIZE = ut.get_argflag('--viz')
    # QUIT_OR_EMBED = 'embed'
    QUIT_OR_EMBED = 'quit'
    TARGET_REVIEW = ut.get_argval('--target', type_=int, default=None)
    START = ut.get_argval('--start', type_=int, default=None)
    END = ut.get_argval('--end', type_=int, default=None)

    # ------------------

    # rng = np.random.RandomState(42)
    # infr = demodata_infr(num_pccs=4, size=3, size_std=1, p_incon=0)
    # infr = demodata_infr(num_pccs=6, size=7, size_std=1, p_incon=0)
    # infr = demodata_infr(num_pccs=3, size=5, size_std=.2, p_incon=0)
    infr = demodata_infr(pcc_sizes=[5, 2, 4])
    infr.verbose = 100
    # apply_dummy_viewpoints(infr)
    # infr.ensure_cliques()
    infr.ensure_cliques()
    infr.ensure_full()
    # infr.apply_edge_truth()
    # Dummy scoring
    infr.init_simulation(oracle_accuracy=oracle_accuracy, name='demo2')
    # infr_gt = infr.copy()

    dpath = ut.ensuredir(ut.truepath('~/Desktop/demo'))
    ut.remove_files_in_dir(dpath)

    fig_counter = it.count(0)

    def show_graph(infr, title, final=False, selected_edges=None):
        if not VISUALIZE:
            return
        # TODO: rich colored text?
        latest = '\n'.join(infr.latest_logs())
        showkw = dict(
            # fontsize=infr.graph.graph['fontsize'],
            # fontname=infr.graph.graph['fontname'],
            show_unreviewed_edges=True,
            show_inferred_same=False,
            show_inferred_diff=False,
            outof=(len(infr.aids)),
            # show_inferred_same=True,
            # show_inferred_diff=True,
            selected_edges=selected_edges,
            show_labels=True,
            simple_labels=True,
            # show_recent_review=not final,
            show_recent_review=False,
            # splines=infr.graph.graph['splines'],
            reposition=False,
            # with_colorbar=True
        )
        verbose = infr.verbose
        infr.verbose = 0
        infr_ = infr  # (a dead infr.copy() used to be immediately overwritten here)
        infr_.verbose = verbose
        infr_.show(pickable=True, verbose=0, **showkw)
        infr.verbose = verbose
        # print('status ' + ut.repr4(infr_.status()))
        # infr.show(**showkw)
        ax = pt.gca()
        pt.set_title(title, fontsize=20)
        fig = pt.gcf()
        fontsize = 22
        if True:
            # postprocess xlabel
            lines = []
            for line in latest.split('\n'):
                if False and line.startswith('ORACLE ERROR'):
                    lines += ['ORACLE ERROR']
                else:
                    lines += [line]
            latest = '\n'.join(lines)
            if len(lines) > 10:
                fontsize = 16
            if len(lines) > 12:
                fontsize = 14
            if len(lines) > 14:
                fontsize = 12
            if len(lines) > 18:
                fontsize = 10
            if len(lines) > 23:
                fontsize = 8

        if True:
            pt.adjust_subplots(top=.95, left=0, right=1, bottom=.45, fig=fig)
            ax.set_xlabel('\n' + latest)
            xlabel = ax.get_xaxis().get_label()
            xlabel.set_horizontalalignment('left')
            # xlabel.set_x(.025)
            xlabel.set_x(-.6)
            # xlabel.set_fontname('CMU Typewriter Text')
            xlabel.set_fontname('Inconsolata')
            xlabel.set_fontsize(fontsize)
        ax.set_aspect('equal')

        # ax.xaxis.label.set_color('red')

        from os.path import join
        fpath = join(dpath, 'demo_{:04d}.png'.format(next(fig_counter)))
        fig.savefig(
            fpath,
            dpi=300,
            # transparent=True,
            edgecolor='none')
        # pt.save_figure(dpath=dpath, dpi=300)
        infr.latest_logs()

    if VISUALIZE:
        infr.update_visual_attrs(groupby='name_label')
        infr.set_node_attrs('pin', 'true')
        node_dict = ut.nx_node_dict(infr.graph)
        print(ut.repr4(node_dict[1]))

    if VISUALIZE:
        infr.latest_logs()

    # Pin Nodes into the target groundtruth position
    show_graph(infr, 'target-gt')

    print(ut.repr4(infr.status()))
    infr.clear_feedback()
    infr.clear_name_labels()
    infr.clear_edges()
    print(ut.repr4(infr.status()))
    infr.latest_logs()

    if VISUALIZE:
        infr.update_visual_attrs()

    infr.prioritize('prob_match')
    if VISUALIZE or TARGET_REVIEW is None or TARGET_REVIEW == 0:
        show_graph(infr, 'initial state')

    def on_new_candidate_edges(infr, edges):
        # hack: updating visual attrs as a callback
        infr.update_visual_attrs()

    infr.on_new_candidate_edges = on_new_candidate_edges
    infr.params.update(**params)
    infr.refresh_candidate_edges()

    VIZ_ALL = (VISUALIZE and TARGET_REVIEW is None and START is None)
    print('VIZ_ALL = %r' % (VIZ_ALL,))
    if VIZ_ALL or TARGET_REVIEW == 0:
        show_graph(infr, 'find-candidates')

    # _iter2 = enumerate(infr.generate_reviews(**params))
    # _iter2 = list(_iter2)
    # assert len(_iter2) > 0
    # prog = ut.ProgIter(_iter2, label='demo2', bs=False, adjust=False,
    #                    enabled=False)
    count = 1
    first = 1
    for edge, priority in infr._generate_reviews(data=True):
        msg = 'review #%d, priority=%.3f' % (count, priority)
        print('\n----------')
        infr.print('pop edge {} with priority={:.3f}'.format(edge, priority))
        # print('remaining_reviews = %r' % (infr.remaining_reviews()),)
        # Make the next review
        if START is not None:
            VIZ_ALL = count >= START
        if END is not None and count >= END:
            break
        infr.print(msg)
        if ut.allsame(infr.pos_graph.node_labels(*edge)) and first:
            # Have oracle make a mistake early
            feedback = infr.request_oracle_review(edge, accuracy=0)
            first -= 1
        else:
            feedback = infr.request_oracle_review(edge)

        AT_TARGET = TARGET_REVIEW is not None and count >= TARGET_REVIEW - 1

        SHOW_CANDIDATE_POP = True
        if SHOW_CANDIDATE_POP and (VIZ_ALL or AT_TARGET):
            # import utool
            # utool.embed()
            infr.print(
                ut.repr2(infr.task_probs['match_state'][edge],
                         precision=4, si=True))
            infr.print('len(queue) = %r' % (len(infr.queue)))
            # Show edge selection
            infr.print('Oracle will predict: ' + feedback['evidence_decision'])
            show_graph(infr, 'pre' + msg, selected_edges=[edge])

        if count == TARGET_REVIEW:
            infr.EMBEDME = QUIT_OR_EMBED == 'embed'

        infr.add_feedback(edge, **feedback)
        infr.print('len(queue) = %r' % (len(infr.queue)))
        # infr.apply_nondynamic_update()

        # Show the result
        if VIZ_ALL or AT_TARGET:
            show_graph(infr, msg)
            # import sys
            # sys.exit(1)
        if count == TARGET_REVIEW:
            break
        count += 1

    infr.print('status = ' + ut.repr4(infr.status(extended=False)))
    show_graph(infr, 'post-review (#reviews={})'.format(count), final=True)

    # ROUND 2 FIGHT
    # if TARGET_REVIEW is None and round2_params is not None:
    #     # HACK TO GET NEW THINGS IN QUEUE
    #     infr.params = round2_params
    #     _iter2 = enumerate(infr.generate_reviews(**params))
    #     prog = ut.ProgIter(_iter2, label='round2', bs=False, adjust=False,
    #                        enabled=False)
    #     for count, (aid1, aid2) in prog:
    #         msg = 'reviewII #%d' % (count)
    #         print('\n----------')
    #         print(msg)
    #         print('remaining_reviews = %r' % (infr.remaining_reviews()),)
    #         # Make the next review evidence_decision
    #         feedback = infr.request_oracle_review(edge)
    #         if count == TARGET_REVIEW:
    #             infr.EMBEDME = QUIT_OR_EMBED == 'embed'
    #         infr.add_feedback(edge, **feedback)
    #         # Show the result
    #         if PRESHOW or TARGET_REVIEW is None or count >= TARGET_REVIEW - 1:
    #             show_graph(infr, msg)
    #         if count == TARGET_REVIEW:
    #             break
    #
    # show_graph(infr, 'post-re-review', final=True)

    if not getattr(infr, 'EMBEDME', False):
        if ut.get_computer_name().lower() in ['hyrule', 'ooo']:
            pt.all_figures_tile(monitor_num=0, percent_w=.5)
        else:
            pt.all_figures_tile()
        ut.show_if_requested()
def run_tests():
    """
    >>> from ibeis.tests.run_tests import *  # NOQA
    """
    # starts logging for tests
    import ibeis
    ibeis._preload()
    # Build module list and run tests
    import sys
    if True:
        ensure_testing_data()

    if ut.in_pyinstaller_package():
        # Run tests for installer
        doctest_modname_list_ = static_doctest_modnames()
    else:
        doctest_modname_list_ = dynamic_doctest_modnames()

    exclude_doctest_pattern = ut.get_argval(('--exclude-doctest-patterns', '--x'),
                                            type_=list, default=[])
    if exclude_doctest_pattern:
        import re
        is_ok = [all([re.search(pat, name) is None
                      for pat in exclude_doctest_pattern])
                 for name in doctest_modname_list_]
        doctest_modname_list = ut.compress(doctest_modname_list_, is_ok)
    else:
        doctest_modname_list = doctest_modname_list_

    # (renamed from `coverage` so the flag does not shadow the module below)
    use_coverage = ut.get_argflag(('--coverage', '--cov',))
    if use_coverage:
        import coverage
        cov = coverage.Coverage(source=doctest_modname_list)
        cov.start()
        print('Starting coverage')

        exclude_lines = [
            'pragma: no cover',
            'def __repr__',
            'if self.debug:',
            'if settings.DEBUG',
            'raise AssertionError',
            'raise NotImplementedError',
            'if 0:',
            'if ut.VERBOSE',
            'if _debug:',
            'if __name__ == .__main__.:',
            'print(.*)',
        ]
        for line in exclude_lines:
            cov.exclude(line)

    doctest_modname_list2 = []

    try:
        import guitool_ibeis  # NOQA
    except ImportError:
        HAVE_GUI = False
    else:
        HAVE_GUI = True

    # Remove gui things if possible
    import re
    if not HAVE_GUI:
        # filter the already-excluded list (not the raw list) so the
        # --exclude-doctest-patterns filtering above is preserved
        doctest_modname_list = [
            modname for modname in doctest_modname_list if
            not re.search('\\bgui\\b', modname) and
            not re.search('\\bviz\\b', modname)
        ]

    for modname in doctest_modname_list:
        try:
            exec('import ' + modname, globals(), locals())
        except ImportError as ex:
            ut.printex(ex, iswarning=True)
            # import parse
            # if not HAVE_GUI:
            #     try:
            #         parsed = parse.parse('No module named {}', str(ex))
            #         if parsed is None:
            #             parsed = parse.parse('cannot import name {}', str(ex))
            #         if parsed is not None:
            #             if parsed[0].endswith('_gui'):
            #                 print('skipping gui module %r' % (parsed[0],))
            #                 continue
            #             if parsed[0].startswith('viz_'):
            #                 print('skipping viz module %r' % (parsed[0],))
            #                 continue
            #             if parsed[0].startswith('interact_'):
            #                 print('skipping interact module %r' % (parsed[0],))
            #                 continue
            #             # if parsed[0] in ['sip']:
            #             #     print('skipping Qt module %r' % (parsed[0],))
            #             #     continue
            #     except:
            #         pass
            if not ut.in_pyinstaller_package():
                raise
        else:
            doctest_modname_list2.append(modname)

    module_list = [sys.modules[name] for name in doctest_modname_list2]

    # Write to py.test / nose format
    if ut.get_argflag('--tonose'):
        convert_tests_from_ibeis_to_nose(module_list)
        return 0

    nPass, nTotal, failed_cmd_list = ut.doctest_module_list(module_list)

    if use_coverage:
        print('Stopping coverage')
        cov.stop()
        print('Saving coverage')
        cov.save()
        print('Generating coverage html report')
        cov.html_report()

    if nPass != nTotal:
        return 1
    else:
        return 0
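# The coverage workflow in run_tests, reduced to its essentials (this is the
# standard coverage.py API; the module names here are placeholders):
def _demo_coverage(modnames=('ibeis.algo',)):
    import coverage
    cov = coverage.Coverage(source=list(modnames))
    cov.exclude('pragma: no cover')  # regex of lines to omit from the report
    cov.start()
    # ... import modules and run the test suite here ...
    cov.stop()
    cov.save()
    cov.html_report()  # writes htmlcov/index.html by default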
def show_chip(ibs, aid, in_image=False, annote=True, title_suffix='',
              weight_label=None, weights=None, config2_=None, **kwargs):
    r"""
    Driver function to show chips

    Args:
        ibs (ibeis.IBEISController):
        aid (int): annotation rowid
        in_image (bool): displays annotation with the context of its source image
        annote (bool): enables overlay annotations
        title_suffix (str):
        weight_label (None): (default = None)
        weights (None): (default = None)
        config2_ (dict): (default = None)

    Kwargs:
        enable_chip_title_prefix, nokpts, kpts_subset, kpts, text_color,
        notitle, draw_lbls, show_aidstr, show_gname, show_name, show_nid,
        show_exemplar, show_num_gt, show_quality_text, show_viewcode, fnum,
        title, figtitle, pnum, interpolation, cmap, heatmap, data_colorbar,
        darken, update, xlabel, redraw_image, ax, alpha, docla, doclf,
        projection, pts, ell
        color (3/4-tuple, ndarray, or str): colors for keypoints

    CommandLine:
        python -m ibeis.viz.viz_chip show_chip --show --ecc
        python -c "import utool as ut; ut.print_auto_docstr('ibeis.viz.viz_chip', 'show_chip')"
        python -m ibeis.viz.viz_chip show_chip --show --db NNP_Master3 --aids 14047 --no-annote
        python -m ibeis.viz.viz_chip show_chip --show --db PZ_MTEST --aid 1 --bgmethod=cnn
        python -m ibeis.viz.viz_chip show_chip --show --db PZ_MTEST --aid 1 --bgmethod=cnn --scale_max=30
        python -m ibeis.viz.viz_chip show_chip --show --db PZ_MTEST --aid 1 --ecc --draw_lbls=False --notitle --save=~/slides/lnbnn_query.jpg --dpi=300

    Example:
        >>> # VIZ_TEST
        >>> from ibeis.viz.viz_chip import *  # NOQA
        >>> import numpy as np
        >>> import vtool as vt
        >>> in_image = False
        >>> ibs, aid_list, kwargs, config2_ = testdata_showchip()
        >>> aid = aid_list[0]
        >>> if True:
        >>>     import matplotlib as mpl
        >>>     from ibeis.scripts.thesis import TMP_RC
        >>>     mpl.rcParams.update(TMP_RC)
        >>> if ut.get_argflag('--ecc'):
        >>>     kpts = ibs.get_annot_kpts(aid, config2_=config2_)
        >>>     weights = ibs.get_annot_fgweights([aid], ensure=True, config2_=config2_)[0]
        >>>     kpts = ut.random_sample(kpts[weights > .9], 200, seed=0)
        >>>     ecc = vt.get_kpts_eccentricity(kpts)
        >>>     scale = 1 / vt.get_scales(kpts)
        >>>     #s = ecc if config2_.affine_invariance else scale
        >>>     s = scale
        >>>     colors = pt.scores_to_color(s, cmap_='jet')
        >>>     kwargs['color'] = colors
        >>>     kwargs['kpts'] = kpts
        >>>     kwargs['ell_linewidth'] = 3
        >>>     kwargs['ell_alpha'] = .7
        >>> show_chip(ibs, aid, in_image=in_image, config2_=config2_, **kwargs)
        >>> pt.show_if_requested()
    """
    if ut.VERBOSE:
        print('[viz] show_chip(aid=%r)' % (aid,))
    #ibs.assert_valid_aids((aid,))
    # Get chip
    #print('in_image = %r' % (in_image,))
    chip = vh.get_chips(ibs, aid, in_image=in_image, config2_=config2_)
    # Create chip title
    chip_text = vh.get_annot_texts(ibs, [aid], **kwargs)[0]
    if kwargs.get('enable_chip_title_prefix', True):
        chip_title_text = chip_text + title_suffix
    else:
        chip_title_text = title_suffix
    chip_title_text = chip_title_text.strip('\n')
    # Draw chip
    fig, ax = pt.imshow(chip, **kwargs)
    # Populate axis user data
    vh.set_ibsdat(ax, 'viztype', 'chip')
    vh.set_ibsdat(ax, 'aid', aid)
    if annote and not kwargs.get('nokpts', False):
        # Get and draw keypoints
        if 'color' not in kwargs:
            if weight_label == 'fg_weights':
                if weights is None and ibs.has_species_detector(
                        ibs.get_annot_species_texts(aid)):
                    weight_label = 'fg_weights'
                    weights = ibs.get_annot_fgweights([aid], ensure=True,
                                                      config2_=config2_)[0]
            if weights is not None:
                cmap_ = 'hot'
                #if weight_label == 'dstncvs':
                #    cmap_ = 'rainbow'
                color = pt.scores_to_color(weights, cmap_=cmap_,
                                           reverse_cmap=False)
                kwargs['color'] = color
                kwargs['ell_color'] = color
                kwargs['pts_color'] = color

        kpts_ = vh.get_kpts(ibs, aid, in_image, config2_=config2_,
                            kpts_subset=kwargs.get('kpts_subset', None),
                            kpts=kwargs.pop('kpts', None))
        pt.viz_keypoints._annotate_kpts(kpts_, **kwargs)
        if kwargs.get('draw_lbls', True):
            pt.upperleft_text(chip_text, color=kwargs.get('text_color', None))
    use_title = not kwargs.get('notitle', False)
    if use_title:
        pt.set_title(chip_title_text)
    if in_image:
        gid = ibs.get_annot_gids(aid)
        aid_list = ibs.get_image_aids(gid)
        annotekw = viz_image.get_annot_annotations(
            ibs, aid_list, sel_aids=[aid],
            draw_lbls=kwargs.get('draw_lbls', True))
        # Put annotation centers in the axis
        ph.set_plotdat(ax, 'annotation_bbox_list', annotekw['bbox_list'])
        ph.set_plotdat(ax, 'aid_list', aid_list)
        pt.viz_image2.draw_image_overlay(ax, **annotekw)

        zoom_ = ut.get_argval('--zoom', type_=float, default=None)
        if zoom_ is not None:
            import vtool as vt
            # Zoom into the chip for some image context
            rotated_verts = ibs.get_annot_rotated_verts(aid)
            bbox = ibs.get_annot_bboxes(aid)
            #print(bbox)
            #print(rotated_verts)
            rotated_bbox = vt.bbox_from_verts(rotated_verts)
            imgw, imgh = ibs.get_image_sizes(gid)

            pad_factor = zoom_
            pad_length = min(bbox[2], bbox[3]) * pad_factor
            minx = max(rotated_bbox[0] - pad_length, 0)
            miny = max(rotated_bbox[1] - pad_length, 0)
            maxx = min((rotated_bbox[0] + rotated_bbox[2]) + pad_length, imgw)
            maxy = min((rotated_bbox[1] + rotated_bbox[3]) + pad_length, imgh)
            #maxy = imgh - maxy
            #miny = imgh - miny
            ax = pt.gca()
            ax.set_xlim(minx, maxx)
            ax.set_ylim(miny, maxy)
            ax.invert_yaxis()
    else:
        ph.set_plotdat(ax, 'chipshape', chip.shape)

    #if 'featweights' in vars() and 'color' in kwargs:
    if weights is not None and weight_label is not None:
        ## HACK HACK HACK
        if len(weights) > 0:
            cb = pt.colorbar(weights, kwargs['color'])
            cb.set_label(weight_label)
    return fig, ax
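# The --zoom flag in show_chip pads the (rotated) annotation bbox by
# zoom_ * min(bbox_w, bbox_h) and crops the axes to that window, e.g.:
#   python -m ibeis.viz.viz_chip show_chip --show --db PZ_MTEST --aid 1 --zoom=0.3
# Smaller values zoom in tighter around the annotation; note the flag is only
# read on the in_image=True code path, since the else branch never parses it.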
# TMP_RC = { # 'axes.titlesize': 12, # 'axes.labelsize': 12, # 'font.family': 'DejaVu Sans', # 'xtick.labelsize': 12, # 'ytick.labelsize': 12, # # 'legend.fontsize': 18, # # 'legend.alpha': .8, # 'legend.fontsize': 12, # 'legend.facecolor': 'w', # } TMP_RC = { 'axes.titlesize': 12, 'axes.labelsize': ut.get_argval('--labelsize', default=12), 'font.family': 'sans-serif', 'font.serif': 'CMU Serif', 'font.sans-serif': 'CMU Sans Serif', 'font.monospace': 'CMU Typewriter Text', 'xtick.labelsize': 12, 'ytick.labelsize': 12, # 'legend.alpha': .8, 'legend.fontsize': 12, 'legend.facecolor': 'w', } W, H = 7.4375, 3.0 def dbname_to_species_nice(dbname):
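# TMP_RC is meant to be splatted into matplotlib before any figures are drawn,
# as demo2 above does with the thesis copy of this dict:
#   import matplotlib as mpl
#   mpl.rcParams.update(TMP_RC)
# The CMU font entries only take effect if those font families are installed;
# matplotlib quietly falls back to its default sans-serif otherwise.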
def testdata_expts(
    defaultdb='testdb1',
    default_acfgstr_name_list=['default:qindex=0:10:4,dindex=0:20'],
    default_test_cfg_name_list=['default'],
    a=None,
    t=None,
    p=None,
    qaid_override=None,
    daid_override=None,
    initial_aids=None,
    use_cache=None,
    dbdir=None,
    ibs=None,
):
    r"""
    Use this if you want data from an experiment. Command line interface to
    quickly get testdata for test_results.

    Command line flags can be used to specify db, aidcfg, pipecfg, qaid
    override, daid override (and maybe initial aids).

    CommandLine:
        python -m wbia.init.main_helpers testdata_expts

    Example:
        >>> # DISABLE_DOCTEST
        >>> from wbia.other.dbinfo import *  # NOQA
        >>> import wbia
        >>> ibs, testres = wbia.testdata_expts(defaultdb='PZ_MTEST',
        >>>                                    a='timectrl:qsize=2',
        >>>                                    t='invar:AI=[False],RI=False',
        >>>                                    use_cache=False)
        >>> print('testres = %r' % (testres,))
    """
    if ut.VERBOSE:
        logger.info('[main_helpers] testdata_expts')
    import wbia
    from wbia.expt import harness

    if a is not None:
        default_acfgstr_name_list = a
    if t is not None and p is None:
        p = t
    if p is not None:
        default_test_cfg_name_list = p

    if isinstance(default_acfgstr_name_list, six.string_types):
        default_acfgstr_name_list = [default_acfgstr_name_list]
    if isinstance(default_test_cfg_name_list, six.string_types):
        default_test_cfg_name_list = [default_test_cfg_name_list]

    # from wbia.expt import experiment_helpers
    if dbdir is not None:
        dbdir = ut.truepath(dbdir)
    if ibs is None:
        ibs = wbia.opendb(defaultdb=defaultdb, dbdir=dbdir)
    acfg_name_list = ut.get_argval(
        ('--aidcfg', '--acfg', '-a'), type_=list, default=default_acfgstr_name_list
    )
    test_cfg_name_list = ut.get_argval(
        ('-t', '-p'), type_=list, default=default_test_cfg_name_list
    )
    daid_override = ut.get_argval(
        ('--daid-override', '--daids-override'), type_=list, default=daid_override
    )
    qaid_override = ut.get_argval(
        ('--qaid', '--qaids-override', '--qaid-override'),
        type_=list,
        default=qaid_override,
    )

    # Hack a cache here
    use_bulk_cache = not ut.get_argflag(('--nocache', '--nocache-hs'))
    use_bulk_cache &= ut.is_developer()
    if use_cache is not None:
        use_bulk_cache &= use_cache
    use_bulk_cache &= False
    # use_bulk_cache = True
    if use_bulk_cache:
        from os.path import dirname

        cache_dir = ut.ensuredir((dirname(ut.get_module_dir(wbia)), 'BULK_TESTRES'))
        _cache_wrp = ut.cached_func('testreslist', cache_dir=cache_dir)
        _load_testres = _cache_wrp(harness.run_expt)
    else:
        _load_testres = harness.run_expt
    testres = _load_testres(
        ibs,
        acfg_name_list,
        test_cfg_name_list,
        qaid_override=qaid_override,
        daid_override=daid_override,
        initial_aids=initial_aids,
        use_cache=use_cache,
    )
    # testres = test_result.combine_testres_list(ibs, testres_list)

    if ut.VERBOSE:
        logger.info(testres)
    return ibs, testres
def _setup_links(self, cfg_prefix, config=None): """ Called only when setting up an experiment to make a measurement. Creates symlinks such that all data is written to a directory that depends on a computer name, cfg_prefix and an arbitrary configuration dict. Then force the link in the basic directory to point to abs_dpath. """ # Setup directory from os.path import expanduser assert self.dname is not None computer_id = ut.get_argval('--comp', default=ut.get_computer_name()) conf_dpath = ut.ensuredir((expanduser(self.base_dpath), 'configured')) comp_dpath = ut.ensuredir((join(conf_dpath, computer_id))) link_dpath = ut.ensuredir((self.base_dpath, 'link')) # if True: # # move to new system # old_dpath = join(conf_dpath, self.dbname + '_' + computer_id) # if exists(old_dpath): # ut.move(old_dpath, join(comp_dpath, self.dbname)) try: cfgstr = ut.repr3(config.getstate_todict_recursive()) except AttributeError: cfgstr = ut.repr3(config) hashid = ut.hash_data(cfgstr)[0:6] suffix = '_'.join([cfg_prefix, hashid]) dbcode = self.dbname + '_' + suffix abs_dpath = ut.ensuredir(join(comp_dpath, dbcode)) self.dname = dbcode self.dpath = abs_dpath self.abs_dpath = abs_dpath # Place a basic link in the base link directory links = [] links.append(expanduser(join(link_dpath, self.dbname))) # # Make a configured but computer agnostic link # links.append(expanduser(join(conf_dpath, self.dbname))) for link in links: try: # Overwrite any existing link so the most recently used is # the default self.link = ut.symlink(abs_dpath, link, overwrite=True) except Exception: if exists(abs_dpath): newpath = ut.non_existing_path(abs_dpath, suffix='_old') ut.move(link, newpath) self.link = ut.symlink(abs_dpath, link) ut.writeto(join(abs_dpath, 'info.txt'), cfgstr)
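# A stdlib-only sketch of the "force the link at the most recently used
# config" trick that _setup_links delegates to ut.symlink(..., overwrite=True):
def _force_symlink(target, linkpath):
    import os
    if os.path.islink(linkpath):
        os.unlink(linkpath)  # drop the stale link so the new one wins
    os.symlink(target, linkpath)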
def name_model_mode5(num_annots, num_names=None, verbose=True, mode=1): mode = ut.get_argval('--mode', default=mode) annots = ut.chr_range(num_annots, base=ut.get_argval('--base', default='a')) # The indexes of match CPDs will not change if another annotation is added upper_diag_idxs = ut.colwise_diag_idxs(num_annots, 2) if num_names is None: num_names = num_annots # -- Define CPD Templates name_cpd_t = pgm_ext.TemplateCPD('name', ('n', num_names), varpref='N', special_basis_pool=SPECIAL_BASIS_POOL) name_cpds = [name_cpd_t.new_cpd(parents=aid) for aid in annots] def match_pmf(match_type, n1, n2): return { True: { 'same': 1.0, 'diff': 0.0 }, False: { 'same': 0.0, 'diff': 1.0 }, }[n1 == n2][match_type] match_cpd_t = pgm_ext.TemplateCPD( 'match', ['diff', 'same'], varpref='M', evidence_ttypes=[name_cpd_t, name_cpd_t], pmf_func=match_pmf, ) namepair_cpds = ut.list_unflat_take(name_cpds, upper_diag_idxs) match_cpds = [match_cpd_t.new_cpd(parents=cpds) for cpds in namepair_cpds] def trimatch_pmf(match_ab, match_bc, match_ca): lookup = { 'same': { 'same': { 'same': 1, 'diff': 0, }, 'diff': { 'same': 0, 'diff': 1, }, }, 'diff': { 'same': { 'same': 0, 'diff': 1, }, 'diff': { 'same': 0.5, 'diff': 0.5, }, }, } return lookup[match_ca][match_bc][match_ab] trimatch_cpd_t = pgm_ext.TemplateCPD( 'tri_match', ['diff', 'same'], varpref='T', evidence_ttypes=[match_cpd_t, match_cpd_t], pmf_func=trimatch_pmf, ) # triple_idxs = ut.colwise_diag_idxs(num_annots, 3) tid2_match = {cpd._template_id: cpd for cpd in match_cpds} trimatch_cpds = [] # such hack for cpd in match_cpds: parents = [] this_ = list(cpd._template_id) for aid in annots: if aid in this_: continue for aid2 in this_: key = aid2 + aid if key not in tid2_match: key = aid + aid2 parents += [tid2_match[key]] trimatch_cpds += [trimatch_cpd_t.new_cpd(parents=parents)] def score_pmf(score_type, match_type): score_lookup = { 'same': { 'low': 0.1, 'high': 0.9, 'veryhigh': 0.9 }, 'diff': { 'low': 0.9, 'high': 0.09, 'veryhigh': 0.01 }, } val = score_lookup[match_type][score_type] return val score_cpd_t = pgm_ext.TemplateCPD( 'score', ['low', 'high'], varpref='S', evidence_ttypes=[match_cpd_t], pmf_func=score_pmf, ) score_cpds = [ score_cpd_t.new_cpd(parents=cpds) for cpds in zip(match_cpds) ] # score_cpds = [score_cpd_t.new_cpd(parents=cpds) # for cpds in zip(trimatch_cpds)] cpd_list = name_cpds + score_cpds + match_cpds + trimatch_cpds logger.info('score_cpds = %r' % (ut.list_getattr(score_cpds, 'variable'), )) # Make Model model = pgm_ext.define_model(cpd_list) model.num_names = num_names if verbose: model.print_templates() return model
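# Sanity sketch for the match CPD above: P(match='same') should be 1 exactly
# when the two name variables agree.  The pmf here mirrors match_pmf's lookup
# table verbatim:
def _check_match_pmf():
    def match_pmf(match_type, n1, n2):
        return {
            True: {'same': 1.0, 'diff': 0.0},
            False: {'same': 0.0, 'diff': 1.0},
        }[n1 == n2][match_type]
    assert match_pmf('same', 'n1', 'n1') == 1.0
    assert match_pmf('diff', 'n1', 'n2') == 1.0
    assert match_pmf('same', 'n1', 'n2') == 0.0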