def preload_commands(dbdir, **kwargs):
    """ Preload commands work with command line arguments and global caches """
    #print('[main_cmd] preload_commands')
    if params.args.dump_argv:
        print(ut.dict_str(vars(params.args), sorted_=False))
    if params.args.dump_global_cache:
        ut.global_cache_dump()  # debug command, dumps to stdout
    if params.args.set_workdir is not None:
        sysres.set_workdir(params.args.set_workdir)
    if params.args.get_workdir:
        print(' Current work dir = %s' % sysres.get_workdir())
    if params.args.logdir is not None:
        sysres.set_logdir(params.args.logdir)
    if params.args.get_logdir:
        print(' Current log dir = %s' % (sysres.get_logdir(),))
    if params.args.view_logdir:
        ut.view_directory(sysres.get_logdir())
    if ut.get_argflag('--vwd'):
        vwd()
    if ut.get_argflag('--vdq'):
        print('got arg --vdq')
        vdq(dbdir)
    if kwargs.get('delete_ibsdir', False):
        ibsfuncs.delete_ibeis_database(dbdir)
    if params.args.convert:
        preload_convert_hsdb(dbdir)
    if params.args.preload_exit:
        print('[main_cmd] preload exit')
        sys.exit(1)
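# ---------------------------------------------------------------------------
# A minimal sketch of what the `ut.get_argflag` / `ut.get_argval` calls above
# appear to do, using only the standard library. This is an illustration, not
# utool's real implementation (which also handles aliases, caching, and richer
# type coercion); the *_sketch names are hypothetical.
import sys


def get_argflag_sketch(name):
    # Accept a single '--flag' or a tuple of aliases, as in the calls above.
    names = (name,) if isinstance(name, str) else name
    return any(n in sys.argv for n in names)


def get_argval_sketch(name, type_=str, default=None):
    # Return the value following '--key' (or inside '--key=value'), cast to type_.
    names = (name,) if isinstance(name, str) else name
    for ix, arg in enumerate(sys.argv):
        for n in names:
            if arg == n and ix + 1 < len(sys.argv):
                return type_(sys.argv[ix + 1])
            if arg.startswith(n + '='):
                return type_(arg.split('=', 1)[1])
    return default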
def main(): r""" python win32bootstrap.py --dl numpy --nocache python win32bootstrap.py --dl numpy-1.9.2rc1 --force python win32bootstrap.py --dl numpy-1.9.2rc1 --run python win32bootstrap.py --force python win32bootstrap.py --dryrun python win32bootstrap.py --dryrun --dl numpy scipy python win32bootstrap.py --dl numpy C:\Users\jon.crall\AppData\Roaming\utool\numpy-1.9.2rc1+mkl-cp27-none-win32.whl pip install C:/Users/jon.crall/AppData/Roaming/utool/numpy-1.9.2rc1+mkl-cp27-none-win32.whl """ # Packages that you are requesting pkg_list = [] if ut.get_argflag('--all'): pkg_list = KNOWN_PKG_LIST else: print('specify --all to download all packages') print('or specify --dl pkgname to download that package') pkg_list.extend(ut.get_argval('--dl', list, [])) dryrun = ut.get_argflag('--dryrun') pkg_exe_list = bootstrap_sysreq(pkg_list, dryrun=dryrun) if ut.get_argflag('--run'): for pkg_exe in pkg_exe_list: if pkg_exe.endswith('.whl'): ut.cmd('pip install ' + pkg_exe)
def autogen_ipynb(ibs, launch=None, run=None):
    r"""
    Autogenerates standard IBEIS Image Analysis IPython notebooks.

    CommandLine:
        python -m ibeis --tf autogen_ipynb --run --db lynx

        python -m ibeis --tf autogen_ipynb --ipynb --db PZ_MTEST --asreport
        python -m ibeis --tf autogen_ipynb --ipynb --db PZ_MTEST --noexample --withtags
        python -m ibeis --tf autogen_ipynb --db PZ_MTEST
        # TODO: Add support for dbdir to be specified
        python -m ibeis --tf autogen_ipynb --db ~/work/PZ_MTEST

        python -m ibeis --tf autogen_ipynb --ipynb --db Oxford -a default:qhas_any=\(query,\),dpername=1,exclude_reference=True,dminqual=good
        python -m ibeis --tf autogen_ipynb --ipynb --db PZ_MTEST -a default -t best:lnbnn_normalizer=[None,normlnbnn-test]

        python -m ibeis.templates.generate_notebook --exec-autogen_ipynb --db wd_peter_blinston --ipynb

        python -m ibeis --tf autogen_ipynb --db PZ_Master1 --ipynb
        python -m ibeis --tf autogen_ipynb --db PZ_Master1 -a timectrl:qindex=0:100 -t best best:normsum=True --ipynb --noexample
        python -m ibeis --tf autogen_ipynb --db PZ_Master1 -a timectrl --run
        jupyter-notebook Experiments-lynx.ipynb
        killall python

        python -m ibeis --tf autogen_ipynb --db humpbacks --ipynb -t default:proot=BC_DTW -a default:has_any=hasnotch
        python -m ibeis --tf autogen_ipynb --db humpbacks --ipynb -t default:proot=BC_DTW default:proot=vsmany -a default:has_any=hasnotch,mingt=2,qindex=0:50 --noexample

    Example:
        >>> # SCRIPT
        >>> from ibeis.templates.generate_notebook import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> result = autogen_ipynb(ibs)
        >>> print(result)
    """
    dbname = ibs.get_dbname()
    fname = 'Experiments-' + dbname
    nb_fpath = fname + '.ipynb'
    if ut.get_argflag('--cells'):
        notebook_cells = make_ibeis_cell_list(ibs)
        print('\n# ---- \n'.join(notebook_cells))
        return
    # TODO: Add support for dbdir to be specified
    notebook_str = make_ibeis_notebook(ibs)
    ut.writeto(nb_fpath, notebook_str)
    run = ut.get_argflag('--run') if run is None else run
    launch = launch if launch is not None else ut.get_argflag('--ipynb')
    if run:
        run_nb = ut.run_ipython_notebook(notebook_str)
        output_fpath = ut.export_notebook(run_nb, fname)
        ut.startfile(output_fpath)
    elif launch:
        ut.cmd('jupyter-notebook', nb_fpath, detatch=True)
        #ut.cmd('ipython-notebook', nb_fpath)
        #ut.startfile(nb_fpath)
    else:
        print('notebook_str =\n%s' % (notebook_str,))
def test_zmq_task():
    """
    CommandLine:
        python -m ibeis.web.zmq_task_queue --exec-test_zmq_task
        python -b -m ibeis.web.zmq_task_queue --exec-test_zmq_task

        python -m ibeis.web.zmq_task_queue --main
        python -m ibeis.web.zmq_task_queue --main --bg
        python -m ibeis.web.zmq_task_queue --main --fg

    Example:
        >>> # SCRIPT
        >>> from ibeis.web.zmq_task_queue import *  # NOQA
        >>> test_zmq_task()
    """
    _init_signals()
    # now start a few clients, and fire off some requests
    client_id = np.random.randint(1000)
    jobiface = JobInterface(client_id)
    reciever = JobBackend()
    if ut.get_argflag('--bg'):
        from ibeis.init import sysres
        dbdir = sysres.get_args_dbdir('cache', False, None, None,
                                      cache_priority=False)
        reciever.initialize_background_processes(dbdir)
        print('[testzmq] parent process is looping forever')
        while True:
            time.sleep(1)
    elif ut.get_argflag('--fg'):
        jobiface.initialize_client_thread()
    else:
        dbdir = sysres.get_args_dbdir('cache', False, None, None,
                                      cache_priority=False)
        reciever.initialize_background_processes(dbdir)
        jobiface.initialize_client_thread()

    # Foreground test script
    print('... waiting for jobs')
    if ut.get_argflag('--cmd'):
        ut.embed()
        jobiface.queue_job()
    else:
        print('[test] ... emit test1')
        jobid1 = jobiface.queue_job('helloworld', 1)
        jobiface.wait_for_job_result(jobid1)
        #jobiface.get_job_status(jobid1)
        #jobid_list = [jobiface.queue_job('helloworld', 5) for _ in range(NUM_JOBS)]
        #jobid_list += [jobiface.queue_job('get_valid_aids')]
        jobid_list = []
        #identify_jobid = jobiface.queue_job('query_chips', [1], [3, 4, 5], cfgdict={'K': 1})
        identify_jobid = jobiface.queue_job('query_chips_simple_dict', [1],
                                            [3, 4, 5], cfgdict={'K': 1})
        for jobid in jobid_list:
            jobiface.wait_for_job_result(jobid)
        jobiface.wait_for_job_result(identify_jobid)
    print('FINISHED TEST SCRIPT')
def testdata_show_qres():
    import ibeis
    cm, qreq_ = ibeis.testdata_cm()
    kwargs = dict(
        top_aids=ut.get_argval('--top-aids', type_=int, default=3),
        sidebyside=not ut.get_argflag('--no-sidebyside'),
        annot_mode=ut.get_argval('--annot_mode', type_=int, default=1),
        viz_name_score=not ut.get_argflag('--no-viz_name_score'),
        max_nCols=ut.get_argval('--max_nCols', type_=int, default=None)
    )
    return qreq_.ibs, cm, qreq_, kwargs
def are_you_sure(parent=None, msg=None, title='Confirmation', default=None):
    """ Prompt user for confirmation before changing something """
    msg = 'Are you sure?' if msg is None else msg
    print('[gt] Asking User if sure')
    print('[gt] title = %s' % (title,))
    print('[gt] msg =\n%s' % (msg,))
    if ut.get_argflag('-y') or ut.get_argflag('--yes'):
        # DONT ASK WHEN SPECIFIED
        return True
    ans = user_option(parent=parent, msg=msg, title=title,
                      options=['Yes', 'No'], use_cache=False, default=default)
    return ans == 'Yes'
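# A hedged stand-in for the -y/--yes bypass pattern above, without the
# guitool `user_option` dependency; `confirmed` is a hypothetical helper.
import sys


def confirmed(msg='Are you sure?'):
    # Mirrors are_you_sure: '-y' or '--yes' on the command line skips the prompt.
    if '-y' in sys.argv or '--yes' in sys.argv:
        return True
    return input(msg + ' [y/N] ').strip().lower().startswith('y')


if confirmed('Delete the cached directory?'):
    print('deleting...')  # runs unprompted under `python script.py -y`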
def __init__(self):
    #self.num_engines = 3
    self.num_engines = NUM_ENGINES
    self.engine_queue_proc = None
    self.collect_queue_proc = None
    self.engine_procs = None
    self.collect_proc = None
    # --
    only_engine = ut.get_argflag('--only-engine')
    self.spawn_collector = not only_engine
    self.spawn_engine = not ut.get_argflag('--no-engine')
    self.spawn_queue = not only_engine
def initialize_job_manager(ibs):
    """
    Run from the webserver

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.web.zmq_task_queue import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb('testdb1')

    Example:
        >>> # WEB_DOCTEST
        >>> from ibeis.web.zmq_task_queue import *  # NOQA
        >>> import ibeis
        >>> web_instance = ibeis.opendb_bg_web(db='testdb1', wait=10)
        >>> baseurl = 'http://127.0.1.1:5000'
        >>> _payload = {'image_attrs_list': [], 'annot_attrs_list': []}
        >>> payload = ut.map_dict_vals(ut.to_json, _payload)
        >>> #resp = requests.post(baseurl + '/api/core/helloworld/?f=b', data=payload)
        >>> resp = requests.post(baseurl + '/api/core/add_images_json/', data=payload)
        >>> print(resp)
        >>> web_instance.terminate()
        >>> json_dict = resp.json()
        >>> text = json_dict['response']
        >>> print(text)
    """
    ibs.job_manager = ut.DynStruct()
    ibs.job_manager.jobiface = JobInterface(0)
    if not ut.get_argflag('--fg'):
        ibs.job_manager.reciever = JobBackend()
        ibs.job_manager.reciever.initialize_background_processes(dbdir=ibs.get_dbdir())
    ibs.job_manager.jobiface.initialize_client_thread()
def exec_(script):
    import utool as ut
    print('+**** exec %s script *******' % (script.type_))
    print('repo = %r' % (repo,))
    with ut.ChdirContext(repo.dpath):
        if script.is_fpath_valid():
            normbuild_flag = '--no-rmbuild'
            if ut.get_argflag(normbuild_flag):
                ut.cmd(script.fpath + ' ' + normbuild_flag)
            else:
                ut.cmd(script.fpath)
        else:
            if script.text is not None:
                print('ABOUT TO EXECUTE')
                ut.print_code(script.text, 'bash')
                if ut.are_you_sure('execute above script?'):
                    from os.path import join
                    scriptdir = ut.ensure_app_resource_dir('utool', 'build_scripts')
                    script_path = join(scriptdir,
                                       'script_' + script.type_ + '_' +
                                       ut.hashstr27(script.text) + '.sh')
                    ut.writeto(script_path, script.text)
                    _ = ut.cmd('bash ', script_path)  # NOQA
            else:
                print("CANT QUITE EXECUTE THIS YET")
                ut.print_code(script.text, 'bash')
    #os.system(scriptname)
    print('L**** exec %s script *******' % (script.type_))
def vizualize_vocabulary(ibs, invindex):
    """
    cleaned up version of dump_word_patches. Makes idf scatter plots
    and dumps the patches that contributed to each word.

    CommandLine:
        python -m ibeis.algo.hots.smk.smk_plots --test-vizualize_vocabulary
        python -m ibeis.algo.hots.smk.smk_plots --test-vizualize_vocabulary --vf

    Example:
        >>> from ibeis.algo.hots.smk.smk_plots import *  # NOQA
        >>> from ibeis.algo.hots.smk import smk_debug
        >>> from ibeis.algo.hots.smk import smk_repr
        >>> #tup = smk_debug.testdata_raw_internals0(db='GZ_ALL', nWords=64000)
        >>> #tup = smk_debug.testdata_raw_internals0(db='GZ_ALL', nWords=8000)
        >>> tup = smk_debug.testdata_raw_internals0(db='PZ_Master0', nWords=64000)
        >>> #tup = smk_debug.testdata_raw_internals0(db='PZ_Mothers', nWords=8000)
        >>> ibs, annots_df, daids, qaids, invindex, qreq_ = tup
        >>> smk_repr.compute_data_internals_(invindex, qreq_.qparams, delete_rawvecs=False)
        >>> vizualize_vocabulary(ibs, invindex)
    """
    invindex.idx2_wxs = np.array(invindex.idx2_wxs)

    print('[smk_plots] Vizualizing vocabulary')

    # DUMPING PART --- dumps patches to disk
    figdir = ibs.get_fig_dir()
    ut.ensuredir(figdir)
    if ut.get_argflag('--vf'):
        ut.view_directory(figdir)

    # Compute Word Statistics
    metrics = compute_word_metrics(invindex)
    wx2_nMembers, wx2_pdist_stats, wx2_wdist_stats = metrics
    #(wx2_pdist, wx2_wdist, wx2_nMembers, wx2_pdist_stats, wx2_wdist_stats) = metrics

    #wx2_prad = {wx: pdist_stats['max'] for wx, pdist_stats in six.iteritems(wx2_pdist_stats) if 'max' in pdist_stats}
    #wx2_wrad = {wx: wdist_stats['max'] for wx, wdist_stats in six.iteritems(wx2_wdist_stats) if 'max' in wdist_stats}
    wx2_prad = {wx: stats['max'] for wx, stats in wx2_pdist_stats.items()
                if 'max' in stats}
    wx2_wrad = {wx: stats['max'] for wx, stats in wx2_wdist_stats.items()
                if 'max' in stats}
    #wx2_prad = get_metric(metrics, 'wx2_pdist_stats', 'max')
    #wx2_wrad = get_metric(metrics, 'wx2_wdist_stats', 'max')
    wx_sample1 = select_by_metric(wx2_nMembers)
    wx_sample2 = select_by_metric(wx2_prad)
    wx_sample3 = select_by_metric(wx2_wrad)

    wx_sample = wx_sample1 + wx_sample2 + wx_sample3
    overlap123 = len(wx_sample) - len(set(wx_sample))
    print('overlap123 = %r' % overlap123)
    wx_sample = set(wx_sample)
    print('len(wx_sample) = %r' % len(wx_sample))

    #make_scatterplots(ibs, figdir, invindex, metrics)
    vocabdir = join(figdir, 'vocab_patches2')
    wx2_dpath = get_word_dpaths(vocabdir, wx_sample, metrics)

    make_wordfigures(ibs, metrics, invindex, figdir, wx_sample, wx2_dpath)
def std_build_command(repo='.'):
    """ My standard build script names. Calls mingw_build.bat on windows and
    unix_build.sh on unix """
    import utool as ut
    print("+**** stdbuild *******")
    print('repo = %r' % (repo,))
    if sys.platform.startswith('win32'):
        #scriptname = './mingw_build.bat'  # vtool --rebuild-sver didn't work with this line
        scriptname = 'mingw_build.bat'
    else:
        scriptname = './unix_build.sh'
    if repo == '':
        # default to cwd
        repo = '.'
    else:
        os.chdir(repo)
    ut.assert_exists(scriptname)
    normbuild_flag = '--no-rmbuild'
    if ut.get_argflag(normbuild_flag):
        scriptname += ' ' + normbuild_flag
    # Execute build
    ut.cmd(scriptname)
    #os.system(scriptname)
    print("L**** stdbuild *******")
def std_build_command(repo="."): """ DEPRICATE My standard build script names. Calls mingw_build.bat on windows and unix_build.sh on unix """ import utool as ut print("+**** stdbuild *******") print("repo = %r" % (repo,)) if sys.platform.startswith("win32"): # vtool --rebuild-sver didnt work with this line # scriptname = './mingw_build.bat' scriptname = "mingw_build.bat" else: scriptname = "./unix_build.sh" if repo == "": # default to cwd repo = "." else: os.chdir(repo) ut.assert_exists(scriptname) normbuild_flag = "--no-rmbuild" if ut.get_argflag(normbuild_flag): scriptname += " " + normbuild_flag # Execute build ut.cmd(scriptname) # os.system(scriptname) print("L**** stdbuild *******")
def exec_(script):
    import utool as ut

    print("+**** exec %s script *******" % (script.type_))
    print("repo = %r" % (repo,))
    with ut.ChdirContext(repo.dpath):
        if script.is_fpath_valid():
            normbuild_flag = "--no-rmbuild"
            if ut.get_argflag(normbuild_flag):
                ut.cmd(script.fpath + " " + normbuild_flag)
            else:
                ut.cmd(script.fpath)
        else:
            if script.text is not None:
                print("ABOUT TO EXECUTE")
                ut.print_code(script.text, "bash")
                if ut.are_you_sure("execute above script?"):
                    from os.path import join

                    scriptdir = ut.ensure_app_resource_dir("utool", "build_scripts")
                    script_path = join(
                        scriptdir,
                        "script_" + script.type_ + "_" + ut.hashstr27(script.text) + ".sh",
                    )
                    ut.writeto(script_path, script.text)
                    _ = ut.cmd("bash ", script_path)  # NOQA
            else:
                print("CANT QUITE EXECUTE THIS YET")
                ut.print_code(script.text, "bash")
    # os.system(scriptname)
    print("L**** exec %s script *******" % (script.type_))
def show_chip_distinctiveness_plot(chip, kpts, dstncvs, fnum=1, pnum=None):
    import plottool as pt
    pt.figure(fnum, pnum=pnum)
    ax = pt.gca()
    divider = pt.ensure_divider(ax)
    #ax1 = divider.append_axes("left", size="50%", pad=0)
    ax1 = ax
    ax2 = divider.append_axes("bottom", size="100%", pad=0.05)
    #f, (ax1, ax2) = pt.plt.subplots(1, 2, sharex=True)
    cmapstr = 'rainbow'  # 'hot'
    color_list = pt.df2.plt.get_cmap(cmapstr)(ut.norm_zero_one(dstncvs))
    sortx = dstncvs.argsort()
    #pt.df2.plt.plot(qfx2_dstncvs[sortx], c=color_list[sortx])
    pt.plt.sca(ax1)
    pt.colorline(np.arange(len(sortx)), dstncvs[sortx],
                 cmap=pt.plt.get_cmap(cmapstr))
    pt.gca().set_xlim(0, len(sortx))
    pt.dark_background()

    pt.plt.sca(ax2)
    pt.imshow(chip, darken=.2)
    # MATPLOTLIB BUG CANNOT SHOW DIFFERENT ALPHA FOR POINTS AND KEYPOINTS AT ONCE
    #pt.draw_kpts2(kpts, pts_color=color_list, ell_color=color_list, ell_alpha=.1, ell=True, pts=True)
    #pt.draw_kpts2(kpts, color_list=color_list, pts_alpha=1.0, pts_size=1.5,
    #              ell=True, ell_alpha=.1, pts=False)
    ell = ut.get_argflag('--ell')
    pt.draw_kpts2(kpts, color_list=color_list, pts_alpha=1.0, pts_size=1.5,
                  ell=ell, ell_alpha=.3, pts=not ell)
    pt.plt.sca(ax)
def ensure_text(fname, text, repo_dpath='.', force=None, locals_={}, chmod=None):
    """
    Args:
        fname (str): file name
        text (str):
        repo_dpath (str): directory path string (default = '.')
        force (bool): (default = False)
        locals_ (dict): (default = {})

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_project import *  # NOQA
        >>> import utool as ut
        >>> result = setup_repo()
        >>> print(result)
    """
    import utool as ut
    ut.colorprint('Ensuring fname=%r' % (fname), 'yellow')
    # if not fname.endswith('__init__.py'):
    #     # HACK
    #     return
    if force is None and ut.get_argflag('--force-%s' % (fname,)):
        force = True
    text_ = ut.remove_codeblock_syntax_sentinals(text)
    fmtkw = locals_.copy()
    fmtkw['fname'] = fname
    text_ = text_.format(**fmtkw) + '\n'
    fpath = join(repo_dpath, fname)
    ut.dump_autogen_code(fpath, text_)
def testdata_ibeis(**kwargs):
    """
    DEPRICATE
    Step 1: builds ibs for testing

    Example:
        >>> from ibeis.algo.hots.smk.smk_debug import *  # NOQA
        >>> kwargs = {}
    """
    print(' === Test Data IBEIS ===')
    print('kwargs = ' + ut.dict_str(kwargs))
    print('[smk_debug] testdata_ibeis')
    db = kwargs.get('db', ut.get_argval('--db', str, 'PZ_MTEST'))
    #with ut.Indenter('ENSURE'):
    if db == 'PZ_MTEST':
        ibeis.ensure_pz_mtest()
    ibs = ibeis.opendb(db=db)
    ibs._default_config()
    aggregate = kwargs.get('aggregate', ut.get_argflag(('--agg', '--aggregate')))
    nWords = kwargs.get(
        'nWords', ut.get_argval(('--nWords', '--nCentroids'), int, default=8E3))
    nAssign = kwargs.get(
        'nAssign', ut.get_argval(('--nAssign', '--K'), int, default=10))
    # Configs
    ibs.cfg.query_cfg.pipeline_root = 'smk'
    ibs.cfg.query_cfg.smk_cfg.aggregate = aggregate
    ibs.cfg.query_cfg.smk_cfg.smk_alpha = 3
    ibs.cfg.query_cfg.smk_cfg.smk_thresh = 0
    ibs.cfg.query_cfg.smk_cfg.vocabtrain_cfg.nWords = nWords
    ibs.cfg.query_cfg.smk_cfg.vocabassign_cfg.nAssign = nAssign
    if ut.VERYVERBOSE:
        ibs.cfg.query_cfg.smk_cfg.printme3()
    return ibs
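# Hedged usage sketch: every parameter above is kwargs.get(key, ut.get_argval(...)),
# so an explicit keyword argument wins over the command line. The values below
# are placeholders for illustration (commented out because they require an
# ibeis test database to run):
# ibs = testdata_ibeis()                                        # CLI / defaults
# ibs = testdata_ibeis(db='PZ_MTEST', nWords=8000, nAssign=10)  # kwargs override CLI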
def _init_ibeis(dbdir=None, verbose=None, use_cache=True, web=None, **kwargs):
    """
    Private function that calls code to create an ibeis controller
    """
    import utool as ut
    from ibeis import params
    from ibeis.control import IBEISControl
    if verbose is None:
        verbose = ut.VERBOSE
    if verbose and NOT_QUIET:
        print('[main] _init_ibeis()')
    # Use command line dbdir unless user specifies it
    if dbdir is None:
        ibs = None
        ut.printWARN('[main!] WARNING args.dbdir is None')
    else:
        kwargs = kwargs.copy()
        request_dbversion = kwargs.pop('request_dbversion', None)
        asproxy = kwargs.pop('asproxy', None)
        ibs = IBEISControl.request_IBEISController(
            dbdir=dbdir, use_cache=use_cache,
            request_dbversion=request_dbversion, asproxy=asproxy)
        if web is None:
            web = ut.get_argflag(
                ('--webapp', '--webapi', '--web', '--browser'),
                help_='automatically launch the web app / web api')
            #web = params.args.webapp
        if web:
            from ibeis.web import app
            port = params.args.webport
            app.start_from_ibeis(ibs, port=port, **kwargs)
    return ibs
def ensure_text(fname, text, repo_dpath='.', force=None, locals_={}, chmod=None):
    """
    Args:
        fname (str): file name
        text (str):
        repo_dpath (str): directory path string (default = '.')
        force (bool): (default = False)
        locals_ (dict): (default = {})

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_git import *  # NOQA
        >>> import utool as ut
        >>> result = setup_repo()
        >>> print(result)
    """
    import utool as ut
    ut.colorprint('Ensuring fname=%r' % (fname), 'yellow')
    if force is None and ut.get_argflag('--force-%s' % (fname,)):
        force = True
    fpath = join(repo_dpath, fname)
    if force or not ut.checkpath(fpath, verbose=2, n=5):
        text_ = ut.remove_codeblock_syntax_sentinals(text)
        fmtkw = locals_.copy()
        fmtkw['fname'] = fname
        text_ = text_.format(**fmtkw) + '\n'
        ut.writeto(fpath, text_)
        try:
            if chmod:
                ut.chmod(fpath, chmod)
        except Exception as ex:
            ut.printex(ex, iswarning=True)
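# Hedged usage sketch for the ensure_text variants above: the template is
# formatted with locals_ plus the implicit {fname} key, and a per-file
# --force-<fname> flag can force the rewrite. The template content here is
# made up for illustration.
template = '\n'.join([
    '# {fname}',
    '# autogenerated stub, maintained by {author}',
])
ensure_text('README.md', template, repo_dpath='.', locals_={'author': 'me'})
# Passing --force-README.md on the command line forces a rewrite.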
def _sed(r, regexpr, repl, force=False, recursive=False, dpath_list=None):
    if True:
        import utool as ut
        force = ut.smart_cast2(force)
        ut.sed(regexpr, repl, force=force, recursive=recursive,
               dpath_list=dpath_list, verbose=True)
        return
    else:
        # _grep(r, [repl], dpath_list=dpath_list, recursive=recursive)
        force = rutil.cast(force, bool)
        recursive = rutil.cast(recursive, bool)
        import utool as ut
        pyext = ut.get_argflag("--pyext")
        if pyext:
            include_patterns = ["*.py"]
        else:
            include_patterns = ["*.py", "*.cxx", "*.cpp", "*.hxx", "*.hpp",
                                "*.c", "*.h", "*.pyx", "*.pxi"]
        if ut.get_argflag("--all"):
            include_patterns = ["*"]
        #
        if ut.get_argflag('--tex'):
            include_patterns = ["*.tex"]
        if dpath_list is None:
            dpath_list = [os.getcwd()]
        regexpr = extend_regex(regexpr)
        # import re
        print("sed-ing %r" % (dpath_list,))
        print(" * regular include_patterns : %r" % (include_patterns,))
        print(" * (orig) regular expression : %r" % (regexpr,))
        print(" * (origstr) regular expression : %s" % (regexpr,))
        # regexpr = re.escape(regexpr)
        print(" * regular expression : %r" % (regexpr,))
        print(" * (str)regular expression : %s" % (regexpr,))
        print(" * replacement : %r" % (repl,))
        print(" * recursive: %r" % (recursive,))
        print(" * force: %r" % (force,))
        if "\x08" in regexpr:
            print("Remember \\x08 != \\b")
            print("substituting it for you")
            regexpr = regexpr.replace("\x08", "\\b")
            print(" * regular expression : %r" % (regexpr,))

        # Walk through each directory recursively
        num_changed = 0
        for fpath in _matching_fnames(dpath_list, include_patterns,
                                      recursive=recursive):
            num_changed += len(__regex_sedfile(fpath, regexpr, repl, force))
        print("total lines changed = %r" % (num_changed,))
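# `ut.sed` itself is not shown in this excerpt. As a rough stdlib
# approximation of what the fallback branch above does (recursive walk, glob
# filter, in-place regex replace), hedged and simplified; sed_sketch is a
# hypothetical helper:
import fnmatch
import os
import re


def sed_sketch(regexpr, repl, dpath='.', patterns=('*.py',), force=False):
    num_changed = 0
    for root, _, fnames in os.walk(dpath):
        for fname in fnames:
            if not any(fnmatch.fnmatch(fname, p) for p in patterns):
                continue
            fpath = os.path.join(root, fname)
            with open(fpath, 'r', encoding='utf-8', errors='ignore') as file_:
                old = file_.read()
            new, nsubs = re.subn(regexpr, repl, old)
            if nsubs and force:
                # Only write back when force is requested, like _sed above
                with open(fpath, 'w', encoding='utf-8') as file_:
                    file_.write(new)
            num_changed += nsubs
    print('total substitutions = %r' % (num_changed,))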
def dump_autogen_code(fpath, autogen_text, codetype='python', fullprint=None):
    """
    Helper that writes a file if -w is given on the command line; otherwise it
    just prints the text out. It also has the option of comparing a diff to
    the file.
    """
    import utool as ut
    dowrite = ut.get_argflag(('-w', '--write'))
    show_diff = ut.get_argflag('--diff')
    num_context_lines = ut.get_argval('--diff', type_=int, default=None)
    show_diff = show_diff or num_context_lines is not None

    if fullprint is None:
        fullprint = True

    if fullprint is False:
        fullprint = ut.get_argflag('--print')

    print('[autogen] Autogenerated %s...\n+---\n' % (fpath,))
    if not dowrite:
        if fullprint:
            ut.print_code(autogen_text, lexer_name=codetype)
            print('\nL___')
        else:
            print('specify --print to write to stdout')
        print('specify -w to write, or --diff to compare')
        print('...would write to: %s' % fpath)
    if show_diff:
        if ut.checkpath(fpath, verbose=True):
            prev_text = ut.read_from(fpath)
            textdiff = ut.get_textdiff(prev_text, autogen_text,
                                       num_context_lines=num_context_lines)
            try:
                ut.print_difftext(textdiff)
            except UnicodeDecodeError:
                import unicodedata
                textdiff = unicodedata.normalize('NFKD', textdiff).encode('ascii', 'ignore')
                ut.print_difftext(textdiff)
        if dowrite:
            print('WARNING: Not writing. Remove --diff from command line')
    elif dowrite:
        ut.write_to(fpath, autogen_text)
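# Hedged usage sketch of the flag behavior above: with no flags the result is
# only summarized, --print dumps it, --diff compares against the existing
# file, and -w/--write persists it. The generated text is a placeholder.
autogen_text = '\n'.join([
    'def autogen_stub():',
    '    return 42',
])
dump_autogen_code('autogen_stub.py', autogen_text, codetype='python')
# $ python gen.py            -> preview summary only
# $ python gen.py --print    -> dump the generated code to stdout
# $ python gen.py --diff     -> diff against the existing autogen_stub.py
# $ python gen.py -w         -> actually write the file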
def get_extract_tuple(aid, fx, k=-1):
    rchip = ibs.get_annot_chips(aid)
    kp = ibs.get_annot_kpts(aid)[fx]
    sift = ibs.get_annot_vecs(aid)[fx]
    if not ut.get_argflag('--texknormplot'):
        aidstr = vh.get_aidstrs(aid)
        nidstr = vh.get_nidstrs(ibs.get_annot_nids(aid))
        id_str = ' ' + aidstr + ' ' + nidstr + ' fx=%r' % (fx,)
    else:
        id_str = nidstr = aidstr = ''
    info = ''
    if k == -1:
        if pt.is_texmode():
            info = '\\vspace{1cm}'
            info += 'Query $\\mathbf{d}_i$'
            info += '\n\\_'
            info += '\n\\_'
        else:
            if len(id_str) > 0:
                info = 'Query: %s' % (id_str,)
            else:
                info = 'Query'
        type_ = 'Query'
    elif k < K:
        type_ = 'Match'
        if ut.get_argflag('--texknormplot') and pt.is_texmode():
            #info = 'Match:\n$k=%r$, $\\frac{||\\mathbf{d}_i - \\mathbf{d}_j||}{Z}=%.3f$' % (k, qfx2_dist[0, k])
            info = '\\vspace{1cm}'
            info += 'Match: $\\mathbf{d}_{j_%r}$\n$\\textrm{dist}=%.3f$' % (k, qfx2_dist[0, k])
            info += '\n$s_{\\tt{LNBNN}}=%.3f$' % (qfx2_dist[0, K + Knorm - 1] - qfx2_dist[0, k])
        else:
            info = 'Match:%s\nk=%r, dist=%.3f' % (id_str, k, qfx2_dist[0, k])
            info += '\nLNBNN=%.3f' % (qfx2_dist[0, K + Knorm - 1] - qfx2_dist[0, k])
    elif k < Knorm + K:
        type_ = 'Norm'
        if ut.get_argflag('--texknormplot') and pt.is_texmode():
            #info = 'Norm: $j_%r$\ndist=%.3f' % (id_str, k, qfx2_dist[0, k])
            info = '\\vspace{1cm}'
            info += 'Norm: $j_%r$\n$\\textrm{dist}=%.3f$' % (k, qfx2_dist[0, k])
            info += '\n\\_'
        else:
            info = 'Norm: %s\n$k=%r$, dist=$%.3f$' % (id_str, k, qfx2_dist[0, k])
    else:
        raise Exception('[viz] problem k=%r' % (k,))
    return (rchip, kp, sift, fx, aid, info, type_)
def show_name(ibs, nid, in_image=True, fnum=0, sel_aids=[], subtitle='',
              annote=False, aid_list=None, index_list=None, **kwargs):
    r"""
    Args:
        ibs (IBEISController): ibeis controller object
        nid (?):
        in_image (bool):
        fnum (int): figure number
        sel_aids (list):
        subtitle (str):
        annote (bool):

    CommandLine:
        python -m ibeis.viz.viz_name --test-show_name --dpath ~/latex/crall-candidacy-2015 --save 'figures/{name}.jpg' --no-figtitle --notitle --db NNP_Master3 --figsize=9,4 --clipwhite --dpi=180 --adjust=.05 --index_list=[0,1,2,3] --rc=2,4 --append temp_out_figure.tex --name=IBEIS_PZ_0739 --no-draw_lbls --doboth --no-inimage --diskshow
        python -m ibeis.viz.viz_name --test-show_name --no-figtitle --notitle --db NNP_Master3 --figsize=9,4 --clipwhite --dpi=180 --adjust=.05 --index_list=[0,1,2,3] --rc=2,4 --append temp_out_figure.tex --name=IBEIS_PZ_0739 --no-draw_lbls --doboth --no-inimage --show
        python -m ibeis.viz.viz_name --test-show_name --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.viz.viz_name import *  # NOQA
        >>> ibs, nid, in_image, index_list = testdata_showname()
        >>> fnum = 0
        >>> sel_aids = []
        >>> subtitle = ''
        >>> annote = False
        >>> # execute function
        >>> show_name(ibs, nid, in_image, fnum, sel_aids, subtitle, annote, index_list=index_list)
        >>> ut.show_if_requested()
    """
    print('[viz_name] show_name nid=%r, index_list=%r, aid_list=%r' % (nid, index_list, aid_list))
    if aid_list is None:
        aid_list = ibs.get_name_aids(nid)
    else:
        assert ut.list_all_eq_to(ibs.get_annot_nids(aid_list), nid)
    if index_list is not None:
        aid_list = ut.take(aid_list, index_list)
    name = ibs.get_name_texts((nid,))
    print('[viz_name] * name=%r aid_list=%r' % (name, aid_list))

    show_multiple_chips(ibs, aid_list, in_image=in_image, fnum=fnum,
                        sel_aids=sel_aids, annote=annote, **kwargs)

    if isinstance(nid, np.ndarray):
        nid = nid[0]
    if isinstance(name, np.ndarray):
        name = name[0]

    use_figtitle = not ut.get_argflag('--no-figtitle')
    if use_figtitle:
        figtitle = 'Name View nid=%r name=%r' % (nid, name)
        df2.set_figtitle(figtitle)
def _load_named_config(ibs, cfgname=None):
    r"""
    """
    # TODO: update cfgs between versions
    # Try to load previous config otherwise default
    #use_config_cache = not (ut.is_developer() and not ut.get_argflag(('--nocache-pref',)))
    use_config_cache = not ut.get_argflag(('--nocache-pref',))
    ibs.cfg = Config.load_named_config(cfgname, ibs.get_dbdir(), use_config_cache)
    ibs.reset_table_cache()
def testdata_expts(defaultdb='testdb1',
                   default_acfgstr_name_list=['default:qindex=0:10:4,dindex=0:20'],
                   default_test_cfg_name_list=['default'],
                   a=None,
                   t=None,
                   qaid_override=None,
                   daid_override=None,
                   initial_aids=None,
                   ):
    """
    Use this if you want data from an experiment.
    Command line interface to quickly get testdata for test_results.

    Command line flags can be used to specify db, aidcfg, pipecfg, qaid
    override, daid override (and maybe initial aids).
    """
    print('[main_helpers] testdata_expts')
    import ibeis
    from ibeis.expt import experiment_harness
    from ibeis.expt import test_result
    if a is not None:
        default_acfgstr_name_list = a
    if t is not None:
        default_test_cfg_name_list = t

    if isinstance(default_acfgstr_name_list, six.string_types):
        default_acfgstr_name_list = [default_acfgstr_name_list]
    if isinstance(default_test_cfg_name_list, six.string_types):
        default_test_cfg_name_list = [default_test_cfg_name_list]

    #from ibeis.expt import experiment_helpers
    ibs = ibeis.opendb(defaultdb=defaultdb)
    acfg_name_list = ut.get_argval(('--aidcfg', '--acfg', '-a'), type_=list,
                                   default=default_acfgstr_name_list)
    test_cfg_name_list = ut.get_argval(('-t', '-p'), type_=list,
                                       default=default_test_cfg_name_list)
    daid_override = ut.get_argval(('--daid-override', '--daids-override'),
                                  type_=list, default=daid_override)
    qaid_override = ut.get_argval(('--qaid', '--qaids-override', '--qaid-override'),
                                  type_=list, default=qaid_override)

    # Hack a cache here
    use_bigtest_cache3 = not ut.get_argflag(('--nocache', '--nocache-hs'))
    use_bigtest_cache3 &= ut.is_developer()
    use_bigtest_cache3 &= False
    if use_bigtest_cache3:
        from os.path import dirname, join
        cache_dir = ut.ensuredir(join(dirname(ut.get_module_dir(ibeis)),
                                      'BIG_TESTLIST_CACHE3'))
        load_testres = ut.cached_func('testreslist', cache_dir=cache_dir)(
            experiment_harness.run_test_configurations2)
    else:
        load_testres = experiment_harness.run_test_configurations2
    testres_list = load_testres(
        ibs, acfg_name_list, test_cfg_name_list, qaid_override=qaid_override,
        daid_override=daid_override, initial_aids=initial_aids)
    testres = test_result.combine_testres_list(ibs, testres_list)

    print(testres)
    return ibs, testres
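# Hedged usage sketch: a/t pin the annotation and pipeline configs in code
# while -a/-t/-p command-line flags can still override them. The config
# strings are just the defaults from the signature above (commented out
# because running this requires an ibeis test database):
# ibs, testres = testdata_expts(
#     defaultdb='testdb1',
#     a=['default:qindex=0:10:4,dindex=0:20'],  # overridable via -a
#     t=['default'],                            # overridable via -t / -p
# )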
def test_feats(ibs, qaid_list, daid_list=None):
    """
    test_feats shows features using several different parameters

    Args:
        ibs (IBEISController):
        qaid_list (int): query annotation id

    CommandLine:
        python dev.py -t test_feats --db PZ_MTEST --all --qindex 0 --show -w

    Example:
        >>> import ibeis
        >>> ibs = ibeis.opendb('testdb1')
        >>> qaid_list = [1]
    """
    from ibeis import viz
    from ibeis.expt import experiment_configs
    import utool as ut
    NUM_PASSES = 1 if not utool.get_argflag('--show') else 2
    varyparams_list = [experiment_configs.featparams]

    def test_featcfg_combo(ibs, aid, alldictcomb, count, nKpts_list, cfgstr_list):
        for dict_ in ut.progiter(alldictcomb, lbl='FeatCFG Combo: '):
            # Set ibs parameters to the current config
            for key_, val_ in six.iteritems(dict_):
                ibs.cfg.feat_cfg[key_] = val_
            cfgstr_ = ibs.cfg.feat_cfg.get_cfgstr()
            if count == 0:
                # On first run just record info
                kpts = ibs.get_annot_kpts(aid)
                nKpts_list.append(len(kpts))
                cfgstr_list.append(cfgstr_)
            if count == 1:
                kpts = ibs.get_annot_kpts(aid)
                # If second run happens display info
                cfgpackstr = utool.packstr(cfgstr_, textwidth=80,
                                           breakchars=',', newline_prefix='',
                                           break_words=False, wordsep=',')
                title_suffix = (' len(kpts) = %r \n' % len(kpts)) + cfgpackstr
                viz.show_chip(ibs, aid, fnum=pt.next_fnum(),
                              title_suffix=title_suffix, darken=.8,
                              ell_linewidth=2, ell_alpha=.6)

    alldictcomb = utool.flatten(map(utool.all_dict_combinations, varyparams_list))
    for count in range(NUM_PASSES):
        nKpts_list = []
        cfgstr_list = []
        for aid in qaid_list:
            test_featcfg_combo(ibs, aid, alldictcomb, count, nKpts_list, cfgstr_list)
            #for dict_ in alldictcomb:
        if count == 0:
            nKpts_list = np.array(nKpts_list)
            cfgstr_list = np.array(cfgstr_list)
            print(get_sortbystr(cfgstr_list, nKpts_list, 'cfg', 'nKpts'))
def learn_k():
    r"""
    CommandLine:
        python -m ibeis.other.optimize_k --test-learn_k
        python -m ibeis.other.optimize_k --test-learn_k --show
        python -m ibeis.other.optimize_k --test-learn_k --show --dummy

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.other.optimize_k import *  # NOQA
        >>> import plottool as pt
        >>> # build test data
        >>> # execute function
        >>> known_nd_data, known_target_points, given_data_dims, opt_model_params = learn_k()
        >>> # verify results
        >>> ut.quit_if_noshow()
        >>> plot_search_surface(known_nd_data, known_target_points, given_data_dims, opt_model_params)
        >>> pt.all_figures_bring_to_front()
        >>> pt.show_if_requested()
    """
    # Compute Training Data
    varydict = {
        #'K': [4, 7, 10, 13, 16, 19, 22, 25][:4],
        #'K': [1, 2, 3, 4, 8, 10, 13, 15],
        'K': [1, 2, 4, 8, 16],
        #'nDaids': [20, 100, 250, 500, 750, 1000],
    }
    nDaids_basis = [20, 30, 50, 75, 100, 200, 250, 300, 325, 350, 400, 500,
                    600, 750, 800, 900, 1000, 1500]
    DUMMY = ut.get_argflag('--dummy')
    if DUMMY:
        nDaids_list, K_list, nError_list = test_training_data(varydict, nDaids_basis)
        nError_list = nError_list.astype(np.float32) / nError_list.max()
    else:
        dbname = ut.get_argval('--db', default='PZ_Master0')
        ibs = ibeis.opendb(dbname)
        verbose = False
        qaids, daids_list = collect_ibeis_training_annotations(ibs, nDaids_basis, verbose=verbose)
        nDaids_list, K_list, nError_list = evaluate_training_data(
            ibs, qaids, daids_list, varydict, nDaids_basis, verbose=verbose)
        nError_list = nError_list.astype(np.float32) / len(qaids)
        print('\nFinished Get Training Data')
        print('len(qaids) = %r' % (len(qaids)))
        print(ut.get_stats_str(nError_list))

    #unique_nDaids = np.unique(nDaids_list)

    # Alias to general optimization problem
    known_nd_data = np.vstack([nDaids_list, K_list]).T
    known_target_points = nError_list
    # Mark the data we are given vs what we want to learn
    given_data_dims = [0]
    #learn_data_dims = [1]

    # Minimize K params
    opt_model_params = minimize_compute_K_params(known_nd_data,
                                                 known_target_points,
                                                 given_data_dims)
    return known_nd_data, known_target_points, given_data_dims, opt_model_params
def initialize_job_manager(ibs):
    """
    Starts a background zmq job engine. Initializes a zmq object in this
    thread that can talk to the background processes.

    Run from the webserver

    CommandLine:
        python -m ibeis.web.job_engine --exec-initialize_job_manager:0

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.web.job_engine import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> from ibeis.web import apis_engine
        >>> from ibeis.web import job_engine
        >>> ibs.load_plugin_module(job_engine)
        >>> ibs.load_plugin_module(apis_engine)
        >>> ibs.initialize_job_manager()
        >>> print('Initialization success. Now closing')
        >>> ibs.close_job_manager()
        >>> print('Closing success.')

    Example:
        >>> # WEB_DOCTEST
        >>> from ibeis.web.job_engine import *  # NOQA
        >>> import ibeis
        >>> import requests
        >>> web_instance = ibeis.opendb_bg_web(db='testdb1')
        >>> baseurl = 'http://127.0.1.1:5000'
        >>> _payload = {'image_attrs_list': [], 'annot_attrs_list': []}
        >>> payload = ut.map_dict_vals(ut.to_json, _payload)
        >>> resp1 = requests.post(baseurl + '/api/test/helloworld/?f=b', data=payload)
        >>> #resp2 = requests.post(baseurl + '/api/image/json/', data=payload)
        >>> #print(resp2)
        >>> web_instance.terminate()
        >>> #json_dict = resp2.json()
        >>> #text = json_dict['response']
        >>> #print(text)
    """
    ibs.job_manager = ut.DynStruct()
    ibs.job_manager.jobiface = JobInterface(id_=0)

    if not ut.get_argflag('--fg'):
        ibs.job_manager.reciever = JobBackend()
        ibs.job_manager.reciever.initialize_background_processes(dbdir=ibs.get_dbdir())

    ibs.job_manager.jobiface.initialize_client_thread()
    # Wait until the collector becomes live
    while 0 and True:
        result = ibs.get_job_status(-1)
        print('result = %r' % (result,))
        if result['status'] == 'ok':
            break
def _mpl_set_backend(target_backend):
    import matplotlib as mpl
    if ut.get_argflag('--leave-mpl-backend-alone'):
        print('[pt] LEAVE THE BACKEND ALONE !!! was specified')
        print('[pt] not changing mpl backend')
    else:
        #mpl.use(target_backend, warn=True, force=True)
        mpl.use(target_backend, warn=True, force=False)
        #mpl.use(target_backend, warn=False, force=False)
    current_backend = mpl.get_backend()
    if not ut.QUIET and ut.VERBOSE:
        print('[pt] current backend is: %r' % current_backend)
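# Hedged driver sketch for a CLI-guarded backend switch like the helper
# above. The 'Agg' backend is chosen here only for illustration; note that
# recent matplotlib releases removed the `warn` keyword used above, so this
# sketch sticks to the plain call.
import sys

import matplotlib as mpl

if '--leave-mpl-backend-alone' not in sys.argv:
    mpl.use('Agg', force=False)
print('current backend is: %r' % (mpl.get_backend(),))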
def TIME_GEN_PREPROC_IMG(ibs):
    from ibeis.algo.preproc.preproc_image import add_images_params_gen
    print('[TIME_GEN_PREPROC_IMG]')
    gid_list = ibs.get_valid_gids()
    gpath_list = ibs.get_image_paths(gid_list)

    # STABILITY
    if not utool.get_argflag('--nostable'):
        # TEST 1
        with utool.Timer('parallel chunksize=1'):
            output1 = list(add_images_params_gen(gpath_list, chunksize=1))
        print(utool.truncate_str(str(output1), 80))
        assert len(output1) == len(gpath_list), 'chunksize changes output'

        # TEST 2
        with utool.Timer('parallel chunksize=2'):
            output2 = list(add_images_params_gen(gpath_list, chunksize=2))
        print(utool.truncate_str(str(output2), 80))
        assert output1 == output2, 'chunksize changes output'

        # TEST N
        with utool.Timer('parallel chunksize=None'):
            outputN = list(add_images_params_gen(gpath_list, chunksize=None))
        print(utool.truncate_str(str(outputN), 80))
        assert outputN == output2, 'chunksize changes output'

    # BENCHMARK
    setup = utool.unindent(
        '''
        from ibeis.algo.preproc.preproc_image import add_images_params_gen
        genkw = dict(prog=False, verbose=True)
        gpath_list = %r
        ''' % (gpath_list,))
    print(utool.truncate_str(str(gpath_list), 80))
    print('Processing %d images' % (len(gpath_list),))
    timeit3 = partial(timeit2, setup=setup, number=3)
    timeit3('list(add_images_params_gen(gpath_list, chunksize=None, **genkw))')
    timeit3('list(add_images_params_gen(gpath_list, chunksize=None, **genkw))')
    timeit3('list(add_images_params_gen(gpath_list, chunksize=1, **genkw))')
    timeit3('list(add_images_params_gen(gpath_list, chunksize=2, **genkw))')
    timeit3('list(add_images_params_gen(gpath_list, chunksize=4, **genkw))')
    timeit3('list(add_images_params_gen(gpath_list, chunksize=8, **genkw))')
    timeit3('list(add_images_params_gen(gpath_list, chunksize=16, **genkw))')
    timeit3('list(add_images_params_gen(gpath_list, chunksize=32, **genkw))')
    print('[/TIME_GEN_PREPROC_IMG]')
    return locals()
def get_args_dbdir(defaultdb=None, allow_newdir=False, db=None, dbdir=None,
                   cache_priority=False):
    """ Machinery for finding a database directory.

    This is a hacky function with bad coding. It needs to just return a
    database dir and use the following priority: dbdir, db, cache,
    something like that...
    """
    if not ut.QUIET and ut.VERBOSE:
        print('[sysres] get_args_dbdir: parsing commandline for dbdir')
        print('[sysres] defaultdb=%r, allow_newdir=%r, cache_priority=%r' % (defaultdb, allow_newdir, cache_priority))
        print('[sysres] db=%r, dbdir=%r' % (db, dbdir))

    if ut.get_argflag('--nodbcache'):
        return dbdir

    def _db_arg_priorty(dbdir_, db_):
        invalid = ['', ' ', '.', 'None']
        # Invalidate bad db's
        if dbdir_ in invalid:
            dbdir_ = None
        if db_ in invalid:
            db_ = None
        # Return values with a priority
        if dbdir_ is not None:
            return realpath(dbdir_)
        if db_ is not None:
            return db_to_dbdir(db_, allow_newdir=allow_newdir)
        return None

    if not cache_priority:
        # Check function's passed args
        dbdir = _db_arg_priorty(dbdir, db)
        if dbdir is not None:
            return dbdir

    # Get command line args
    dbdir = params.args.dbdir
    db = params.args.db
    # TODO; use these instead of params
    #ut.get_argval('--dbdir', return_was_specified=True)
    #ut.get_argval('--db', return_was_specified=True)

    # Check command line passed args
    dbdir = _db_arg_priorty(dbdir, db)
    if dbdir is not None:
        return dbdir

    # Return cached database directory
    if defaultdb == 'cache':
        return get_default_dbdir()
    else:
        return db_to_dbdir(defaultdb, allow_newdir=allow_newdir)
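# The resolution order above (explicit dbdir, then db, then the command line,
# then the cached default) condensed into a standalone sketch. This is an
# illustration of the priority logic only, not the real sysres implementation;
# resolve_dbdir_sketch and default_workdir are hypothetical.
import os.path


def resolve_dbdir_sketch(dbdir=None, db=None, cli_dbdir=None, cli_db=None,
                         default_workdir='~/data/work'):
    invalid = {None, '', ' ', '.', 'None'}
    for cand_dbdir, cand_db in [(dbdir, db), (cli_dbdir, cli_db)]:
        if cand_dbdir not in invalid:
            return os.path.realpath(cand_dbdir)  # explicit dbdir wins outright
        if cand_db not in invalid:
            # a bare db name is resolved relative to the work directory
            return os.path.join(os.path.expanduser(default_workdir), cand_db)
    return os.path.expanduser(default_workdir)  # cached / default fallback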
def define_custom_scripts(tpl_rman, wbia_rman, PY2, PY3):
    """
    export THEANO_FLAGS="device=cpu,print_active_device=True,enable_initial_driver_test=True"
    set THEANO_FLAGS=device=cpu,print_active_device=True,enable_initial_driver_test=True,print_test_value=True

    python -c "import pydot; print(pydot.__file__)"
    python -c "import pydot; print(pydot.__version__)"
    python -c "import pydot; print(pydot.find_graphviz())"
    DEVICE="cuda" python -c "import pygpu;pygpu.test()"
    python -c "import theano; print(theano.__file__)"
    # python -c "import pylearn2; print(pylearn2.__file__)"
    python -c "import lasagne; print(lasagne.__file__)"
    python -c "import wbia_cnn; print(wbia_cnn.__file__)"
    python -c "import detecttools; print(detecttools.__file__)"

    # http://stackoverflow.com/questions/18042919/how-to-install-pyqt5-on-a-new-virtualenv-and-work-on-an-idle
    pip install vext.pyqt5
    sudo apt-get install pyqt5-dev
    sudo apt-get install python3-pyqt5
    python
    python -c "import sip; print('[test] Python can import sip')"
    python -c "import sip; print('sip.__file__=%r' % (sip.__file__,))"
    python -c "import sip; print('sip.SIP_VERSION=%r' % (sip.SIP_VERSION,))"
    python -c "import sip; print('sip.SIP_VERSION_STR=%r' % (sip.SIP_VERSION_STR,))"

    ln -s /usr/lib/python3/dist-packages/PyQt5/ /home/joncrall/venv3/lib/python3.4/site-packages/PyQt5
    ln -s /usr/lib/python3/dist-packages/sip*.so /home/joncrall/venv3/lib/python3.4/site-packages/
    ln -s /usr/lib/python3/dist-packages/sip*.py /home/joncrall/venv3/lib/python3.4/site-packages/
    """
    import utool as ut

    major = str(sys.version_info.major)
    minor = str(sys.version_info.minor)
    majorminor = [major, minor]
    pyoff = '2' if sys.version_info.major == 3 else '3'
    pyon = majorminor[0]
    plat_spec = get_plat_specifier()
    # build_dname = 'build' + ''.join(majorminor)
    build_dname = 'cmake_builds/build' + plat_spec

    script_fmtdict = {
        'pyexe': sys.executable,
        'pyversion': 'python' + '.'.join(majorminor),
        'pypkg_var': 'PYTHON' + pyon + '_PACKAGES_PATH',
        'build_dname': build_dname,
        'pyoff': pyoff,
        'pyon': pyon,
        'cv_pyon_var': 'BUILD_opencv_python' + pyon,
        'cv_pyoff_var': 'BUILD_opencv_python' + pyoff,
        'plat_spec': plat_spec,
        'source_dpath': '../..',
        'libext': ut.get_lib_ext(),
    }

    if os.environ.get('VIRTUAL_ENV', '') == '':
        if sys.platform.startswith('darwin'):
            local_prefix = '/opt/local'
        else:
            local_prefix = '/usr/local'
    else:
        local_prefix = os.environ['VIRTUAL_ENV']

    opencv_dir = os.path.join(local_prefix, '/share/OpenCV')
    if not os.path.exists(opencv_dir):
        if not ut.get_argflag('--opencv'):
            opencv_dir = ''
            print('OpenCV is not installed in the expected location: {}'.format(opencv_dir))
            print('Running this script with --opencv will build and install it there')

    # define bash variables for different combinations of python distros and
    # virtual environments
    python_bash_setup = ut.codeblock(
        r"""
        # STARTBLOCK bash
        if [[ "$VIRTUAL_ENV" == "" ]]; then
            # The case where we are installing system-wide
            # It is recommended that a virtual environment is used instead
            export PYTHON_EXECUTABLE=$(which {pyversion})
            if [[ '$OSTYPE' == 'darwin'* ]]; then
                # Mac system info
                export LOCAL_PREFIX=/opt/local
                export {pypkg_var}=$($PYTHON_EXECUTABLE -c "import site; print(site.getsitepackages()[0])")
                export PYTHON_PACKAGES_PATH=${pypkg_var}
                export _SUDO="sudo"
            else
                # Linux system info
                export LOCAL_PREFIX=/usr/local
                export {pypkg_var}=$LOCAL_PREFIX/lib/{pyversion}/dist-packages
                export PYTHON_PACKAGES_PATH=${pypkg_var}
                export _SUDO="sudo"
            fi
            # No windows support here
        else
            # The preferred case where we are in a virtual environment
            export PYTHON_EXECUTABLE=$(which python)
            # export LOCAL_PREFIX=$VIRTUAL_ENV/local
            export LOCAL_PREFIX=$VIRTUAL_ENV
            export {pypkg_var}=$LOCAL_PREFIX/lib/{pyversion}/site-packages
            export PYTHON_PACKAGES_PATH=${pypkg_var}
            export _SUDO=""
        fi

        echo "LOCAL_PREFIX = $LOCAL_PREFIX"
        echo "{pypkg_var} = ${pypkg_var}"
        # ENDBLOCK bash
        """
    ).format(**script_fmtdict)
    script_fmtdict['python_bash_setup'] = python_bash_setup

    # ===================
    # PYFLANN SETUP SCRIPTS
    # ===================

    wbia_rman['pyflann'].add_script(
        'build',
        ut.codeblock(
            r"""
            # STARTBLOCK bash
            {python_bash_setup}
            cd {repo_dir}

            mkdir -p {build_dname}
            cd {build_dname}

            cmake -G "Unix Makefiles" \
                -DCMAKE_BUILD_TYPE="Release" \
                -DPYTHON_EXECUTABLE=$PYTHON_EXECUTABLE \
                -DBUILD_EXAMPLES=Off \
                -DBUILD_TESTS=Off \
                -DBUILD_PYTHON_BINDINGS=On \
                -DBUILD_MATLAB_BINDINGS=Off \
                -DBUILD_CUDA_LIB=Off\
                -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX\
                {source_dpath}

            export NCPUS=$(grep -c ^processor /proc/cpuinfo)
            make -j$NCPUS
            # ENDBLOCK bash
            """
        ).format(repo_dir=wbia_rman['pyflann'].dpath, **script_fmtdict),
    )

    wbia_rman['pyflann'].add_script(
        'install',
        ut.codeblock(
            r"""
            # STARTBLOCK bash
            # The pyflann source lives here
            cd {repo_dir}/src/python
            # Need to run build to move the libs to the build directory
            python setup.py build
            # Use pip to editable install
            pip install -e {repo_dir}/src/python

            # Old way of doing it
            # But the setup script is generated during build
            # python {repo_dir}/build/src/python/setup.py develop

            python -c "from vtool_ibeis._pyflann_backend import pyflann as pyflann; print(pyflann.__file__)" --verb-flann
            python -c "from vtool_ibeis._pyflann_backend import pyflann as pyflann; print(pyflann)" --verb-flann
            # ENDBLOCK bash
            """
        ).format(repo_dir=wbia_rman['pyflann'].dpath),
    )

    # ===================
    # HESAFF
    # ===================

    wbia_rman['hesaff'].add_script(
        'build',
        ut.codeblock(
            r"""
            # STARTBLOCK bash
            {python_bash_setup}
            cd $CODE_DIR/hesaff
            mkdir -p {build_dname}
            cd {build_dname}

            # only specify an explicit opencv directory if we know one exists
            if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then
                OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"
            else
                OPENCV_ARGS=""
            fi

            echo 'Configuring with cmake'
            if [[ '$OSTYPE' == 'darwin'* ]]; then
                cmake -G "Unix Makefiles" \
                    -DCMAKE_OSX_ARCHITECTURES=x86_64 \
                    -DCMAKE_C_COMPILER=clang2 \
                    -DCMAKE_CXX_COMPILER=clang2++ \
                    -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \
                    $OPENCV_ARGS \
                    {source_dpath}
            else
                cmake -G "Unix Makefiles" \
                    -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \
                    $OPENCV_ARGS \
                    {source_dpath}
            fi

            export NCPUS=$(grep -c ^processor /proc/cpuinfo)
            make -j$NCPUS

            export MAKE_EXITCODE=$?
            echo "MAKE_EXITCODE=$MAKE_EXITCODE"

            # Move the compiled library into the source folder
            if [[ $MAKE_EXITCODE == 0 ]]; then
                #make VERBOSE=1
                cp -v libhesaff{libext} {source_dpath}/pyhesaff/libhesaff{plat_spec}{libext}
            fi
            # ENDBLOCK
            """
        ).format(**script_fmtdict),
    )

    # ===================
    # PYDARKNET
    # ===================

    wbia_rman['pydarknet'].add_script(
        'build',
        ut.codeblock(
            r"""
            # STARTBLOCK bash
            {python_bash_setup}
            cd $CODE_DIR/pydarknet

            mkdir -p {build_dname}
            cd {build_dname}

            if [[ "$(which nvcc)" == "" ]]; then
                export CMAKE_CUDA=Off
            else
                export CMAKE_CUDA=On
            fi

            # only specify an explicit opencv directory if we know one exists
            if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then
                OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"
            else
                OPENCV_ARGS=""
            fi

            echo 'Configuring with cmake'
            if [[ '$OSTYPE' == 'darwin'* ]]; then
                export CONFIG="-DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_C_COMPILER=clang2 -DCMAKE_CXX_COMPILER=clang2++ -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
            else
                export CONFIG="-DCMAKE_BUILD_TYPE='Release' -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
            fi
            export CONFIG="$CONFIG -DCUDA=$CMAKE_CUDA"
            echo "CONFIG = $CONFIG"

            cmake $CONFIG -G 'Unix Makefiles' {source_dpath}

            #################################
            echo 'Building with make'
            export NCPUS=$(grep -c ^processor /proc/cpuinfo)
            make -j$NCPUS -w
            #################################

            export MAKE_EXITCODE=$?
            echo "MAKE_EXITCODE=$MAKE_EXITCODE"

            # Move the compiled library into the source folder
            if [[ $MAKE_EXITCODE == 0 ]]; then
                echo 'Moving the shared library'
                # cp -v lib* ../pydarknet
                cp -v lib*{libext} {source_dpath}/pydarknet
                # cp -v libdarknet{libext} {source_dpath}/pydarknet/libdarknet{plat_spec}{libext}
            fi
            # ENDBLOCK
            """
        ).format(**script_fmtdict),
    )

    # ===================
    # PYRF
    # ===================

    wbia_rman['pyrf'].add_script(
        'build',
        ut.codeblock(
            r"""
            # STARTBLOCK bash
            {python_bash_setup}
            cd $CODE_DIR/pyrf

            mkdir -p {build_dname}
            cd {build_dname}

            # only specify an explicit opencv directory if we know one exists
            if [ -d "$LOCAL_PREFIX/share/OpenCV" ]; then
                OPENCV_ARGS="-DOpenCV_DIR=$LOCAL_PREFIX/share/OpenCV"
            else
                OPENCV_ARGS=""
            fi

            echo 'Configuring with cmake'
            if [[ '$OSTYPE' == 'darwin'* ]]; then
                export CONFIG="-DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_C_COMPILER=clang2 -DCMAKE_CXX_COMPILER=clang2++ -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
            else
                export CONFIG="-DCMAKE_BUILD_TYPE='Release' -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX $OPENCV_ARGS"
            fi
            echo "CONFIG = $CONFIG"

            cmake $CONFIG -G 'Unix Makefiles' {source_dpath}

            #################################
            echo 'Building with make'
            export NCPUS=$(grep -c ^processor /proc/cpuinfo)
            make -j$NCPUS -w
            #################################

            export MAKE_EXITCODE=$?
            echo "MAKE_EXITCODE=$MAKE_EXITCODE"

            # Move the compiled library into the source folder
            if [[ $MAKE_EXITCODE == 0 ]]; then
                echo 'Moving the shared library'
                # cp -v lib* ../pyrf
                cp -v lib*{libext} {source_dpath}/pyrf
                # cp -v libpyrf{libext} {source_dpath}/pyrf/libpyrf{plat_spec}{libext}
            fi
            # ENDBLOCK
            """
        ).format(**script_fmtdict),
    )

    # ===================
    # OPENCV SETUP SCRIPTS
    # ===================
    """
    ./super_setup.py --dump-scripts
    """
    tpl_rman['cv2'].add_script(
        'build',
        ut.codeblock(
            r"""
            # STARTBLOCK bash
            {python_bash_setup}
            # Checkout opencv core
            cd $CODE_DIR
            # export REPO_DIR=$CODE_DIR/opencv
            export REPO_DIR={repo_dpath}
            # git clone https://github.com/Itseez/opencv.git
            cd $REPO_DIR
            # Checkout opencv extras
            git clone https://github.com/Itseez/opencv_contrib.git
            # cd opencv_contrib
            # git pull
            # cd ..
            # git pull
            mkdir -p $REPO_DIR/{build_dname}
            cd $REPO_DIR/{build_dname}

            cmake -G "Unix Makefiles" \
                -D WITH_OPENMP=ON \
                -D CMAKE_BUILD_TYPE=RELEASE \
                -D {cv_pyoff_var}=Off \
                -D {cv_pyon_var}=On \
                -D PYTHON_DEFAULT_EXECUTABLE="{pyexe}" \
                -D {pypkg_var}=${pypkg_var} \
                -D CMAKE_INSTALL_PREFIX=$LOCAL_PREFIX \
                -D OPENCV_EXTRA_MODULES_PATH=$REPO_DIR/opencv_contrib/modules \
                -D WITH_CUDA=Off \
                -D BUILD_opencv_dnn=Off \
                -D BUILD_opencv_dnn_modern=Off \
                -D WITH_VTK=Off \
                -D WITH_CUDA=Off \
                -D WITH_MATLAB=Off \
                $REPO_DIR
                # -D WITH_OPENCL=Off \
                # -D BUILD_opencv_face=Off \
                # -D BUILD_opencv_objdetect=Off \
                # -D BUILD_opencv_video=Off \
                # -D BUILD_opencv_videoio=Off \
                # -D BUILD_opencv_videostab=Off \
                # -D BUILD_opencv_ximgproc=Off \
                # -D BUILD_opencv_xobjdetect=Off \
                # -D BUILD_opencv_xphoto=Off \
                # -D BUILD_opencv_datasets=Off \
                # -D CXX_FLAGS="-std=c++11" \ %TODO

            export NCPUS=$(grep -c ^processor /proc/cpuinfo)
            make -j$NCPUS
            # ENDBLOCK
            """
        ).format(repo_dpath=ut.unexpanduser(tpl_rman['cv2'].dpath), **script_fmtdict),
    )

    tpl_rman['cv2'].add_script(
        'install',
        ut.codeblock(
            r"""
            # STARTBLOCK bash
            {python_bash_setup}

            cd $CODE_DIR/opencv/{build_dname}

            $_SUDO make install
            # Hack because cv2 does not want to be installed for some reason
            # cp lib/cv2.so $PYTHON_PACKAGES_PATH
            # Seems to work now that local is removed from prefix
            # cp -v lib/cv2.so $PYTHON_PACKAGES_PATH

            # Test to make sure things are working
            python -c "import numpy; print(numpy.__file__)"
            python -c "import numpy; print(numpy.__version__)"
            python -c "import cv2; print(cv2.__version__)"
            python -c "import cv2; print(cv2.__file__)"
            #python -c "import vtool_ibeis"

            # Check if we have contrib modules
            python -c "import cv2; print(cv2.xfeatures2d)"
            # ENDBLOCK
            """
        ).format(**script_fmtdict),
    )

    # if GET_ARGFLAG('--libgpuarray'):
    tpl_rman['libgpuarray'].add_script(
        'build',
        ut.codeblock(
            r"""
            # STARTBLOCK bash

            # Ensure the repo was checked out
            if [ ! -d {repo_dpath} ]; then
                git clone https://github.com/Theano/libgpuarray.git {repo_dpath}
            fi

            {python_bash_setup}
            cd {repo_dpath}

            # need a specific version of libgpuarray
            git checkout tags/v0.6.2 -b v0.6.2

            mkdir -p {repo_dpath}/{build_dname}
            cd {repo_dpath}/{build_dname}

            # First build the C library
            cmake {repo_dpath} -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$LOCAL_PREFIX
            export NCPUS=$(grep -c ^processor /proc/cpuinfo)
            make -j$NCPUS
            $_SUDO make install

            # Now build the python library
            cd {repo_dpath}
            python setup.py build_ext -L $LOCAL_PREFIX/lib -I $LOCAL_PREFIX/include
            python setup.py build
            # python setup.py install
            $_SUDO pip install -e {repo_dpath}

            # DEVICE="<test device>" python -c "import pygpu;pygpu.test()"
            # DEVICE="gpu0" python -c "import pygpu;pygpu.test()"
            cd ~
            $_SUDO pip install nose
            DEVICE="cuda" python -c "import pygpu;pygpu.test()"

            # pip uninstall pygpu
            # ENDBLOCK
            """
        ).format(repo_dpath=ut.unexpanduser(tpl_rman['libgpuarray'].dpath), **script_fmtdict),
    )

    # ===================
    # PYQT SETUP SCRIPTS
    # ===================

    if ut.in_virtual_env():
        try:
            fmtdict = {
                'sys_dist_packages': ut.get_global_dist_packages_dir(),
                'venv_site_packages': ut.get_site_packages_dir(),
                'pyqt': 'PyQt4' if PY2 else 'PyQt5',
                # Need the PyQt5 SVG module for IPython to work properly
                'debian-python-qt': (
                    'python-qt4' if PY2 else
                    'qt5-default python3-pyqt5 debian-python-qt-svg'),
                'pip-python-qt': 'python-qt4' if PY2 else 'python-qt5',
            }
            # sys_dist_packages = ut.get_global_dist_packages_dir()
            # sys_pyqt_dir = sys_dist_packages + '/{pyqt}'
            # Allows us to use a system qt install in a virtual environment.
            system_to_venv = ut.codeblock(
                r"""
                # STARTBLOCK bash
                # Creates a symlink to the global PyQt in a virtual env
                export GLOBAL_DIST_PACKAGES="{sys_dist_packages}"
                export VENV_DIST_PACKAGES="{venv_site_packages}"
                if [ -d $GLOBAL_DIST_PACKAGES/{pyqt} ]; then
                    echo "have qt"
                    ls $GLOBAL_DIST_PACKAGES/{pyqt}
                    ls $VENV_DIST_PACKAGES/{pyqt}
                else
                    # Ensure PyQt is installed first (FIXME make this work for non-debian systems)
                    sudo apt-get install {debian-python-qt}
                    # pip install {pip-python-qt}
                fi
                if [ -d $GLOBAL_DIST_PACKAGES/{pyqt} ]; then
                    # Install system pyqt packages into the virtual environment via symlink
                    ln -s $GLOBAL_DIST_PACKAGES/{pyqt}/ $VENV_DIST_PACKAGES/{pyqt}
                    ln -s $GLOBAL_DIST_PACKAGES/sip*.so $VENV_DIST_PACKAGES/
                    ln -s $GLOBAL_DIST_PACKAGES/sip*.py $VENV_DIST_PACKAGES/
                else
                    echo "{pyqt} DOES NOT SEEM TO BE INSTALLED ON THE SYSTEM"
                fi
                echo "testing"
                python -c "import {pyqt}; print({pyqt})"
                # ENDBLOCK bash
                """
            ).format(**fmtdict)
            # TODO: add custom build alternative
            tpl_rman['PyQt'].add_script('system_to_venv', system_to_venv)
        except NotImplementedError:
            pass
""" import logging import utool as ut import vtool as vt from six.moves import zip from os.path import abspath, dirname, expanduser, join, exists # NOQA import numpy as np import sys (print, rrr, profile) = ut.inject2(__name__, '[ssd]') logger = logging.getLogger('wbia') # SCRIPT_PATH = abspath(dirname(__file__)) SCRIPT_PATH = abspath(expanduser(join('~', 'code', 'ssd'))) if not ut.get_argflag('--no-ssd'): try: assert exists(SCRIPT_PATH) def add_path(path): # if path not in sys.path: sys.path.insert(0, path) # Add pycaffe to PYTHONPATH pycaffe_path = join(SCRIPT_PATH, 'python') add_path(pycaffe_path) import caffe rrr(caffe) from google.protobuf import text_format
def show_multiple_chips(ibs, aid_list, in_image=True, fnum=0, sel_aids=[], subtitle='', annote=False, **kwargs): """ CommandLine: python -m ibeis.viz.viz_name --test-show_multiple_chips --show --no-inimage python -m ibeis.viz.viz_name --test-show_multiple_chips --show --db NNP_Master3 --aids=6435,9861,137,6563,9167,12547,9332,12598,13285 --no-inimage --notitle python -m ibeis.viz.viz_name --test-show_multiple_chips --show --db NNP_Master3 --aids=137,6563,12547,9332,12598,13285 --no-inimage --notitle --adjust=.05 python -m ibeis.viz.viz_name --test-show_multiple_chips --show --db NNP_Master3 --aids=6563,9332,13285,12598 --no-inimage --notitle --adjust=.05 --rc=1,4 python -m ibeis.viz.viz_name --test-show_multiple_chips --show --db PZ_Master0 --aids=1288 --no-inimage --notitle --adjust=.05 python -m ibeis.viz.viz_name --test-show_multiple_chips --show --db PZ_Master0 --aids=4020,4839 --no-inimage --notitle --adjust=.05 python -m ibeis.viz.viz_name --test-show_multiple_chips --db NNP_Master3 --aids=6524,6540,6571,6751 --no-inimage --notitle --adjust=.05 --diskshow python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST -a default:index=0:4 --show --aids=1 --doboth --show --no-inimage python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST --aids=1 --doboth --show --no-inimage python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST --aids=1 --doboth --rc=2,1 --show --no-inimage python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST --aids=1 --doboth --rc=2,1 --show --notitle --trydrawline --no-draw_lbls python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST --aids=1,2 --doboth --show --notitle --trydrawline python -m ibeis.viz.viz_name --test-show_multiple_chips --db PZ_MTEST --aids=1,2,3,4,5 --doboth --rc=2,5 --show --chrlbl --trydrawline --qualtitle --no-figtitle --notitle --doboth --doboth --show python -m ibeis.viz.viz_name --test-show_multiple_chips --db NNP_Master3 --aids=15419 --doboth --rc=2,1 --show --notitle --trydrawline --no-draw_lbls Example: >>> # DISABLE_DOCTEST >>> from ibeis.viz.viz_name import * # NOQA >>> import ibeis >>> ibs, aid_list, in_image = testdata_multichips() >>> fnum = 0 >>> sel_aids = [] >>> subtitle = '' >>> annote = False >>> fig = show_multiple_chips(ibs, aid_list, in_image, fnum, sel_aids, subtitle, annote) >>> ut.quit_if_noshow() >>> fig.canvas.draw() >>> ut.show_if_requested() """ fnum = pt.ensure_fnum(fnum) nAids = len(aid_list) if nAids == 0: fig = df2.figure(fnum=fnum, pnum=(1, 1, 1), **kwargs) df2.imshow_null(fnum=fnum, **kwargs) return fig # Trigger computation of all chips in parallel ibsfuncs.ensure_annotation_data(ibs, aid_list, chips=(not in_image or annote), feats=annote) print('[viz_name] * annot_vuuid=%r' % ((ibs.get_annot_visual_uuids(aid_list), ))) print('[viz_name] * aid_list=%r' % ((aid_list, ))) DOBOTH = ut.get_argflag('--doboth') rc = ut.get_argval('--rc', type_=list, default=None) if rc is None: nRows, nCols = ph.get_square_row_cols(nAids * (2 if DOBOTH else 1)) else: nRows, nCols = rc notitle = ut.get_argflag('--notitle') draw_lbls = not ut.get_argflag('--no-draw_lbls') show_chip_kw = dict(annote=annote, in_image=in_image, notitle=notitle, draw_lbls=draw_lbls) #print('[viz_name] * r=%r, c=%r' % (nRows, nCols)) #gs2 = gridspec.GridSpec(nRows, nCols) pnum_ = df2.get_pnum_func(nRows, nCols) fig = df2.figure(fnum=fnum, pnum=pnum_(0), **kwargs) fig.clf() ax_list1 = [] for px, aid in enumerate(aid_list): print('px = %r' % (px, )) _fig, _ax1 = viz_chip.show_chip(ibs, 
aid=aid, pnum=pnum_(px), **show_chip_kw) print('other_aids = %r' % (ibs.get_annot_contact_aids(aid), )) ax = df2.gca() ax_list1.append(_ax1) if aid in sel_aids: df2.draw_border(ax, df2.GREEN, 4) if ut.get_argflag('--chrlbl') and not DOBOTH: ax.set_xlabel('(' + chr(ord('a') - 1 + px) + ')') elif ut.get_argflag('--numlbl') and not DOBOTH: ax.set_xlabel('(' + str(px + 1) + ')') #plot_aid3(ibs, aid) # HACK to show in image and not in image if DOBOTH: #ut.embed() #ph.get_plotdat_dict(ax_list1[1]) #ph.get_plotdat_dict(ax_list2[1]) ax_list2 = [] show_chip_kw['in_image'] = not show_chip_kw['in_image'] start = px + 1 for px, aid in enumerate(aid_list, start=start): _fig, _ax2 = viz_chip.show_chip(ibs, aid=aid, pnum=pnum_(px), **show_chip_kw) ax = df2.gca() ax_list2.append(_ax2) if ut.get_argflag('--chrlbl'): ax.set_xlabel('(' + chr(ord('a') - start + px) + ')') elif ut.get_argflag('--numlbl'): ax.set_xlabel('(' + str(px - start + 1) + ')') if ut.get_argflag('--qualtitle'): qualtext = ibs.get_annot_quality_texts(aid) ax.set_title(qualtext) if aid in sel_aids: df2.draw_border(ax, df2.GREEN, 4) if in_image: ax_list1, ax_list2 = ax_list2, ax_list1 if ut.get_argflag('--trydrawline'): # Unfinished #ut.embed() # Draw lines between corresponding axes # References: # http://stackoverflow.com/questions/17543359/drawing-lines-between-two-plots-in-matplotlib import matplotlib as mpl import vtool as vt # !!! #http://matplotlib.org/users/transforms_tutorial.html #invTransFigure_fn1 = fig.transFigure.inverted().transform #invTransFigure_fn2 = fig.transFigure.inverted().transform #print(ax_list1) #print(ax_list2) assert len(ax_list1) == len(ax_list2) for ax1, ax2 in zip(ax_list1, ax_list2): #_ = ax1.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) #bbox1 = (0, 0, _.width * fig.dpi, _.height * fig.dpi) # returns in figure coordinates #bbox1 = df2.get_axis_bbox(ax=ax1) #if bbox1[-1] < 0: # # Weird bug # bbox1 = bbox1[1] print('--') print('ax1 = %r' % (ax1, )) print('ax2 = %r' % (ax2, )) chipshape = ph.get_plotdat(ax1, 'chipshape') #_bbox1 = ax1.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) #bbox1 = (0, 0, _bbox1.width * fig.dpi, _bbox1.height * fig.dpi) bbox1 = (0, 0, chipshape[1], chipshape[0]) aid_ = ph.get_plotdat(ax2, 'aid') aid_list_ = ph.get_plotdat(ax2, 'aid_list') index = aid_list_.index(aid_) annotation_bbox_list = ph.get_plotdat(ax2, 'annotation_bbox_list') bbox2 = annotation_bbox_list[index] print('bbox1 = %r' % (bbox1, )) print('bbox2 = %r' % (bbox2, )) vert_list1 = np.array(vt.verts_from_bbox(bbox1)) vert_list2 = np.array(vt.verts_from_bbox(bbox2)) print('vert_list1 = %r' % (vert_list1, )) print('vert_list2 = %r' % (vert_list2, )) #for vx in [0, 1, 2, 3]: for vx in [0, 1]: vert1 = vert_list1[vx].tolist() vert2 = vert_list2[vx].tolist() print(' ***') print(' * vert1 = %r' % (vert1, )) print(' * vert2 = %r' % (vert2, )) coordsA = coordsB = 'data' #coords = 'axes points' #'axes fraction' #'axes pixels' #coordsA = 'axes pixels' #coordsB = 'data' #'figure fraction' #'figure pixels' #'figure pixels' #'figure points' #'polar' #'offset points' con = mpl.patches.ConnectionPatch(xyA=vert1, xyB=vert2, coordsA=coordsA, coordsB=coordsB, axesA=ax1, axesB=ax2, linewidth=1, color='k') #, arrowstyle="-") #ut.embed() #con.set_zorder(None) ax1.add_artist(con) #ax2.add_artist(con) #ut.embed() #verts2.T[1] -= bbox2[-1] #bottom_left1, bottom_right1 = verts1[1:3].tolist() #bottom_left2, bottom_right2 = verts2[1:3].tolist() ##transAxes1 = ax1.transData.inverted() #transAxes1_fn = 
ax1.transData.transform #transAxes2_fn = ax2.transData.transform #transAxes1_fn = ut.identity #transAxes2_fn = ut.identity #coord_bl1 = transFigure.transform(transAxes1.transform(bottom_left1)) #coord_br1 = transFigure.transform(transAxes1.transform(bottom_right1)) #coord_bl1 = invTransFigure_fn1(transAxes1_fn(bottom_left1)) #print('bottom_left2 = %r' % (bottom_left2,)) #coord_bl1 = (5, 5) #coord_bl2 = invTransFigure_fn2(transAxes2_fn(bottom_left2)) #print('coord_bl2 = %r' % (coord_bl2,)) #coord_br1 = invTransFigure_fn1(transAxes1_fn(bottom_right1)) #coord_br2 = invTransFigure_fn2(transAxes2_fn(bottom_right2)) ##print('coord_bl1 = %r' % (coord_bl1,)) #line_coords1 = np.vstack([coord_bl1, coord_bl2]) #line_coords2 = np.vstack([coord_br1, coord_br2]) #print('line_coords1 = %r' % (line_coords1,)) #line1 = mpl.lines.Line2D((line_coords1[0]), (line_coords1[1]), transform=fig.transFigure) #line2 = mpl.lines.Line2D((line_coords2[0]), (line_coords2[1]), transform=fig.transFigure) #xs1, ys1 = line_coords1.T #xs2, ys2 = line_coords2.T #linekw = dict(transform=fig.transFigure) #linekw = dict() #print('xs1 = %r' % (xs1,)) #print('ys1 = %r' % (ys1,)) #line1 = mpl.lines.Line2D(xs1, ys1, **linekw) #line2 = mpl.lines.Line2D(xs2, ys2, **linekw) # NOQA #shrinkA=5, shrinkB=5, mutation_scale=20, fc="w") #ax2.add_artist(con) #fig.lines.append(line1) #fig.lines.append(line2) pass return fig
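# NOTE: the ``--trydrawline`` branch above is explicitly marked "Unfinished".
# For reference, this is a minimal, self-contained sketch of the matplotlib
# technique it reaches for: ``ConnectionPatch`` draws a line between points
# that live in two *different* axes. Plain matplotlib only; nothing here is
# IBEIS-specific, and the demo data is made up.
import matplotlib.pyplot as plt
from matplotlib.patches import ConnectionPatch

def _connect_axes_demo():
    fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.plot([0, 1, 2], [0, 1, 4])
    ax2.plot([0, 1, 2], [4, 1, 0])
    # xyA is interpreted in ax1's data coordinates, xyB in ax2's;
    # the patch is then added to one of the axes, as in the code above.
    con = ConnectionPatch(xyA=(2, 4), xyB=(0, 4),
                          coordsA='data', coordsB='data',
                          axesA=ax1, axesB=ax2,
                          linewidth=1, color='k')
    ax1.add_artist(con)
    plt.show()

if __name__ == '__main__':
    _connect_axes_demo()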
from wbia.guitool.api_item_model import APIItemModel from wbia.guitool.api_table_view import APITableView from wbia.guitool.api_tree_view import APITreeView # from wbia.guitool import guitool_components as comp from functools import partial from six.moves import range import utool as ut import six (print, rrr, profile) = ut.inject2(__name__, '[APIItemWidget]') logger = logging.getLogger('wbia') WIDGET_BASE = QtWidgets.QWidget VERBOSE_ITEM_WIDGET = ut.get_argflag( ('--verbose-item-widget', '--verbiw')) or ut.VERBOSE def simple_api_item_widget(): r""" Very simple example of basic APIItemWidget widget with CustomAPI CommandLine: python -m wbia.guitool.api_item_widget --test-simple_api_item_widget python -m wbia.guitool.api_item_widget --test-simple_api_item_widget --show Example: >>> # ENABLE_DOCTEST >>> # xdoctest: +REQUIRES(--gui) >>> from wbia.guitool.api_item_widget import * # NOQA >>> import wbia.guitool as gt
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import utool as ut import plottool.draw_sv as draw_sv (print, print_, printDBG, rrr, profile) = ut.inject(__name__, '[viz_sver]', DEBUG=False) WRITE_SV_DEBUG = ut.get_argflag('--write-sv-debug') def _get_sv_vartup_for_plottool(ibs, aid1, aid2, chipmatch_FILT, aid2_svtup, config2_=None): """ Compiles IBEIS information into info suitable for plottool """ chip1, chip2 = ibs.get_annot_chips([aid1, aid2], config2_=config2_) kpts1, kpts2 = ibs.get_annot_kpts([aid1, aid2], config2_=config2_) aid2_fm = chipmatch_FILT.aid2_fm fm = aid2_fm[aid2] (homog_inliers, homog_err, H, aff_inliers, aff_err, Aff) = aid2_svtup[aid2] homog_tup = (homog_inliers, H) aff_tup = (aff_inliers, Aff) sv_vartup = chip1, chip2, kpts1, kpts2, fm, homog_tup, aff_tup return sv_vartup def _compute_svvars(ibs, aid1): """
    assert np.all(idx4 == idx3), 'load failed'
    print('\nloaded successfully (BUT NEED TO MAINTAIN BAD DATA)')

    if False:
        print('\n\n---TEST LOAD SAVED INDEX 2 (with removed points)')
        clean_vecs = np.delete(vecs, remove_idx_list, axis=0)
        flann4 = pyflann.FLANN(**flann_params)
        print('\n * CALL LOAD')
        flann4.load_index('test3.flann', clean_vecs)
        #assert np.all(idx1 == _idx1), 'rebuild is not deterministic!'


if __name__ == '__main__':
    """
    CommandLine:
        python -m ibeis.algo.hots._neighbor_experiment
        python -m ibeis.algo.hots._neighbor_experiment --allexamples
        python -m ibeis.algo.hots._neighbor_experiment --allexamples --noface --nosrc
    """
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    if ut.get_argflag('--test-augment_nnindexer_experiment'):
        # See if exec has something to do with memory leaks
        augment_nnindexer_experiment()
        ut.show_if_requested()
        pass
    else:
        ut.doctest_funcs()
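# NOTE: a minimal sketch of the FLANN save/load round trip that the experiment
# above stress-tests, assuming ``pyflann`` and numpy are installed. The key
# caveat (the reason the code shouts about maintaining "BAD DATA") is that
# ``load_index`` does not persist the vectors themselves: you must hand back
# the *same* array that built the index, or the returned neighbor ids point
# at the wrong rows.
import numpy as np
import pyflann

rng = np.random.RandomState(0)
vecs = rng.rand(1000, 128).astype(np.float32)
qvecs = rng.rand(5, 128).astype(np.float32)

flann1 = pyflann.FLANN()
flann1.build_index(vecs, algorithm='kdtree', trees=4)
idx1, dist1 = flann1.nn_index(qvecs, 3)
flann1.save_index('demo.flann')

flann2 = pyflann.FLANN()
flann2.load_index('demo.flann', vecs)   # must be the original vectors
idx2, dist2 = flann2.nn_index(qvecs, 3)
assert np.all(idx1 == idx2), 'a loaded index should answer identically'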
def show_name(ibs, nid, in_image=True, fnum=0, sel_aids=[], subtitle='', annote=False, aid_list=None, index_list=None, **kwargs): r""" Args: ibs (IBEISController): ibeis controller object nid (?): in_image (bool): fnum (int): figure number sel_aids (list): subtitle (str): annote (bool): CommandLine: python -m ibeis.viz.viz_name --test-show_name --dpath ~/latex/crall-candidacy-2015 --save 'figures/{name}.jpg' --no-figtitle --notitle --db NNP_Master3 --figsize=9,4 --clipwhite --dpi=180 --adjust=.05 --index_list=[0,1,2,3] --rc=2,4 --append temp_out_figure.tex --name=IBEIS_PZ_0739 --no-draw_lbls --doboth --no-inimage --diskshow python -m ibeis.viz.viz_name --test-show_name --no-figtitle --notitle --db NNP_Master3 --figsize=9,4 --clipwhite --dpi=180 --adjust=.05 --index_list=[0,1,2,3] --rc=2,4 --append temp_out_figure.tex --name=IBEIS_PZ_0739 --no-draw_lbls --doboth --no-inimage --show python -m ibeis.viz.viz_name --test-show_name --show Example: >>> # DISABLE_DOCTEST >>> from ibeis.viz.viz_name import * # NOQA >>> ibs, nid, in_image, index_list = testdata_showname() >>> fnum = 0 >>> sel_aids = [] >>> subtitle = '' >>> annote = False >>> # execute function >>> show_name(ibs, nid, in_image, fnum, sel_aids, subtitle, annote, index_list=index_list) >>> ut.show_if_requested() """ print('[viz_name] show_name nid=%r, index_list=%r, aid_list=%r' % (nid, index_list, aid_list)) if aid_list is None: aid_list = ibs.get_name_aids(nid) else: assert ut.list_all_eq_to(ibs.get_annot_nids(aid_list), nid) if index_list is not None: aid_list = ut.take(aid_list, index_list) name = ibs.get_name_texts((nid, )) print('[viz_name] * name=%r aid_list=%r' % (name, aid_list)) show_multiple_chips(ibs, aid_list, in_image=in_image, fnum=fnum, sel_aids=sel_aids, annote=annote, **kwargs) if isinstance(nid, np.ndarray): nid = nid[0] if isinstance(name, np.ndarray): name = name[0] use_figtitle = not ut.get_argflag('--no-figtitle') if use_figtitle: figtitle = 'Name View nid=%r name=%r' % (nid, name) df2.set_figtitle(figtitle)
def run_tests(): """ >>> from ibeis.tests.run_tests import * # NOQA """ # starts logging for tests import ibeis ibeis._preload() # Build module list and run tests import sys if True: ensure_testing_data() if ut.in_pyinstaller_package(): # Run tests for installer doctest_modname_list_ = static_doctest_modnames() else: doctest_modname_list_ = dynamic_doctest_modnames() exclude_doctest_pattern = ut.get_argval(('--exclude-doctest-patterns', '--x'), type_=list, default=[]) if exclude_doctest_pattern is not None: import re is_ok = [all([re.search(pat, name) is None for pat in exclude_doctest_pattern]) for name in doctest_modname_list_] doctest_modname_list = ut.compress(doctest_modname_list_, is_ok) else: doctest_modname_list = doctest_modname_list_ coverage = ut.get_argflag(('--coverage', '--cov',)) if coverage: import coverage cov = coverage.Coverage(source=doctest_modname_list) cov.start() print('Starting coverage') exclude_lines = [ 'pragma: no cover', 'def __repr__', 'if self.debug:', 'if settings.DEBUG', 'raise AssertionError', 'raise NotImplementedError', 'if 0:', 'if ut.VERBOSE', 'if _debug:', 'if __name__ == .__main__.:', 'print(.*)', ] for line in exclude_lines: cov.exclude(line) doctest_modname_list2 = [] try: import guitool_ibeis # NOQA except ImportError: HAVE_GUI = False else: HAVE_GUI = True # Remove gui things if possible import re if not HAVE_GUI: doctest_modname_list = [ modname for modname in doctest_modname_list_ if not re.search('\\bgui\\b', modname) and not re.search('\\bviz\\b', modname) ] for modname in doctest_modname_list: try: exec('import ' + modname, globals(), locals()) except ImportError as ex: ut.printex(ex, iswarning=True) # import parse # if not HAVE_GUI: # try: # parsed = parse.parse('No module named {}', str(ex)) # if parsed is None: # parsed = parse.parse('cannot import name {}', str(ex)) # if parsed is not None: # if parsed[0].endswith('_gui'): # print('skipping gui module %r' % (parsed[0],)) # continue # if parsed[0].startswith('viz_'): # print('skipping viz module %r' % (parsed[0],)) # continue # if parsed[0].startswith('interact_'): # print('skipping interact module %r' % (parsed[0],)) # continue # # if parsed[0] in ['sip']: # # print('skipping Qt module %r' % (parsed[0],)) # # continue # except: # pass if not ut.in_pyinstaller_package(): raise else: doctest_modname_list2.append(modname) module_list = [sys.modules[name] for name in doctest_modname_list2] # Write to py.test / nose format if ut.get_argflag('--tonose'): convert_tests_from_ibeis_to_nose(module_list) return 0 nPass, nTotal, failed_cmd_list = ut.doctest_module_list(module_list) if coverage: print('Stoping coverage') cov.stop() print('Saving coverage') cov.save() print('Generating coverage html report') cov.html_report() if nPass != nTotal: return 1 else: return 0
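# NOTE: run_tests() drives coverage.py programmatically. The reusable core of
# that pattern is small enough to sketch on its own (``coverage`` package
# assumed installed; the test callable and package names are placeholders):
import coverage

def run_with_coverage(test_fn, source_pkgs):
    # Measure only our own packages, skip noisy lines, then emit a report.
    cov = coverage.Coverage(source=source_pkgs)
    for pattern in ['pragma: no cover', 'def __repr__', 'if 0:']:
        cov.exclude(pattern)            # regexes for lines to ignore
    cov.start()
    try:
        result = test_fn()
    finally:
        cov.stop()
        cov.save()
        cov.html_report()               # writes htmlcov/index.html by default
    return result

# e.g. run_with_coverage(lambda: my_test_suite(), ['ibeis'])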
def testdata_expts( defaultdb='testdb1', default_acfgstr_name_list=['default:qindex=0:10:4,dindex=0:20'], default_test_cfg_name_list=['default'], a=None, t=None, p=None, qaid_override=None, daid_override=None, initial_aids=None, use_cache=None, dbdir=None, ibs=None, ): r""" Use this if you want data from an experiment. Command line interface to quickly get testdata for test_results. Command line flags can be used to specify db, aidcfg, pipecfg, qaid override, daid override (and maybe initial aids). CommandLine: python -m wbia.init.main_helpers testdata_expts Example: >>> # DISABLE_DOCTEST >>> from wbia.other.dbinfo import * # NOQA >>> import wbia >>> ibs, testres = wbia.testdata_expts(defaultdb='pz_mtest', >>> a='timectrl:qsize=2', >>> t='invar:ai=[false],ri=false', >>> use_cache=false) >>> print('testres = %r' % (testres,)) """ if ut.VERBOSE: logger.info('[main_helpers] testdata_expts') import wbia from wbia.expt import harness if a is not None: default_acfgstr_name_list = a if t is not None and p is None: p = t if p is not None: default_test_cfg_name_list = p if isinstance(default_acfgstr_name_list, six.string_types): default_acfgstr_name_list = [default_acfgstr_name_list] if isinstance(default_test_cfg_name_list, six.string_types): default_test_cfg_name_list = [default_test_cfg_name_list] # from wbia.expt import experiment_helpers if dbdir is not None: dbdir = ut.truepath(dbdir) if ibs is None: ibs = wbia.opendb(defaultdb=defaultdb, dbdir=dbdir) acfg_name_list = ut.get_argval( ('--aidcfg', '--acfg', '-a'), type_=list, default=default_acfgstr_name_list ) test_cfg_name_list = ut.get_argval( ('-t', '-p'), type_=list, default=default_test_cfg_name_list ) daid_override = ut.get_argval( ('--daid-override', '--daids-override'), type_=list, default=daid_override ) qaid_override = ut.get_argval( ('--qaid', '--qaids-override', '--qaid-override'), type_=list, default=qaid_override, ) # Hack a cache here use_bulk_cache = not ut.get_argflag(('--nocache', '--nocache-hs')) use_bulk_cache &= ut.is_developer() if use_cache is not None: use_bulk_cache &= use_cache use_bulk_cache &= False # use_bulk_cache = True if use_bulk_cache: from os.path import dirname cache_dir = ut.ensuredir((dirname(ut.get_module_dir(wbia)), 'BULK_TESTRES')) _cache_wrp = ut.cached_func('testreslist', cache_dir=cache_dir) _load_testres = _cache_wrp(harness.run_expt) else: _load_testres = harness.run_expt testres = _load_testres( ibs, acfg_name_list, test_cfg_name_list, qaid_override=qaid_override, daid_override=daid_override, initial_aids=initial_aids, use_cache=use_cache, ) # testres = test_result.combine_testres_list(ibs, testres_list) if ut.VERBOSE: logger.info(testres) return ibs, testres
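# NOTE: testdata_expts() layers three configuration sources: hard-coded
# defaults, keyword overrides (a=..., t=...), and command-line flags, with the
# command line winning, and a lone string treated as a one-element list. A
# minimal sketch of that precedence with plain argparse (names here are
# illustrative, not the wbia API):
import argparse

def resolve_cfg(default, kwarg_override=None, argv=()):
    value = default if kwarg_override is None else kwarg_override
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--aidcfg', nargs='+', default=None)
    args, _ = parser.parse_known_args(list(argv))
    if args.aidcfg is not None:      # CLI has the final say
        value = args.aidcfg
    if isinstance(value, str):       # allow 'name' shorthand for ['name']
        value = [value]
    return value

assert resolve_cfg('default') == ['default']
assert resolve_cfg('default', kwarg_override='timectrl') == ['timectrl']
assert resolve_cfg('default', argv=['-a', 'timectrl', 'ctrl']) == ['timectrl', 'ctrl']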
def split_analysis(ibs): """ CommandLine: python -m ibeis.other.dbinfo split_analysis --show python -m ibeis split_analysis --show python -m ibeis split_analysis --show --good Ignore: # mount sshfs -o idmap=user lev:/ ~/lev # unmount fusermount -u ~/lev Example: >>> # DISABLE_DOCTEST GGR >>> from ibeis.other.dbinfo import * # NOQA >>> import ibeis >>> dbdir = '/media/danger/GGR/GGR-IBEIS' >>> dbdir = dbdir if ut.checkpath(dbdir) else ut.truepath('~/lev/media/danger/GGR/GGR-IBEIS') >>> ibs = ibeis.opendb(dbdir=dbdir, allow_newdir=False) >>> import guitool as gt >>> gt.ensure_qtapp() >>> win = split_analysis(ibs) >>> ut.quit_if_noshow() >>> import plottool as pt >>> gt.qtapp_loop(qwin=win) >>> #ut.show_if_requested() """ #nid_list = ibs.get_valid_nids(filter_empty=True) import datetime day1 = datetime.date(2016, 1, 30) day2 = datetime.date(2016, 1, 31) filter_kw = { 'multiple': None, #'view': ['right'], #'minqual': 'good', 'is_known': True, 'min_pername': 1, } aids1 = ibs.filter_annots_general(filter_kw=ut.dict_union( filter_kw, { 'min_unixtime': ut.datetime_to_posixtime(ut.date_to_datetime(day1, 0.0)), 'max_unixtime': ut.datetime_to_posixtime(ut.date_to_datetime(day1, 1.0)), }) ) aids2 = ibs.filter_annots_general(filter_kw=ut.dict_union( filter_kw, { 'min_unixtime': ut.datetime_to_posixtime(ut.date_to_datetime(day2, 0.0)), 'max_unixtime': ut.datetime_to_posixtime(ut.date_to_datetime(day2, 1.0)), }) ) all_aids = aids1 + aids2 all_annots = ibs.annots(all_aids) print('%d annots on day 1' % (len(aids1)) ) print('%d annots on day 2' % (len(aids2)) ) print('%d annots overall' % (len(all_annots)) ) print('%d names overall' % (len(ut.unique(all_annots.nids))) ) nid_list, annots_list = all_annots.group(all_annots.nids) REVIEWED_EDGES = True if REVIEWED_EDGES: aids_list = [annots.aids for annots in annots_list] #aid_pairs = [annots.get_am_aidpairs() for annots in annots_list] # Slower aid_pairs = ibs.get_unflat_am_aidpairs(aids_list) # Faster else: # ALL EDGES aid_pairs = [annots.get_aidpairs() for annots in annots_list] speeds_list = ibs.unflat_map(ibs.get_annotpair_speeds, aid_pairs) import vtool as vt max_speeds = np.array([vt.safe_max(s, nans=False) for s in speeds_list]) nan_idx = np.where(np.isnan(max_speeds))[0] inf_idx = np.where(np.isinf(max_speeds))[0] bad_idx = sorted(ut.unique(ut.flatten([inf_idx, nan_idx]))) ok_idx = ut.index_complement(bad_idx, len(max_speeds)) print('#nan_idx = %r' % (len(nan_idx),)) print('#inf_idx = %r' % (len(inf_idx),)) print('#ok_idx = %r' % (len(ok_idx),)) ok_speeds = max_speeds[ok_idx] ok_nids = ut.take(nid_list, ok_idx) ok_annots = ut.take(annots_list, ok_idx) sortx = np.argsort(ok_speeds)[::-1] sorted_speeds = np.array(ut.take(ok_speeds, sortx)) sorted_annots = np.array(ut.take(ok_annots, sortx)) sorted_nids = np.array(ut.take(ok_nids, sortx)) # NOQA sorted_speeds = np.clip(sorted_speeds, 0, 100) #idx = vt.find_elbow_point(sorted_speeds) #EXCESSIVE_SPEED = sorted_speeds[idx] # http://www.infoplease.com/ipa/A0004737.html # http://www.speedofanimals.com/animals/zebra #ZEBRA_SPEED_MAX = 64 # km/h #ZEBRA_SPEED_RUN = 50 # km/h ZEBRA_SPEED_SLOW_RUN = 20 # km/h #ZEBRA_SPEED_FAST_WALK = 10 # km/h #ZEBRA_SPEED_WALK = 7 # km/h MAX_SPEED = ZEBRA_SPEED_SLOW_RUN #MAX_SPEED = ZEBRA_SPEED_WALK #MAX_SPEED = EXCESSIVE_SPEED flags = sorted_speeds > MAX_SPEED flagged_ok_annots = ut.compress(sorted_annots, flags) inf_annots = ut.take(annots_list, inf_idx) flagged_annots = inf_annots + flagged_ok_annots print('MAX_SPEED = %r km/h' % (MAX_SPEED,)) print('%d annots with infinite 
speed' % (len(inf_annots),)) print('%d annots with large speed' % (len(flagged_ok_annots),)) print('Marking all pairs of annots above the threshold as non-matching') from ibeis.algo.graph import graph_iden import networkx as nx progkw = dict(freq=1, bs=True, est_window=len(flagged_annots)) bad_edges_list = [] good_edges_list = [] for annots in ut.ProgIter(flagged_annots, lbl='flag speeding names', **progkw): edge_to_speeds = annots.get_speeds() bad_edges = [edge for edge, speed in edge_to_speeds.items() if speed > MAX_SPEED] good_edges = [edge for edge, speed in edge_to_speeds.items() if speed <= MAX_SPEED] bad_edges_list.append(bad_edges) good_edges_list.append(good_edges) all_bad_edges = ut.flatten(bad_edges_list) good_edges_list = ut.flatten(good_edges_list) print('num_bad_edges = %r' % (len(ut.flatten(bad_edges_list)),)) print('num_bad_edges = %r' % (len(ut.flatten(good_edges_list)),)) if 1: from ibeis.viz import viz_graph2 import guitool as gt gt.ensure_qtapp() if ut.get_argflag('--good'): print('Looking at GOOD (no speed problems) edges') aid_pairs = good_edges_list else: print('Looking at BAD (speed problems) edges') aid_pairs = all_bad_edges aids = sorted(list(set(ut.flatten(aid_pairs)))) infr = graph_iden.AnnotInference(ibs, aids, verbose=False) infr.initialize_graph() # Use random scores to randomize sort order rng = np.random.RandomState(0) scores = (-rng.rand(len(aid_pairs)) * 10).tolist() infr.graph.add_edges_from(aid_pairs) if True: edge_sample_size = 250 pop_nids = ut.unique(ibs.get_annot_nids(ut.unique(ut.flatten(aid_pairs)))) sorted_pairs = ut.sortedby(aid_pairs, scores)[::-1][0:edge_sample_size] sorted_nids = ibs.get_annot_nids(ut.take_column(sorted_pairs, 0)) sample_size = len(ut.unique(sorted_nids)) am_rowids = ibs.get_annotmatch_rowid_from_undirected_superkey(*zip(*sorted_pairs)) flags = ut.not_list(ut.flag_None_items(am_rowids)) #am_rowids = ut.compress(am_rowids, flags) positive_tags = ['SplitCase', 'Photobomb'] flags_list = [ut.replace_nones(ibs.get_annotmatch_prop(tag, am_rowids), 0) for tag in positive_tags] print('edge_case_hist: ' + ut.repr3( ['%s %s' % (txt, sum(flags_)) for flags_, txt in zip(flags_list, positive_tags)])) is_positive = ut.or_lists(*flags_list) num_positive = sum(ut.lmap(any, ut.group_items(is_positive, sorted_nids).values())) pop = len(pop_nids) print('A positive is any edge flagged as a %s' % (ut.conj_phrase(positive_tags, 'or'),)) print('--- Sampling wrt edges ---') print('edge_sample_size = %r' % (edge_sample_size,)) print('edge_population_size = %r' % (len(aid_pairs),)) print('num_positive_edges = %r' % (sum(is_positive))) print('--- Sampling wrt names ---') print('name_population_size = %r' % (pop,)) vt.calc_error_bars_from_sample(sample_size, num_positive, pop, conf_level=.95) nx.set_edge_attributes(infr.graph, name='score', values=dict(zip(aid_pairs, scores))) win = viz_graph2.AnnotGraphWidget(infr=infr, use_image=False, init_mode=None) win.populate_edge_model() win.show() return win # Make review interface for only bad edges infr_list = [] iter_ = list(zip(flagged_annots, bad_edges_list)) for annots, bad_edges in ut.ProgIter(iter_, lbl='creating inference', **progkw): aids = annots.aids nids = [1] * len(aids) infr = graph_iden.AnnotInference(ibs, aids, nids, verbose=False) infr.initialize_graph() infr.reset_feedback() infr_list.append(infr) # Check which ones are user defined as incorrect #num_positive = 0 #for infr in infr_list: # flag = np.any(infr.get_feedback_probs()[0] == 0) # num_positive += flag #print('num_positive = %r' % 
(num_positive,)) #pop = len(infr_list) #print('pop = %r' % (pop,)) iter_ = list(zip(infr_list, bad_edges_list)) for infr, bad_edges in ut.ProgIter(iter_, lbl='adding speed edges', **progkw): flipped_edges = [] for aid1, aid2 in bad_edges: if infr.graph.has_edge(aid1, aid2): flipped_edges.append((aid1, aid2)) infr.add_feedback((aid1, aid2), NEGTV) nx.set_edge_attributes(infr.graph, name='_speed_split', values='orig') nx.set_edge_attributes(infr.graph, name='_speed_split', values={edge: 'new' for edge in bad_edges}) nx.set_edge_attributes(infr.graph, name='_speed_split', values={edge: 'flip' for edge in flipped_edges}) #for infr in ut.ProgIter(infr_list, lbl='flagging speeding edges', **progkw): # annots = ibs.annots(infr.aids) # edge_to_speeds = annots.get_speeds() # bad_edges = [edge for edge, speed in edge_to_speeds.items() if speed > MAX_SPEED] def inference_stats(infr_list_): relabel_stats = [] for infr in infr_list_: num_ccs, num_inconsistent = infr.relabel_using_reviews() state_hist = ut.dict_hist(nx.get_edge_attributes(infr.graph, 'decision').values()) if POSTV not in state_hist: state_hist[POSTV] = 0 hist = ut.dict_hist(nx.get_edge_attributes(infr.graph, '_speed_split').values()) subgraphs = infr.positive_connected_compoments() subgraph_sizes = [len(g) for g in subgraphs] info = ut.odict([ ('num_nonmatch_edges', state_hist[NEGTV]), ('num_match_edges', state_hist[POSTV]), ('frac_nonmatch_edges', state_hist[NEGTV] / (state_hist[POSTV] + state_hist[NEGTV])), ('num_inconsistent', num_inconsistent), ('num_ccs', num_ccs), ('edges_flipped', hist.get('flip', 0)), ('edges_unchanged', hist.get('orig', 0)), ('bad_unreviewed_edges', hist.get('new', 0)), ('orig_size', len(infr.graph)), ('new_sizes', subgraph_sizes), ]) relabel_stats.append(info) return relabel_stats relabel_stats = inference_stats(infr_list) print('\nAll Split Info:') lines = [] for key in relabel_stats[0].keys(): data = ut.take_column(relabel_stats, key) if key == 'new_sizes': data = ut.flatten(data) lines.append('stats(%s) = %s' % (key, ut.repr2(ut.get_stats(data, use_median=True), precision=2))) print('\n'.join(ut.align_lines(lines, '='))) num_incon_list = np.array(ut.take_column(relabel_stats, 'num_inconsistent')) can_split_flags = num_incon_list == 0 print('Can trivially split %d / %d' % (sum(can_split_flags), len(can_split_flags))) splittable_infrs = ut.compress(infr_list, can_split_flags) relabel_stats = inference_stats(splittable_infrs) print('\nTrival Split Info:') lines = [] for key in relabel_stats[0].keys(): if key in ['num_inconsistent']: continue data = ut.take_column(relabel_stats, key) if key == 'new_sizes': data = ut.flatten(data) lines.append('stats(%s) = %s' % ( key, ut.repr2(ut.get_stats(data, use_median=True), precision=2))) print('\n'.join(ut.align_lines(lines, '='))) num_match_edges = np.array(ut.take_column(relabel_stats, 'num_match_edges')) num_nonmatch_edges = np.array(ut.take_column(relabel_stats, 'num_nonmatch_edges')) flags1 = np.logical_and(num_match_edges > num_nonmatch_edges, num_nonmatch_edges < 3) reasonable_infr = ut.compress(splittable_infrs, flags1) new_sizes_list = ut.take_column(relabel_stats, 'new_sizes') flags2 = [len(sizes) == 2 and sum(sizes) > 4 and (min(sizes) / max(sizes)) > .3 for sizes in new_sizes_list] reasonable_infr = ut.compress(splittable_infrs, flags2) print('#reasonable_infr = %r' % (len(reasonable_infr),)) for infr in ut.InteractiveIter(reasonable_infr): annots = ibs.annots(infr.aids) edge_to_speeds = annots.get_speeds() print('max_speed = %r' % 
(max(edge_to_speeds.values())),) infr.initialize_visual_node_attrs() infr.show_graph(use_image=True, only_reviewed=True) rest = ~np.logical_or(flags1, flags2) nonreasonable_infr = ut.compress(splittable_infrs, rest) rng = np.random.RandomState(0) random_idx = ut.random_indexes(len(nonreasonable_infr) - 1, 15, rng=rng) random_infr = ut.take(nonreasonable_infr, random_idx) for infr in ut.InteractiveIter(random_infr): annots = ibs.annots(infr.aids) edge_to_speeds = annots.get_speeds() print('max_speed = %r' % (max(edge_to_speeds.values())),) infr.initialize_visual_node_attrs() infr.show_graph(use_image=True, only_reviewed=True) #import scipy.stats as st #conf_interval = .95 #st.norm.cdf(conf_interval) # view-source:http://www.surveysystem.com/sscalc.htm #zval = 1.96 # 95 percent confidence #zValC = 3.8416 # #zValC = 6.6564 #import statsmodels.stats.api as sms #es = sms.proportion_effectsize(0.5, 0.75) #sms.NormalIndPower().solve_power(es, power=0.9, alpha=0.05, ratio=1) pop = 279 num_positive = 3 sample_size = 15 conf_level = .95 #conf_level = .99 vt.calc_error_bars_from_sample(sample_size, num_positive, pop, conf_level) print('---') vt.calc_error_bars_from_sample(sample_size + 38, num_positive, pop, conf_level) print('---') vt.calc_error_bars_from_sample(sample_size + 38 / 3, num_positive, pop, conf_level) print('---') vt.calc_error_bars_from_sample(15 + 38, num_positive=3, pop=675, conf_level=.95) vt.calc_error_bars_from_sample(15, num_positive=3, pop=675, conf_level=.95) pop = 279 #err_frac = .05 # 5% err_frac = .10 # 10% conf_level = .95 vt.calc_sample_from_error_bars(err_frac, pop, conf_level) pop = 675 vt.calc_sample_from_error_bars(err_frac, pop, conf_level) vt.calc_sample_from_error_bars(.05, pop, conf_level=.95, prior=.1) vt.calc_sample_from_error_bars(.05, pop, conf_level=.68, prior=.2) vt.calc_sample_from_error_bars(.10, pop, conf_level=.68) vt.calc_error_bars_from_sample(100, num_positive=5, pop=675, conf_level=.95) vt.calc_error_bars_from_sample(100, num_positive=5, pop=675, conf_level=.68)
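# NOTE: the error-bar calls above live in vtool. For reference, here is the
# textbook calculation such a helper typically performs: a normal-approximation
# confidence interval for a sampled proportion with a finite-population
# correction. This is standard statistics, not necessarily vtool's exact
# implementation.
import numpy as np
import scipy.stats as st

def proportion_ci(sample_size, num_positive, pop, conf_level=0.95):
    p_hat = num_positive / float(sample_size)
    z = st.norm.ppf(1.0 - (1.0 - conf_level) / 2.0)      # e.g. 1.96 for 95%
    se = np.sqrt(p_hat * (1.0 - p_hat) / sample_size)
    fpc = np.sqrt((pop - sample_size) / float(pop - 1))  # finite population
    margin = z * se * fpc
    return p_hat - margin, p_hat + margin

lo, hi = proportion_ci(sample_size=15, num_positive=3, pop=675, conf_level=.95)
print('true rate is in [%.3f, %.3f] with ~95%% confidence' % (lo, hi))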
def initialize_repo_managers(CODE_DIR, pythoncmd, PY2, PY3): import utool as ut WITH_CNN = True WITH_PYRF = True # WITH_TPL = True WITH_QT = not ut.get_argflag('--no-qt') WITH_GUI = not ut.get_argflag('--no-gui') WITH_CUSTOM_TPL = True WITH_FLUKEMATCH = True WITH_CURVRANK = True # ----------- # IBEIS project repos # ----------- # if True: # jon_repo_base = 'https://github.com/WildbookOrg' # jason_repo_base = 'https://github.com/WildbookOrg' # else: # jon_repo_base = 'https://github.com/wildme' # jason_repo_base = 'https://github.com/wildme' # else: # jon_repo_base = '[email protected] # jason_repo_base = '[email protected] wbia_rman = ut.RepoManager( [ 'https://github.com/WildbookOrg/utool.git', # 'https://github.com/WildbookOrg/sandbox_utools.git', 'https://github.com/WildbookOrg/vtool_ibeis.git', 'https://github.com/WildbookOrg/dtool_ibeis.git', 'https://github.com/Erotemic/ubelt.git', 'https://github.com/WildbookOrg/detecttools.git', ], CODE_DIR, label='core', pythoncmd=pythoncmd, ) tpl_rman = ut.RepoManager([], CODE_DIR, label='tpl', pythoncmd=pythoncmd) if not GET_ARGFLAG('--ignore-opencv'): cv_repo = ut.Repo('https://github.com/Itseez/opencv.git', CODE_DIR, modname='cv2') tpl_rman.add_repo(cv_repo) if WITH_GUI: wbia_rman.add_repos( ['https://github.com/WildbookOrg/plottool_ibeis.git']) if WITH_QT: wbia_rman.add_repos( ['https://github.com/WildbookOrg/guitool_ibeis.git']) tpl_rman.add_repo(ut.Repo(modname=('PyQt4', 'PyQt5', 'PyQt'))) if WITH_CUSTOM_TPL: flann_repo = ut.Repo('https://github.com/WildbookOrg/flann.git', CODE_DIR, modname='pyflann') wbia_rman.add_repo(flann_repo) wbia_rman.add_repos(['https://github.com/WildbookOrg/hesaff.git']) if WITH_CNN: wbia_rman.add_repos([ 'https://github.com/WildbookOrg/wbia_cnn.git', 'https://github.com/WildbookOrg/pydarknet.git', ]) # NEW CNN Dependencies tpl_rman.add_repos(['https://github.com/pytorch/pytorch.git']) # if GET_ARGFLAG('--libgpuarray'): tpl_rman.add_repos(['https://github.com/Theano/libgpuarray.git']) # CNN Dependencies tpl_rman.add_repos([ 'https://github.com/Theano/Theano.git', # 'https://github.com/lisa-lab/pylearn2.git', 'https://github.com/Lasagne/Lasagne.git', ]) if WITH_FLUKEMATCH: wbia_rman.add_repos( ['https://github.com/WildbookOrg/ibeis-flukematch-module.git']) if WITH_CURVRANK: wbia_rman.add_repos( ['https://github.com/WildbookOrg/ibeis-curvrank-module.git']) if WITH_PYRF: wbia_rman.add_repos(['https://github.com/WildbookOrg/pyrf.git']) if False: # Depricated wbia_rman.add_repos([ # 'https://github.com/WildbookOrg/pybing.git', # 'https://github.com/aweinstock314/cyth.git', # 'https://github.com/hjweide/pygist', ]) # Add main repo (Must be checked last due to dependency issues) wbia_rman.add_repos(['https://github.com/WildbookOrg/wbia.git']) # ----------- # Custom third party build/install scripts # ----------- define_custom_scripts(tpl_rman, wbia_rman, PY2, PY3) return tpl_rman, wbia_rman
It is better to use named constants than to hope you spell the same string
correctly every time you use it. (It also makes things much easier when a
string's name changes.)
"""
import logging
import six
import numpy as np
import math
import utool as ut
from collections import OrderedDict
from os.path import join

(print, rrr, profile) = ut.inject2(__name__)
logger = logging.getLogger('wbia')


CONTAINERIZED = ut.get_argflag('--containerized')
PRODUCTION = ut.get_argflag('--production')
HTTPS = ut.get_argflag('--https')

CONTAINER_NAME = ut.get_argval('--container-name', type_=str,
                               default=ut.get_computer_name())
ENGINE_SLOT = ut.get_argval('--engine-slot', type_=str, default='default')

PI = math.pi
TAU = 2.0 * PI

VIEWTEXT_TO_YAW_RADIANS = OrderedDict([
    ('right', 0.000 * TAU),
    ('frontright', 0.125 * TAU),
    ('front', 0.250 * TAU),
def postload_commands(ibs, back): """ Postload commands deal with a specific ibeis database ibeis --db PZ_MTEST --occur "*All Images" --query 1 ibeis --db PZ_MTEST --occur "*All Images" --query-intra """ if ut.NOT_QUIET: print('\n[main_cmd] postload_commands') if params.args.view_database_directory: print('got arg --vdd') vdd(ibs) if params.args.set_default_dbdir: sysres.set_default_dbdir(ibs.get_dbdir()) if params.args.update_query_cfg is not None: # Set query parameters from command line using the --cfg flag cfgdict = ut.parse_cfgstr_list(params.args.update_query_cfg) print('Custom cfgdict specified') print(ut.repr2(cfgdict)) ibs.update_query_cfg(**cfgdict) if params.args.edit_notes: ut.editfile(ibs.get_dbnotes_fpath(ensure=True)) if params.args.delete_cache: ibs.delete_cache() if params.args.delete_cache_complete: ibs.delete_cache(delete_imagesets=True) if params.args.delete_query_cache: ibs.delete_qres_cache() if params.args.set_all_species is not None: ibs._overwrite_all_annot_species_to(params.args.set_all_species) if params.args.dump_schema: ibs.db.print_schema() if ut.get_argflag('--ipynb'): back.launch_ipy_notebook() select_imgsetid = ut.get_argval( ('--select-imgsetid', '--imgsetid', '--occur', '--gsid'), None) if select_imgsetid is not None: print('\n+ --- CMD SELECT IMGSETID=%r ---' % (select_imgsetid, )) # Whoa: this doesnt work. weird. #back.select_imgsetid(select_imgsetid) # This might be the root of gui problems #back.front._change_imageset(select_imgsetid) back.front.select_imageset_tab(select_imgsetid) print('L ___ CMD SELECT IMGSETID=%r ___\n' % (select_imgsetid, )) # Send commands to GUIBack if params.args.select_aid is not None: if back is not None: try: ibsfuncs.assert_valid_aids(ibs, (params.args.select_aid, )) except AssertionError: print('Valid RIDs are: %r' % (ibs.get_valid_aids(), )) raise back.select_aid(params.args.select_aid) if params.args.select_gid is not None: back.select_gid(params.args.select_gid) if params.args.select_nid is not None: back.select_nid(params.args.select_nid) select_name = ut.get_argval('--select-name') if select_name is not None: import ibeis.gui.guiheaders as gh back.ibswgt.select_table_indicies_from_text(gh.NAMES_TREE, select_name, allow_table_change=True) if ut.get_argflag( ('--intra-occur-query', '--query-intra-occur', '--query-intra')): back.special_query_funcs['intra_occurrence'](cfgdict={ 'use_k_padding': False }) qaid_list = ut.get_argval(('--query-aid', '--query'), type_=list, default=None) if qaid_list is not None: #qaid_list = params.args.query_aid # fix stride case if len(qaid_list) == 1 and isinstance(qaid_list[0], tuple): qaid_list = list(qaid_list[0]) daids_mode = ut.get_argval('--daids-mode', type_=str, default=const.VS_EXEMPLARS_KEY) back.compute_queries(qaid_list=qaid_list, daids_mode=daids_mode, ranks_top=10) if ut.get_argflag('--inc-query'): back.incremental_query() if ut.get_argflag(('--dbinfo', '--display_dbinfo')): back.display_dbinfo() pass aidcmd = ut.get_argval('--aidcmd', default=None) aid = ut.get_argval('--aid', type_=int, default=1) if aidcmd: #aidcmd = 'Interact image' metadata = ibs.get_annot_lazy_dict(aid) annot_context_options = metadata['annot_context_options'] aidcmd_dict = dict(annot_context_options) print('aidcmd_dict = %s' % (ut.repr3(aidcmd_dict), )) command = aidcmd_dict[aidcmd] command() #import utool #utool.embed() #back.start_web_server_parallel() if ut.get_argflag('--start-web'): back.start_web_server_parallel() if ut.get_argflag('--name-tab'): from ibeis.gui.guiheaders import NAMES_TREE 
back.front.set_table_tab(NAMES_TREE) view = back.front.views[NAMES_TREE] model = view.model() view._set_sort(model.col_name_list.index('nAids'), col_sort_reverse=True) if ut.get_argflag('--graph'): back.make_qt_graph_interface() screengrab_fpath = ut.get_argval('--screengrab') if screengrab_fpath: from guitool_ibeis.__PYQT__.QtGui import QPixmap from PyQt4.QtTest import QTest from PyQt4.QtCore import Qt fpath = ut.truepath(screengrab_fpath) import guitool_ibeis #ut.embed() timer2 = guitool_ibeis.__PYQT__.QtCore.QTimer() done = [1000] def delayed_screenshot_func(): if done[0] == 500: #back.mainwin.menubar.triggered.emit(back.mainwin.menuFile) print('Mouseclick') QTest.mouseClick(back.mainwin.menuFile, Qt.LeftButton) # This works #QTest.mouseClick(back.front.import_button, Qt.LeftButton) if done[0] == 1: timer2.stop() print('screengrab to %r' % (fpath, )) screenimg = QPixmap.grabWindow(back.mainwin.winId()) screenimg.save(fpath, 'jpg') ut.startfile(fpath) print('lub dub2') done[0] -= 1 return None CLICK_FILE_MENU = True if CLICK_FILE_MENU: #ut.embed() #QTest::keyClick(menu, Qt::Key_Down) pass timer2.delayed_screenshot_func = delayed_screenshot_func timer2.timeout.connect(timer2.delayed_screenshot_func) timer2.start(1) back.mainwin.timer2 = timer2 guitool_ibeis.activate_qwindow(back.mainwin) #QPixmap.grabWindow(back.mainwin.winId()).save(fpath, 'jpg') #ut.startfile(fpath) #ut.embed() pass if params.args.postload_exit: print('[main_cmd] postload exit') sys.exit(0)
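# NOTE: the ``done = [1000]`` one-element list in the screengrab timer above
# is the classic Python 2 trick for mutating a counter from inside a closure
# (Python 2 has no ``nonlocal``). In Python 3 the same countdown-callback
# pattern reads more directly; a minimal standalone version:
def make_countdown(start, on_done):
    remaining = start

    def tick():
        nonlocal remaining          # py3 replacement for the done=[n] hack
        remaining -= 1
        if remaining <= 0:
            on_done()
        return remaining > 0        # False tells the caller to stop polling

    return tick

tick = make_countdown(3, lambda: print('done'))
while tick():
    pass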
def update_wildbook_install_config(webapps_dpath, unpacked_war_dpath):
    """
    CommandLine:
        python -m ibeis ensure_local_war
        python -m ibeis update_wildbook_install_config
        python -m ibeis update_wildbook_install_config --show

    Example:
        >>> from ibeis.control.wildbook_manager import *  # NOQA
        >>> import ibeis
        >>> tomcat_dpath = find_installed_tomcat()
        >>> webapps_dpath = join(tomcat_dpath, 'webapps')
        >>> wb_target = ibeis.const.WILDBOOK_TARGET
        >>> unpacked_war_dpath = join(webapps_dpath, wb_target)
        >>> locals_ = ut.exec_func_src(update_wildbook_install_config, globals())
        >>> #update_wildbook_install_config(webapps_dpath, unpacked_war_dpath)
        >>> ut.quit_if_noshow()
        >>> ut.vd(unpacked_war_dpath)
        >>> ut.editfile(locals_['permission_fpath'])
        >>> ut.editfile(locals_['jdoconfig_fpath'])
        >>> ut.editfile(locals_['asset_store_fpath'])
    """
    mysql_mode = not ut.get_argflag('--nomysql')
    #if ut.get_argflag('--vd'):
    #    ut.vd(unpacked_war_dpath)
    #find_installed_tomcat
    # Make sure permissions are correctly set in wildbook
    # Comment out the line that requires authentication
    permission_fpath = join(unpacked_war_dpath, 'WEB-INF/web.xml')
    ut.assertpath(permission_fpath)
    permission_text = ut.readfrom(permission_fpath)
    lines_to_remove = [
        # '/ImageSetSetMarkedIndividual = authc, roles[admin]'
        '/EncounterSetMarkedIndividual = authc, roles[admin]'
    ]
    new_permission_text = permission_text[:]
    for line in lines_to_remove:
        re.search(re.escape(line), permission_text)
        prefix = ut.named_field('prefix', '\\s*')
        suffix = ut.named_field('suffix', '\\s*\n')
        pattern = ('^' + prefix + re.escape(line) + suffix)
        match = re.search(pattern, permission_text,
                          flags=re.MULTILINE | re.DOTALL)
        if match is None:
            continue
        newline = '<!--%s -->' % (line, )
        repl = ut.bref_field('prefix') + newline + ut.bref_field('suffix')
        new_permission_text = re.sub(pattern, repl, permission_text,
                                     flags=re.MULTILINE | re.DOTALL)
        assert new_permission_text != permission_text, (
            'text should have changed')
    if new_permission_text != permission_text:
        print('Need to write new permission texts')
        ut.writeto(permission_fpath, new_permission_text)
    else:
        print('Permission file seems to be ok')

    # Make sure we are using a non-process based database
    jdoconfig_fpath = join(unpacked_war_dpath,
                           'WEB-INF/classes/bundles/jdoconfig.properties')
    print('Fixing backend database config')
    print('jdoconfig_fpath = %r' % (jdoconfig_fpath, ))
    ut.assertpath(jdoconfig_fpath)
    jdoconfig_text = ut.readfrom(jdoconfig_fpath)
    #ut.vd(dirname(jdoconfig_fpath))
    #ut.editfile(jdoconfig_fpath)
    if mysql_mode:
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'mysql', False)
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'derby', 1)
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'sqlite', 1)
        mysql_user = '******'  # value scrubbed in the source; supply a real user
        mysql_passwd = 'somepassword'
        mysql_dbname = 'ibeiswbtestdb'
        # Use mysql
        jdoconfig_text = re.sub(
            'datanucleus.ConnectionUserName = .*$',
            'datanucleus.ConnectionUserName = ' + mysql_user,
            jdoconfig_text, flags=re.MULTILINE)
        jdoconfig_text = re.sub(
            'datanucleus.ConnectionPassword = .*$',
            'datanucleus.ConnectionPassword = ' + mysql_passwd,
            jdoconfig_text, flags=re.MULTILINE)
        jdoconfig_text = re.sub(
            'datanucleus.ConnectionURL *= *jdbc:mysql:.*$',
            'datanucleus.ConnectionURL = jdbc:mysql://localhost:3306/' + mysql_dbname,
            jdoconfig_text, flags=re.MULTILINE)
        jdoconfig_text = re.sub(
            '^.*jdbc:mysql://localhost:3306/shepherd.*$', '',
            jdoconfig_text, flags=re.MULTILINE)
    else:
        # Use SQLite
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'derby', 1)
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'mysql', 1)
        jdoconfig_text = ut.toggle_comment_lines(jdoconfig_text, 'sqlite', False)
    ut.writeto(jdoconfig_fpath, jdoconfig_text)

    # Need to make sure wildbook can store information in a reasonable place
    #tomcat_data_dir = join(tomcat_startup_dir, 'webapps', 'wildbook_data_dir')
    tomcat_data_dir = join(webapps_dpath, 'wildbook_data_dir')
    ut.ensuredir(tomcat_data_dir)
    ut.writeto(join(tomcat_data_dir, 'test.txt'), 'A hosted test file')
    asset_store_fpath = join(unpacked_war_dpath, 'createAssetStore.jsp')
    asset_store_text = ut.read_from(asset_store_fpath)
    #data_path_pat = ut.named_field('data_path', 'new File(".*?").toPath')
    new_line = (
        'LocalAssetStore as = new LocalAssetStore("example Local AssetStore", '
        'new File("%s").toPath(), "%s", true);'
    ) % (tomcat_data_dir,
         'http://localhost:8080/' + basename(tomcat_data_dir))
    # HACKY
    asset_store_text2 = re.sub('^LocalAssetStore as = .*$', new_line,
                               asset_store_text, flags=re.MULTILINE)
    ut.writeto(asset_store_fpath, asset_store_text2)
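# NOTE: ut.toggle_comment_lines above is doing line-wise config surgery. A
# minimal standalone version of the idea (comment or uncomment every line
# matching a pattern) might look like this -- a sketch of the technique, not
# utool's actual implementation:
import re

def toggle_comment_lines(text, pattern, comment, marker='#'):
    new_lines = []
    for line in text.splitlines():
        if re.search(pattern, line):
            stripped = line.lstrip()
            if comment and not stripped.startswith(marker):
                line = marker + ' ' + line
            elif not comment and stripped.startswith(marker):
                line = stripped[len(marker):].lstrip()
        new_lines.append(line)
    return '\n'.join(new_lines)

cfg = 'db.engine = derby\n# db.engine = mysql'
cfg = toggle_comment_lines(cfg, 'derby', True)    # disable derby
cfg = toggle_comment_lines(cfg, 'mysql', False)   # enable mysql
print(cfg)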
# Change annotations names back to normal python -m wbia wildbook_signal_annot_name_changes:2 """ import logging import utool as ut import requests from wbia.control import controller_inject from wbia.control import wildbook_manager as wb_man # NOQA from wbia.control.controller_inject import make_ibs_register_decorator from wbia.constants import WILDBOOK_TARGET print, rrr, profile = ut.inject2(__name__) logger = logging.getLogger('wbia') DISABLE_WILDBOOK_SIGNAL = ut.get_argflag('--no-wb-signal') CLASS_INJECT_KEY, register_ibs_method = make_ibs_register_decorator(__name__) register_api = controller_inject.get_wbia_flask_api(__name__) # PREFERED_BROWSER = 'chrome' # webbrowser._tryorder PREFERED_BROWSER = None if ut.get_computer_name() == 'hyrule': PREFERED_BROWSER = 'firefox' @register_ibs_method def get_wildbook_base_url(ibs, wb_target=None): if DISABLE_WILDBOOK_SIGNAL:
def flann_add_time_experiment(): """ builds plot of number of annotations vs indexer build time. TODO: time experiment CommandLine: python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --show python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_Master0 --show utprof.py -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --show valgrind --tool=memcheck --suppressions=valgrind-python.supp python -m ibeis.algo.hots._neighbor_experiment --test-flann_add_time_experiment --db PZ_MTEST --no-with-reindex Example: >>> # DISABLE_DOCTEST >>> from ibeis.algo.hots._neighbor_experiment import * # NOQA >>> import ibeis >>> #ibs = ibeis.opendb('PZ_MTEST') >>> result = flann_add_time_experiment() >>> # verify results >>> print(result) >>> ut.show_if_requested() """ import ibeis import utool as ut import numpy as np import plottool as pt def make_flann_index(vecs, flann_params): flann = pyflann.FLANN() flann.build_index(vecs, **flann_params) return flann db = ut.get_argval('--db') ibs = ibeis.opendb(db=db) # Input if ibs.get_dbname() == 'PZ_MTEST': initial = 1 reindex_stride = 16 addition_stride = 4 max_ceiling = 120 elif ibs.get_dbname() == 'PZ_Master0': #ibs = ibeis.opendb(db='GZ_ALL') initial = 32 reindex_stride = 32 addition_stride = 16 max_ceiling = 300001 else: assert False #max_ceiling = 32 all_daids = ibs.get_valid_aids() max_num = min(max_ceiling, len(all_daids)) flann_params = vt.get_flann_params() # Output count_list, time_list_reindex = [], [] count_list2, time_list_addition = [], [] # Setup #all_randomize_daids_ = ut.deterministic_shuffle(all_daids[:]) all_randomize_daids_ = all_daids # ensure all features are computed ibs.get_annot_vecs(all_randomize_daids_) def reindex_step(count, count_list, time_list_reindex): daids = all_randomize_daids_[0:count] vecs = np.vstack(ibs.get_annot_vecs(daids)) with ut.Timer(verbose=False) as t: flann = make_flann_index(vecs, flann_params) # NOQA count_list.append(count) time_list_reindex.append(t.ellapsed) def addition_step(count, flann, count_list2, time_list_addition): daids = all_randomize_daids_[count:count + 1] vecs = np.vstack(ibs.get_annot_vecs(daids)) with ut.Timer(verbose=False) as t: flann.add_points(vecs) count_list2.append(count) time_list_addition.append(t.ellapsed) def make_initial_index(initial): daids = all_randomize_daids_[0:initial + 1] vecs = np.vstack(ibs.get_annot_vecs(daids)) flann = make_flann_index(vecs, flann_params) return flann WITH_REINDEX = not ut.get_argflag('--no-with-reindex') if WITH_REINDEX: # Reindex Part reindex_lbl = 'Reindexing' _reindex_iter = range(1, max_num, reindex_stride) reindex_iter = ut.ProgressIter(_reindex_iter, lbl=reindex_lbl, freq=1) for count in reindex_iter: reindex_step(count, count_list, time_list_reindex) # Add Part flann = make_initial_index(initial) addition_lbl = 'Addition' _addition_iter = range(initial + 1, max_num, addition_stride) addition_iter = ut.ProgressIter(_addition_iter, lbl=addition_lbl) for count in addition_iter: addition_step(count, flann, count_list2, time_list_addition) print('---') print('Reindex took time_list_reindex %.2s seconds' % sum(time_list_reindex)) print('Addition took time_list_reindex %.2s seconds' % sum(time_list_addition)) print('---') statskw = dict(precision=2, newlines=True) print('Reindex stats ' + ut.get_stats_str(time_list_reindex, **statskw)) print('Addition stats ' + ut.get_stats_str(time_list_addition, **statskw)) print('Plotting') #with pt.FigureContext: next_fnum 
= ut.partial(next, iter(range(0, 2)))  # py2's .next() attribute does not exist on py3
    pt.figure(fnum=next_fnum())
    if WITH_REINDEX:
        pt.plot2(count_list, time_list_reindex, marker='-o', equal_aspect=False,
                 x_label='num_annotations', label=reindex_lbl + ' Time',
                 dark=False)
    #pt.figure(fnum=next_fnum())
    pt.plot2(count_list2, time_list_addition, marker='-o', equal_aspect=False,
             x_label='num_annotations', label=addition_lbl + ' Time')
    pt.legend()
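# NOTE: the experiment above times each step with ``with ut.Timer(verbose=False)
# as t: ...`` and then reads ``t.ellapsed``. A minimal equivalent context
# manager, for reference (keeping utool's historical ``ellapsed`` spelling):
import time

class Timer(object):
    def __init__(self, verbose=True):
        self.verbose = verbose
        self.ellapsed = None

    def __enter__(self):
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.ellapsed = time.perf_counter() - self._start
        if self.verbose:
            print('took %.4f seconds' % self.ellapsed)

with Timer(verbose=False) as t:
    sum(range(1000000))
print('t.ellapsed = %r' % (t.ellapsed,))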
# -*- coding: utf-8 -*- import sys import time import utool as ut import matplotlib as mpl from wbia.plottool import custom_figure # from .custom_constants import golden_wh SLEEP_TIME = 0.01 __QT4_WINDOW_LIST__ = [] ut.noinject(__name__, '[fig_presenter]') VERBOSE = ut.get_argflag(('--verbose-fig', '--verbfig', '--verb-pt')) # (print, print_, printDBG, rrr, profile) = ut.inject(__name__, '[fig_presenter]', DEBUG=True) def unregister_qt4_win(win): global __QT4_WINDOW_LIST__ if win == 'all': __QT4_WINDOW_LIST__ = [] else: try: # index = __QT4_WINDOW_LIST__.index(win) __QT4_WINDOW_LIST__.remove(win) except ValueError: pass def register_qt4_win(win): global __QT4_WINDOW_LIST__
from six.moves import zip import tempfile import subprocess import shlex import os from os.path import abspath, dirname, expanduser, join, exists # NOQA import numpy as np import scipy.io (print, rrr, profile) = ut.inject2(__name__, '[selective search]') logger = logging.getLogger('wbia') # SCRIPT_PATH = abspath(dirname(__file__)) SCRIPT_PATH = abspath(expanduser(join('~', 'code', 'selective_search_ijcv_with_python'))) if not ut.get_argflag('--no-selective-search'): try: assert exists(SCRIPT_PATH) except AssertionError: logger.info( 'WARNING Failed to find selective_search_ijcv_with_python. ' 'Selective Search is unavailable' ) # if ut.SUPER_STRICT: # raise VERBOSE_SS = ut.get_argflag('--verbdss') or ut.VERBOSE def detect_gid_list(ibs, gid_list, downsample=True, verbose=VERBOSE_SS, **kwargs):
def autogen_ipynb(ibs, launch=None, run=None): r""" Autogenerates standard IBEIS Image Analysis IPython notebooks. CommandLine: python -m ibeis autogen_ipynb --run --db lynx python -m ibeis autogen_ipynb --run --db lynx python -m ibeis autogen_ipynb --ipynb --db PZ_MTEST -p :proot=smk,num_words=64000 default python -m ibeis autogen_ipynb --ipynb --db PZ_MTEST --asreport python -m ibeis autogen_ipynb --ipynb --db PZ_MTEST --noexample --withtags python -m ibeis autogen_ipynb --ipynb --db PZ_MTEST python -m ibeis autogen_ipynb --ipynb --db STS_SandTigers python -m ibeis autogen_ipynb --db PZ_MTEST # TODO: Add support for dbdir to be specified python -m ibeis autogen_ipynb --db ~/work/PZ_MTEST python -m ibeis autogen_ipynb --ipynb --db Oxford -a default:qhas_any=\(query,\),dpername=1,exclude_reference=True,dminqual=good python -m ibeis autogen_ipynb --ipynb --db PZ_MTEST -a default -t best:lnbnn_normalizer=[None,normlnbnn-test] python -m ibeis.templates.generate_notebook --exec-autogen_ipynb --db wd_peter_blinston --ipynb python -m ibeis autogen_ipynb --db PZ_Master1 --ipynb python -m ibeis autogen_ipynb --db PZ_Master1 -a timectrl:qindex=0:100 -t best best:normsum=True --ipynb --noexample python -m ibeis autogen_ipynb --db PZ_Master1 -a timectrl --run jupyter-notebook Experiments-lynx.ipynb killall python python -m ibeis autogen_ipynb --db humpbacks --ipynb -t default:proot=BC_DTW -a default:has_any=hasnotch python -m ibeis autogen_ipynb --db humpbacks --ipynb -t default:proot=BC_DTW default:proot=vsmany -a default:has_any=hasnotch,mingt=2,qindex=0:50 --noexample python -m ibeis autogen_ipynb --db testdb_curvrank --ipynb -t default:proot=CurvRankDorsal python -m ibeis autogen_ipynb --db testdb_curvrank --ipynb -t default:proot=CurvRankFluke python -m ibeis autogen_ipynb --db PW_Master --ipynb -t default:proot=CurvRankDorsal Ignore: python -m ibeis autogen_ipynb --db WS_ALL Example: >>> # SCRIPT >>> from ibeis.templates.generate_notebook import * # NOQA >>> import ibeis >>> ibs = ibeis.opendb(defaultdb='testdb1') >>> result = autogen_ipynb(ibs) >>> print(result) """ dbname = ibs.get_dbname() fname = 'Experiments-' + dbname nb_fpath = fname + '.ipynb' if ut.get_argflag('--cells'): notebook_cells = make_ibeis_cell_list(ibs) print('\n# ---- \n'.join(notebook_cells)) return # TODO: Add support for dbdir to be specified notebook_str = make_ibeis_notebook(ibs) ut.writeto(nb_fpath, notebook_str) run = ut.get_argflag('--run') if run is None else run launch = launch if launch is not None else ut.get_argflag('--ipynb') if run: run_nb = ut.run_ipython_notebook(notebook_str) output_fpath = ut.export_notebook(run_nb, fname) ut.startfile(output_fpath) elif launch: command = ' '.join([ 'jupyter-notebook', '--NotebookApp.iopub_data_rate_limit=10000000', '--NotebookApp.token=', nb_fpath ]) ut.cmd2(command, detatch=True, verbose=True) else: print('notebook_str =\n%s' % (notebook_str, ))
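# NOTE: besides launching a live server, a generated notebook can be executed
# headlessly. A sketch using the stock ``jupyter nbconvert`` CLI (this is
# standard jupyter tooling, not an ibeis helper; the filenames are examples):
import subprocess

def execute_notebook(nb_fpath, out_fname):
    # Runs every cell and writes the executed copy next to the original.
    subprocess.check_call([
        'jupyter', 'nbconvert', '--to', 'notebook', '--execute',
        '--output', out_fname, nb_fpath,
    ])

# e.g. execute_notebook('Experiments-PZ_MTEST.ipynb',
#                       'Experiments-PZ_MTEST-run.ipynb')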
def __init__(ibs, dbdir=None, ensure=True, wbaddr=None, verbose=True, request_dbversion=None, request_stagingversion=None, force_serial=None): """ Creates a new IBEIS Controller associated with one database """ #if verbose and ut.VERBOSE: print('\n[ibs.__init__] new IBEISController') # HACK try: from ibeis_flukematch import plugin # NOQA except Exception as ex: msg = ('Cannot import the flukematch plugin. ' 'It does not exist or has not been built.') ut.printex(ex, msg, iswarning=True) ibs.dbname = None # an dict to hack in temporary state ibs.const = const ibs.readonly = None ibs.depc_image = None ibs.depc_annot = None ibs.depc_part = None #ibs.allow_override = 'override+warn' ibs.allow_override = True if force_serial is None: if ut.get_argflag( ('--utool-force-serial', '--force-serial', '--serial')): force_serial = True else: force_serial = not ut.in_main_process() ibs.force_serial = force_serial # observer_weakref_list keeps track of the guibacks connected to this # controller ibs.observer_weakref_list = [] # not completely working decorator cache ibs.table_cache = None ibs._initialize_self() ibs._init_dirs(dbdir=dbdir, ensure=ensure) # _send_wildbook_request will do nothing if no wildbook address is # specified ibs._send_wildbook_request(wbaddr) ibs._init_sql(request_dbversion=request_dbversion, request_stagingversion=request_stagingversion) ibs._init_config() if not ut.get_argflag('--noclean') and not ibs.readonly: # ibs._init_burned_in_species() ibs._clean_species() ibs.job_manager = None # Hack for changing the way chips compute # by default use serial because warpAffine is weird with multiproc ibs._parallel_chips = False ibs.containerized = ut.get_argflag('--containerized') if ibs.containerized: print('[ibs.__init__] CONTAINERIZED: True\n') print('[ibs.__init__] END new IBEISController\n')
def get_default_cell_template_list(ibs): """ Defines the order of ipython notebook cells """ cells = notebook_cells noexample = not ut.get_argflag('--examples') asreport = ut.get_argflag('--asreport') withtags = ut.get_argflag('--withtags') cell_template_list = [] info_cells = [ cells.pipe_config_info, cells.annot_config_info, # cells.per_encounter_stats, cells.timestamp_distribution, ] dev_analysis = [ cells.config_overlap, #cells.dbsize_expt, # None if ibs.get_dbname() == 'humpbacks' else cells.feat_score_sep, cells.all_annot_scoresep, cells.success_annot_scoresep, ] cell_template_list += [ cells.introduction if asreport else None, cells.nb_init, cells.db_init, None if ibs.get_dbname() != 'humpbacks' else cells.fluke_select, ] if not asreport: cell_template_list += info_cells if not noexample: cell_template_list += [ cells.example_annotations, cells.example_names, ] cell_template_list += [ cells.per_annotation_accuracy, cells.per_name_accuracy, cells.easy_success_cases, cells.hard_success_cases, cells.failure_type1_cases, cells.failure_type2_cases, cells.total_failure_cases, cells.timedelta_distribution, ] if withtags: cell_template_list += [ cells.investigate_specific_case, cells.view_intereseting_tags, ] if asreport: # Append our debug stuff at the bottom cell_template_list += [cells.IGNOREAFTER] cell_template_list += info_cells cell_template_list += dev_analysis cell_template_list += [ cells.config_disagree_cases, ] cell_template_list = ut.filter_Nones(cell_template_list) cell_template_list = ut.lmap(ut.normalize_cells, cell_template_list) if not asreport: # Remove all of the extra fluff cell_template_list = [(header.split('\n')[0], code, None) for (header, code, footer) in cell_template_list] return cell_template_list
def GET_ARGFLAG(arg, *args, **kwargs):
    # Accept the flag either as a bare token in sys.argv (``setup.py pull``
    # style subcommands) or as a normal utool-parsed ``--flag``.
    import utool as ut
    return arg.lstrip('--') in sys.argv or ut.get_argflag(arg, *args, **kwargs)
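# NOTE: the ``lstrip('--')`` above behaves as intended for these flags, but
# beware that str.lstrip removes a *character set*, not a prefix, so it eats
# every leading dash, not just the first two:
print('--no-qt'.lstrip('--'))     # 'no-qt'   (as intended here)
print('---verbose'.lstrip('--'))  # 'verbose' (ALL leading dashes removed)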
def make_name_graph_interaction( ibs, nids=None, aids=None, selected_aids=[], with_all=True, invis_edges=None, ensure_edges=None, use_image=False, temp_nids=None, **kwargs, ): r""" CommandLine: python -m wbia --tf make_name_graph_interaction --db PZ_MTEST \ --aids=1,2,3,4,5,6,7,8,9 --show python -m wbia --tf make_name_graph_interaction --db LEWA_splits \ --nids=1 --show --split Example: >>> # DISABLE_DOCTEST >>> from wbia.viz.viz_graph import * # NOQA >>> import wbia >>> import wbia.plottool as pt >>> exec(ut.execstr_funckw(make_name_graph_interaction), globals()) >>> defaultdb='testdb1' >>> ibs = wbia.opendb(defaultdb=defaultdb) >>> aids = ut.get_argval('--aids', type_=list, default=None) >>> nids = ut.get_argval('--nids', type_=list, default=ibs.get_valid_nids()[0:5]) >>> nids = None if aids is not None else nids >>> with_all = not ut.get_argflag('--no-with-all') >>> make_name_graph_interaction(ibs, nids, aids, with_all=with_all) >>> #pt.zoom_factory() >>> ut.show_if_requested() """ if aids is None and nids is not None: aids = ut.flatten(ibs.get_name_aids(nids)) elif nids is not None and aids is not None: aids += ibs.get_name_aids(nids) aids = ut.unique(aids) if with_all: nids = ut.unique(ibs.get_annot_name_rowids(aids)) aids = ut.flatten(ibs.get_name_aids(nids)) # aids = aids[0:10] nids = ibs.get_annot_name_rowids(aids) # from wbia.algo.graph import graph_iden # infr = graph_iden.AnnotInference(aids, nids, temp_nids) # NOQA # import utool # utool.embed() from wbia.algo.graph import graph_iden infr = graph_iden.AnnotInference(ibs, aids, nids, temp_nids) infr.initialize_graph() # infr.apply_scores() # infr.apply_weights() if ut.get_argflag('--cut'): infr.apply_all() # import wbia.guitool as gt # gt.ensure_qtapp() # logger.info('infr = %r' % (infr,)) # win = test_qt_graphs(infr=infr, use_image=use_image) # self = win # gt.qtapp_loop(qwin=win, freq=10) self = AnnotGraphInteraction(infr, selected_aids=selected_aids, use_image=use_image) self.show_page() self.show() return self
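# NOTE: the AnnotInference machinery above ultimately groups annotations by
# graph connectivity. Stripped of ibeis, the core idea is just networkx
# connected components over positive match edges (the aid pairs here are
# made up for illustration):
import networkx as nx

aid_pairs = [(1, 2), (2, 3), (4, 5)]   # hypothetical positive matches
graph = nx.Graph()
graph.add_nodes_from([1, 2, 3, 4, 5, 6])
graph.add_edges_from(aid_pairs)
name_groups = list(nx.connected_components(graph))
print(name_groups)  # [{1, 2, 3}, {4, 5}, {6}] -> three inferred "names"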
def draw_feat_scoresep(testres, f=None, disttype=None): r""" SeeAlso: ibeis.algo.hots.scorenorm.train_featscore_normalizer CommandLine: python -m ibeis --tf TestResult.draw_feat_scoresep --show python -m ibeis --tf TestResult.draw_feat_scoresep --show -t default:sv_on=[True,False] python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 --disttype=L2_sift,fg python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 --disttype=L2_sift python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST -t best:lnbnn_on=True --namemode=True python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST -t best:lnbnn_on=True --namemode=False python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST --disttype=L2_sift python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST --disttype=L2_sift -t best:SV=False utprof.py -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 utprof.py -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 --fsvx=1:2 utprof.py -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 --fsvx=0:1 utprof.py -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_Master1 -t best:lnbnn_on=False,bar_l2_on=True --fsvx=0:1 # We want to query the oxford annots taged query # and we want the database to contain # K correct images per query, as well as the distractors python -m ibeis --tf TestResult.draw_feat_scoresep --show --db Oxford -a default:qhas_any=\(query,\),dpername=1,exclude_reference=True,minqual=ok python -m ibeis --tf TestResult.draw_feat_scoresep --show --db Oxford -a default:qhas_any=\(query,\),dpername=1,exclude_reference=True,minqual=good python -m ibeis --tf get_annotcfg_list --db PZ_Master1 -a timectrl --acfginfo --verbtd --veryverbtd --nocache-aid python -m ibeis --tf TestResult.draw_feat_scoresep --show --db PZ_MTEST --disttype=ratio Example: >>> # SCRIPT >>> from ibeis.expt.test_result import * # NOQA >>> from ibeis.init import main_helpers >>> disttype = ut.get_argval('--disttype', type_=list, default=None) >>> ibs, testres = main_helpers.testdata_expts( >>> defaultdb='PZ_MTEST', a=['timectrl'], t=['best']) >>> f = ut.get_argval(('--filt', '-f'), type_=list, default=['']) >>> testres.draw_feat_scoresep(f=f) >>> ut.show_if_requested() """ print('[testres] draw_feat_scoresep') import plottool_ibeis as pt def load_feat_scores(qreq_, qaids): import ibeis # NOQA from os.path import dirname, join # NOQA # HACKY CACHE cfgstr = qreq_.get_cfgstr(with_input=True) cache_dir = join(dirname(dirname(ibeis.__file__)), 'TMP_FEATSCORE_CACHE') namemode = ut.get_argval('--namemode', default=True) fsvx = ut.get_argval('--fsvx', type_='fuzzy_subset', default=slice(None, None, None)) threshx = ut.get_argval('--threshx', type_=int, default=None) thresh = ut.get_argval('--thresh', type_=float, default=.9) num = ut.get_argval('--num', type_=int, default=1) cfg_components = [ cfgstr, disttype, namemode, fsvx, threshx, thresh, f, num ] cache_cfgstr = ','.join(ut.lmap(six.text_type, cfg_components)) cache_hashid = ut.hashstr27(cache_cfgstr + '_v1') cache_name = ('get_cfgx_feat_scores_' + cache_hashid) @ut.cached_func(cache_name, cache_dir=cache_dir, key_argx=[], use_cache=True) def get_cfgx_feat_scores(qreq_, qaids): from ibeis.algo.hots import scorenorm cm_list = qreq_.execute(qaids) # print('Done loading cached chipmatches') tup = scorenorm.get_training_featscores(qreq_, cm_list, disttype, 
namemode, fsvx, threshx, thresh, num=num) # print(ut.depth_profile(tup)) tp_scores, tn_scores, scorecfg = tup return tp_scores, tn_scores, scorecfg tp_scores, tn_scores, scorecfg = get_cfgx_feat_scores(qreq_, qaids) return tp_scores, tn_scores, scorecfg valid_case_pos = testres.case_sample2(filt_cfg=f, return_mask=False) cfgx2_valid_qxs = ut.group_items(valid_case_pos.T[0], valid_case_pos.T[1]) test_qaids = testres.get_test_qaids() cfgx2_valid_qaids = ut.map_dict_vals(ut.partial(ut.take, test_qaids), cfgx2_valid_qxs) join_acfgs = True # TODO: option to average over pipeline configurations if join_acfgs: groupxs = testres.get_cfgx_groupxs() else: groupxs = list(zip(range(len(testres.cfgx2_qreq_)))) grouped_qreqs = ut.apply_grouping(testres.cfgx2_qreq_, groupxs) grouped_scores = [] for cfgxs, qreq_group in zip(groupxs, grouped_qreqs): # testres.print_pcfg_info() score_group = [] for cfgx, qreq_ in zip(cfgxs, testres.cfgx2_qreq_): print('Loading cached chipmatches') qaids = cfgx2_valid_qaids[cfgx] tp_scores, tn_scores, scorecfg = load_feat_scores(qreq_, qaids) score_group.append((tp_scores, tn_scores, scorecfg)) grouped_scores.append(score_group) cfgx2_shortlbl = testres.get_short_cfglbls(join_acfgs=join_acfgs) for score_group, lbl in zip(grouped_scores, cfgx2_shortlbl): tp_scores = np.hstack(ut.take_column(score_group, 0)) tn_scores = np.hstack(ut.take_column(score_group, 1)) scorecfg = '+++'.join(ut.unique(ut.take_column(score_group, 2))) score_group # TODO: learn this score normalizer as a model # encoder = vt.ScoreNormalizer(adjust=4, monotonize=False) encoder = vt.ScoreNormalizer(adjust=2, monotonize=True) encoder.fit_partitioned(tp_scores, tn_scores, verbose=False) figtitle = 'Feature Scores: %s, %s' % (scorecfg, lbl) fnum = None vizkw = {} sephack = ut.get_argflag('--sephack') if not sephack: vizkw['target_tpr'] = .95 vizkw['score_range'] = (0, 1.0) encoder.visualize( figtitle=figtitle, fnum=fnum, with_scores=False, #with_prebayes=True, with_prebayes=False, with_roc=True, with_postbayes=False, #with_postbayes=True, **vizkw) icon = testres.ibs.get_database_icon() if icon is not None: pt.overlay_icon(icon, coords=(1, 0), bbox_alignment=(1, 0)) if ut.get_argflag('--contextadjust'): pt.adjust_subplots(left=.1, bottom=.25, wspace=.2, hspace=.2) pt.adjust_subplots(use_argv=True) return encoder
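# NOTE: the "HACKY CACHE" inside load_feat_scores keys cached results on a
# hash of the full config string. The general pattern, independent of utool's
# ut.cached_func, is small enough to sketch directly (names here are
# illustrative):
import hashlib
import os
import pickle

def cached_call(cache_dir, cfgstr, func, *args, **kwargs):
    # Hash the config so arbitrary-length cfgstrs map to a valid filename.
    key = hashlib.sha1(cfgstr.encode('utf-8')).hexdigest()[:16]
    fpath = os.path.join(cache_dir, 'cache_%s.pkl' % key)
    if os.path.exists(fpath):
        with open(fpath, 'rb') as f:
            return pickle.load(f)
    result = func(*args, **kwargs)
    os.makedirs(cache_dir, exist_ok=True)
    with open(fpath, 'wb') as f:
        pickle.dump(result, f)
    return result

# e.g. scores = cached_call('TMP_FEATSCORE_CACHE', cfgstr, expensive_fn)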
def test_job_engine():
    """
    CommandLine:
        python -m ibeis.web.job_engine --exec-test_job_engine
        python -b -m ibeis.web.job_engine --exec-test_job_engine

        python -m ibeis.web.job_engine test_job_engine
        python -m ibeis.web.job_engine test_job_engine --bg
        python -m ibeis.web.job_engine test_job_engine --fg

    Example:
        >>> # SCRIPT
        >>> from ibeis.web.job_engine import *  # NOQA
        >>> test_job_engine()
    """
    _init_signals()
    # now start a few clients, and fire off some requests
    client_id = np.random.randint(1000)
    jobiface = JobInterface(client_id)
    receiver = JobBackend()
    from ibeis.init import sysres
    if ut.get_argflag('--bg'):
        dbdir = sysres.get_args_dbdir('cache', False, None, None,
                                      cache_priority=False)
        receiver.initialize_background_processes(dbdir)
        print('[testzmq] parent process is looping forever')
        while True:
            time.sleep(1)
    elif ut.get_argflag('--fg'):
        jobiface.initialize_client_thread()
    else:
        dbdir = sysres.get_args_dbdir('cache', False, None, None,
                                      cache_priority=False)
        receiver.initialize_background_processes(dbdir)
        jobiface.initialize_client_thread()

    # Foreground test script
    print('... waiting for jobs')
    if ut.get_argflag('--cmd'):
        ut.embed()
        #jobiface.queue_job()
    else:
        print('[test] ... emit test1')
        callback_url = None
        callback_method = None
        args = (1,)
        jobid1 = jobiface.queue_job('helloworld', callback_url,
                                    callback_method, *args)
        jobiface.wait_for_job_result(jobid1)

        jobid_list = []

        args = ([1], [3, 4, 5])
        kwargs = dict(cfgdict={'K': 1})
        identify_jobid = jobiface.queue_job('query_chips_simple_dict',
                                            callback_url, callback_method,
                                            *args, **kwargs)
        for jobid in jobid_list:
            jobiface.wait_for_job_result(jobid)
        jobiface.wait_for_job_result(identify_jobid)
    print('FINISHED TEST SCRIPT')
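# A minimal client-side sketch of the queue/wait round trip that
# test_job_engine exercises, using only calls that appear above
# (JobInterface, JobBackend, queue_job, wait_for_job_result). The
# 'helloworld' job name comes from the test itself.
import numpy as np
from ibeis.web.job_engine import JobBackend, JobInterface, _init_signals
from ibeis.init import sysres

_init_signals()
jobiface = JobInterface(np.random.randint(1000))
receiver = JobBackend()
dbdir = sysres.get_args_dbdir('cache', False, None, None, cache_priority=False)
receiver.initialize_background_processes(dbdir)
jobiface.initialize_client_thread()

# queue a trivial job (no callbacks) and block until its result is ready
jobid = jobiface.queue_job('helloworld', None, None, 1)
jobiface.wait_for_job_result(jobid)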
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
print, rrr, profile = ut.inject2(__name__)

VERB_PIPELINE = ut.get_argflag(
    ('--verb-pipeline', '--verb-pipe')) or ut.VERYVERBOSE
VERB_TESTDATA = ut.get_argflag('--verb-testdata') or ut.VERYVERBOSE


def testrun_pipeline_upto(qreq_, stop_node='end', verbose=True):
    r"""
    Convenience function for tests. Runs the pipeline by mirroring
    `request_ibeis_query_L0`, but stops at the requested breakpoint
    and returns the local variables at that point.

    Ignore:
        >>> # TODO: autogenerate
        >>> # The following is a stub that starts the autogeneration process
        >>> import utool as ut
        >>> from ibeis.algo.hots import pipeline
        >>> source = ut.get_func_sourcecode(pipeline.request_ibeis_query_L0,
        >>>                                 strip_docstr=True, stripdef=True,
        >>>                                 strip_comments=True)
        >>> import re
        >>> source = re.sub(r'^\s*$\n', '', source, flags=re.MULTILINE)
        >>> print(source)
        >>> ut.replace_between_tags(source, '', sentinal)
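# A hypothetical usage sketch for testrun_pipeline_upto: run the pipeline
# against a small test database and stop at a named stage to inspect its
# local variables. The stop_node value 'spatial_verification' is an
# assumption standing in for one of the pipeline's real stage names, and
# new_query_request is assumed to behave as in other ibeis examples.
import ibeis

ibs = ibeis.opendb(defaultdb='testdb1')
qreq_ = ibs.new_query_request([1], [1, 2, 3])
locals_ = testrun_pipeline_upto(qreq_, stop_node='spatial_verification')
print(sorted(locals_.keys()))  # the pipeline locals available at that stage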
def learn_k():
    r"""
    CommandLine:
        python -m ibeis.other.optimize_k --test-learn_k
        python -m ibeis.other.optimize_k --test-learn_k --show
        python -m ibeis.other.optimize_k --test-learn_k --show --dummy

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.other.optimize_k import *  # NOQA
        >>> import plottool as pt
        >>> # build test data
        >>> # execute function
        >>> known_nd_data, known_target_points, given_data_dims, opt_model_params = learn_k()
        >>> # verify results
        >>> ut.quit_if_noshow()
        >>> plot_search_surface(known_nd_data, known_target_points,
        >>>                     given_data_dims, opt_model_params)
        >>> pt.all_figures_bring_to_front()
        >>> pt.show_if_requested()
    """
    # Compute Training Data
    varydict = {
        #'K': [4, 7, 10, 13, 16, 19, 22, 25][:4],
        #'K': [1, 2, 3, 4, 8, 10, 13, 15],
        'K': [1, 2, 4, 8, 16],
        #'nDaids': [20, 100, 250, 500, 750, 1000],
    }
    nDaids_basis = [
        20, 30, 50, 75, 100, 200, 250, 300, 325, 350, 400,
        500, 600, 750, 800, 900, 1000, 1500
    ]
    DUMMY = ut.get_argflag('--dummy')
    if DUMMY:
        nDaids_list, K_list, nError_list = test_training_data(
            varydict, nDaids_basis)
        nError_list = nError_list.astype(np.float32) / nError_list.max()
    else:
        dbname = ut.get_argval('--db', default='PZ_Master0')
        ibs = ibeis.opendb(dbname)
        verbose = False
        qaids, daids_list = collect_ibeis_training_annotations(
            ibs, nDaids_basis, verbose=verbose)
        nDaids_list, K_list, nError_list = evaluate_training_data(
            ibs, qaids, daids_list, varydict, nDaids_basis, verbose=verbose)
        nError_list = nError_list.astype(np.float32) / len(qaids)
        print('\nFinished Get Training Data')
        print('len(qaids) = %r' % (len(qaids),))
        print(ut.get_stats_str(nError_list))

    #unique_nDaids = np.unique(nDaids_list)

    # Alias to general optimization problem
    known_nd_data = np.vstack([nDaids_list, K_list]).T
    known_target_points = nError_list
    # Mark the data we are given vs what we want to learn
    given_data_dims = [0]
    #learn_data_dims = [1]

    # Minimize K params
    opt_model_params = minimize_compute_K_params(
        known_nd_data, known_target_points, given_data_dims)
    return known_nd_data, known_target_points, given_data_dims, opt_model_params
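# A toy illustration of the data layout that learn_k hands to the optimizer:
# each row of known_nd_data pairs a database size (nDaids) with a K value,
# and known_target_points holds the measured error for that pair. The
# numbers below are fabricated for illustration.
import numpy as np

nDaids_list = np.array([20, 20, 100, 100, 500, 500])
K_list = np.array([1, 4, 1, 4, 1, 4])
nError_list = np.array([.30, .25, .20, .12, .15, .08])

known_nd_data = np.vstack([nDaids_list, K_list]).T  # shape (N, 2)
known_target_points = nError_list                   # shape (N,)
given_data_dims = [0]  # column 0 (nDaids) is given; K is what we learn
print(known_nd_data.shape, known_target_points.shape)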
ctx = zmq.Context.instance()

# FIXME: needs to use correct number of ports
url = 'tcp://127.0.0.1'
_portgen = functools.partial(six.next, itertools.count(51381))
engine_url1 = url + ':' + six.text_type(_portgen())
engine_url2 = url + ':' + six.text_type(_portgen())
collect_url1 = url + ':' + six.text_type(_portgen())
collect_url2 = url + ':' + six.text_type(_portgen())
collect_pushpull_url = url + ':' + six.text_type(_portgen())

NUM_JOBS = 2
NUM_ENGINES = 1

VERBOSE_JOBS = ut.get_argflag('--bg') or ut.get_argflag('--fg')


def update_proctitle(procname):
    try:
        import setproctitle
        print('CHANGING PROCESS TITLE')
        old_title = setproctitle.getproctitle()
        print('old_title = %r' % (old_title,))
        #new_title = 'IBEIS_' + procname + ' ' + old_title
        #new_title = procname + ' ' + old_title
        new_title = 'ibeis_zmq_loop'
        print('new_title = %r' % (new_title,))
        setproctitle.setproctitle(new_title)
    except ImportError:
        print('pip install setproctitle')
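# The _portgen idiom above in isolation: functools.partial over an
# itertools.count yields a zero-argument callable that returns consecutive
# port numbers, so each URL gets a distinct port without manual bookkeeping.
import functools
import itertools
import six

_portgen = functools.partial(six.next, itertools.count(51381))
urls = ['tcp://127.0.0.1:' + six.text_type(_portgen()) for _ in range(3)]
print(urls)  # ports 51381, 51382, 51383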
    #'ibeis.web.apis_engine',
]

"""
# Should import
python -c "import ibeis"
# Should not import
python -c "import ibeis" --no-cnn
UTOOL_NO_CNN=True python -c "import ibeis"
"""

for modname in ut.ProgIter(AUTOLOAD_PLUGIN_MODNAMES, 'loading plugins',
                           enabled=ut.VERYVERBOSE, adjust=False, freq=1):
    if isinstance(modname, tuple):
        flag, modname = modname
        if ut.get_argflag(flag):
            continue
    try:
        ut.import_modname(modname)
    except ImportError as ex:
        ut.printex(ex, iswarning=True)

# NOTE: new plugin code needs to be hacked in here currently.
# This is not a long-term solution. The long-term solution is to get these
# working (which are partially integrated):
# python -m ibeis --tf dev_autogen_explicit_imports
# python -m ibeis --tf dev_autogen_explicit_injects

# Ensure that all injectable modules are imported before constructing the
# class instance
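# A sketch of the (flag, modname) convention that the plugin loop above
# handles: a tuple entry means "skip this module when its flag is passed
# on the command line". The module names here are illustrative; only
# --no-cnn appears in the original comments.
import utool as ut

EXAMPLE_PLUGIN_MODNAMES = [
    'ibeis.viz',                 # hypothetical unconditional plugin
    ('--no-cnn', 'ibeis_cnn'),   # skipped when --no-cnn is given
]
for modname in EXAMPLE_PLUGIN_MODNAMES:
    if isinstance(modname, tuple):
        flag, modname = modname
        if ut.get_argflag(flag):
            continue
    print('would import %r' % (modname,))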
def start_tornado(ibs, port=None, browser=None, url_suffix=None,
                  start_web_loop=True, fallback=True):
    """Initialize the web server"""
    if browser is None:
        browser = ut.get_argflag('--browser')

    if url_suffix is None:
        url_suffix = ut.get_argval('--url', default='')

    # from wbia import constants as const
    # ibs.https = const.HTTPS

    def _start_tornado(ibs_, port_):
        # Get Flask app
        app = controller_inject.get_flask_app()
        app.ibs = ibs_
        # Try to ascertain the socket's domain name
        socket.setdefaulttimeout(0.1)
        try:
            app.server_domain = socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            app.server_domain = '127.0.0.1'
        socket.setdefaulttimeout(None)
        app.server_port = port_
        # URL for the web instance
        app.server_url = 'http://%s:%s' % (app.server_domain, app.server_port)
        logger.info('[web] Tornado server starting at %s' % (app.server_url,))

        # Launch the web browser to view the web interface and API
        if browser:
            url = app.server_url + url_suffix
            import webbrowser
            logger.info('[web] opening browser with url = %r' % (url,))
            webbrowser.open(url)

        if PROMETHEUS:
            # Add prometheus wsgi middleware to route /metrics requests
            logger.info('LOADING PROMETHEUS')
            app_ = DispatcherMiddleware(
                app, {'/metrics': prometheus_client.make_wsgi_app()})
            # Migrate the most essential settings
            app_.server_port = app.server_port
            app_.server_url = app.server_url
            app_.ibs = app.ibs
            app = app_
        else:
            logger.info('SKIPPING PROMETHEUS')

        # Start the tornado web handler
        # WSGI = Web Server Gateway Interface
        # WSGI is a Python standard described in detail in PEP 3333
        wsgi_container = TimedWSGIContainer(app)

        # # Try wrapping with newrelic performance monitoring
        # try:
        #     import newrelic
        #     wsgi_container = newrelic.agent.WSGIApplicationWrapper(wsgi_container)
        # except (ImportError, AttributeError):
        #     pass

        http_server = tornado.httpserver.HTTPServer(wsgi_container)

        try:
            http_server.listen(app.server_port)
        except socket.error:
            fallback_port = ut.find_open_port(app.server_port)
            if fallback:
                logger.info('Port %s is unavailable, using fallback_port = %r'
                            % (port, fallback_port))
                start_tornado(
                    ibs,
                    port=fallback_port,
                    browser=browser,
                    url_suffix=url_suffix,
                    start_web_loop=start_web_loop,
                    fallback=False,
                )
            else:
                raise RuntimeError(
                    ('The specified IBEIS web port %d is not available, '
                     'but %d is') % (app.server_port, fallback_port))

        # Add more verbose logging
        try:
            utool_logfile_handler = ut.util_logging.__UTOOL_ROOT_LOGGER__
        except Exception:
            utool_logfile_handler = None

        if utool_logfile_handler is not None:
            logger_list = []
            try:
                logger_list += [app.logger]
            except AttributeError:
                pass
            try:
                logger_list += [app.app.logger]
            except AttributeError:
                pass
            logger_list += [
                logging.getLogger('concurrent'),
                logging.getLogger('concurrent.futures'),
                logging.getLogger('flask_cors.core'),
                logging.getLogger('flask_cors'),
                logging.getLogger('flask_cors.decorator'),
                logging.getLogger('flask_cors.extension'),
                logging.getLogger('urllib3'),
                logging.getLogger('requests'),
                logging.getLogger('tornado'),
                logging.getLogger('tornado.access'),
                logging.getLogger('tornado.application'),
                logging.getLogger('tornado.general'),
                logging.getLogger('websocket'),
            ]
            for logger_ in logger_list:
                logger_.setLevel(logging.INFO)
                logger_.addHandler(utool_logfile_handler)

        if start_web_loop:
            tornado.ioloop.IOLoop.instance().start()

    # Get the port if unspecified
    if port is None:
        port = appf.DEFAULT_WEB_API_PORT

    # Launch the web handler
    _start_tornado(ibs, port)
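# A stdlib-only sketch of the fallback-port idea used in start_tornado:
# binding to port 0 asks the OS for any free port. This only illustrates
# the concept; it is not necessarily how ut.find_open_port is implemented.
import socket

def find_free_port(host='127.0.0.1'):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind((host, 0))            # port 0 lets the OS choose a free port
    port = sock.getsockname()[1]
    sock.close()
    return port

print(find_free_port())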